Dataset fields (type, min and max of string length or value range):

repo_name           string, lengths 7 to 71
file_path           string, lengths 5 to 118
context             list
import_statement    string, lengths 45 to 12.5k
token_num           int64, 641 to 99.4k
cropped_code        string, lengths 44 to 17k
all_code            string, lengths 43 to 754k
next_line           string, lengths 2 to 330
gold_snippet_index  int64, 0 to 68
created_at          string, length 25
level               string, 9 classes
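A minimal sketch, assuming the rows below are hosted as a Hugging Face dataset, of how this schema could be loaded and inspected with the `datasets` library; the hub path and split name used here are hypothetical placeholders, not the actual dataset ID.

```python
# Sketch: load and inspect a dataset with the fields listed above.
# The hub path and split are hypothetical placeholders, not the real dataset ID.
from datasets import load_dataset

ds = load_dataset("org/repo-level-code-completion", split="train")  # hypothetical path/split

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])   # source repo, file, and "level" string class (e.g. "16k")
print(row["token_num"], row["gold_snippet_index"])        # token count and index of the gold context snippet
print(len(row["context"]))                                # context: list of {"identifier", "path", "snippet"} dicts
print(row["next_line"])                                    # target line that follows cropped_code
```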
Example row 1:

repo_name: ielab/llm-rankers
file_path: run.py
context:
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, method=\"qlm\", batch_size=1, cache_dir=None):\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.device = device\n self.method = method\n self.batch_size = batch_size\n\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n if self.method == \"qlm\":\n prompt = \"Passage: {text}\\nPlease write a question based on this passage.\"\n data = [prompt.format(text=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n labels = self.tokenizer.encode(f\"<pad> {query}\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_labels = labels if labels.shape[0] == len(batch_inputs['input_ids']) \\\n else labels[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n self.total_prompt_tokens += batch_labels.shape[0] * batch_labels.shape[\n 1] # we count decoder inputs as part of prompt.\n\n batch_inputs = batch_inputs.to(self.llm.device)\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n labels=batch_labels).logits\n\n loss_fct = torch.nn.CrossEntropyLoss(reduction=\"none\")\n scores = loss_fct(logits.view(-1, logits.size(-1)), batch_labels.view(-1))\n scores = -1 * scores.view(-1, batch_labels.size(-1)).sum(dim=1) # neg log prob\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n elif self.method == \"yes_no\":\n prompt = \"Passage: {text}\\nQuery: {query}\\nDoes the passage answer the query? 
Answer 'Yes' or 'No'\"\n yes_id = self.tokenizer.encode(\"Yes\", add_special_tokens=False)[0]\n no_id = self.tokenizer.encode(\"No\", add_special_tokens=False)[0]\n data = [prompt.format(text=doc.text, query=query) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.tokenizer.pad_token_id]).to(self.llm.device, dtype=torch.long).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n yes_scores = logits[:, :, yes_id]\n no_scores = logits[:, :, no_id]\n batch_scores = torch.cat((yes_scores, no_scores), dim=1)\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 0]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "MonoT5LlmRanker", "path": "rankers/pointwise.py", "snippet": "class MonoT5LlmRanker(PointwiseLlmRanker):\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n prompt = \"Query: {query} Document: {document} Relevant:\"\n data = [prompt.format(query=query, document=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.llm.config.decoder_start_token_id]).to(self.llm.device, dtype=torch.long).repeat(\n self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(\n batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n\n # 6136 and 1176 are the indexes of the tokens false and true in 
T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 1]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking" }, { "identifier": "SetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class SetwiseLlmRanker(LlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self,\n model_name_or_path,\n tokenizer_name_or_path,\n device,\n num_child=3,\n k=10,\n scoring='generation',\n method=\"heapsort\",\n num_permutation=1,\n cache_dir=None):\n\n self.device = device\n self.num_child = num_child\n self.num_permutation = num_permutation\n self.k = k\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n\n test = []\n for i in range(len(self.CHARACTERS)):\n test.append(f'<pad> Passage {self.CHARACTERS[i]}')\n\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n\n self.scoring = scoring\n self.method = method\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1 if self.num_permutation == 1 else self.num_permutation\n\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n\n if self.num_permutation == 1:\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n output = output[-1]\n else:\n id_passage = [(i, p) for i, p in enumerate(docs)]\n labels = [self.CHARACTERS[i] for i in range(len(docs))]\n batch_data = []\n for _ in range(self.num_permutation):\n batch_data.append([random.sample(id_passage, len(id_passage)),\n random.sample(labels, len(labels))])\n\n batch_ref = []\n input_text = []\n for batch in batch_data:\n ref = []\n passages = []\n characters = []\n for p, c in zip(batch[0], batch[1]):\n ref.append(p[0])\n passages.append(p[1].text)\n characters.append(c)\n batch_ref.append((ref, characters))\n passages = \"\\n\\n\".join([f'Passage {characters[i]}: \"{passages[i]}\"' for i in range(len(passages))])\n input_text.append(f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:')\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1] * input_ids.shape[0]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids.repeat(input_ids.shape[0], 1),\n max_new_tokens=2)\n output = self.tokenizer.batch_decode(output_ids[:, self.decoder_input_ids.shape[1]:],\n skip_special_tokens=True)\n\n # vote\n candidates = []\n for ref, result in zip(batch_ref, output):\n result = result.strip().upper()\n docids, characters = ref\n if len(result) != 1 or result not in characters:\n print(f\"Unexpected output: {result}\")\n continue\n win_doc = docids[characters.index(result)]\n candidates.append(win_doc)\n\n if len(candidates) == 0:\n print(f\"Unexpected voting: {output}\")\n output = \"Unexpected voting.\"\n else:\n # handle tie\n 
candidate_counts = Counter(candidates)\n max_count = max(candidate_counts.values())\n most_common_candidates = [candidate for candidate, count in candidate_counts.items() if\n count == max_count]\n if len(most_common_candidates) == 1:\n output = self.CHARACTERS[most_common_candidates[0]]\n else:\n output = self.CHARACTERS[random.choice(most_common_candidates)]\n\n elif self.config.model_type == 'llama':\n conversation = [{\"role\": \"user\", \"content\": input_text}]\n\n prompt = self.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)\n prompt += \" Passage:\"\n\n input_ids = self.tokenizer(prompt, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n else:\n raise NotImplementedError\n\n elif self.scoring == 'likelihood':\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip(self.CHARACTERS[:len(docs)], scores), key=lambda x: x[1], reverse=True)\n output = ranked[0][0]\n\n else:\n raise NotImplementedError\n\n if len(output) == 1 and output in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n\n return output\n\n def heapify(self, arr, n, i, query):\n # Find largest among root and children\n if self.num_child * i + 1 < n: # if there are children\n docs = [arr[i]] + arr[self.num_child * i + 1: min((self.num_child * (i + 1) + 1), n)]\n inds = [i] + list(range(self.num_child * i + 1, min((self.num_child * (i + 1) + 1), n)))\n output = self.compare(query, docs)\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n try:\n largest = inds[best_ind]\n except IndexError:\n largest = i\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest, query)\n\n def heapSort(self, arr, query, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // self.num_child, -1, -1):\n self.heapify(arr, n, i, query)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0, query)\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n self.heapSort(ranking, query, self.k)\n ranking = list(reversed(ranking))\n\n # elif self.method == \"bubblesort\":\n # for i in range(k):\n # start_ind = len(ranking) - (self.num_child + 1)\n # end_ind = len(ranking)\n # while True:\n # if start_ind < i:\n # start_ind = i\n # output = self.compare(query, ranking[start_ind:end_ind])\n # try:\n # best_ind = self.CHARACTERS.index(output)\n # except ValueError:\n # best_ind = 0\n # if best_ind != 0:\n # ranking[start_ind], 
ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n #\n # if start_ind == i:\n # break\n #\n # start_ind -= self.num_child\n # end_ind -= self.num_child\n elif self.method == \"bubblesort\":\n last_start = len(ranking) - (self.num_child + 1)\n\n for i in range(self.k):\n start_ind = last_start\n end_ind = last_start + (self.num_child + 1)\n is_change = False\n while True:\n if start_ind < i:\n start_ind = i\n output = self.compare(query, ranking[start_ind:end_ind])\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n if best_ind != 0:\n ranking[start_ind], ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n if not is_change:\n is_change = True\n if last_start != len(ranking) - (self.num_child + 1) \\\n and best_ind == len(ranking[start_ind:end_ind])-1:\n last_start += len(ranking[start_ind:end_ind])-1\n\n if start_ind == i:\n break\n\n if not is_change:\n last_start -= self.num_child\n\n start_ind -= self.num_child\n end_ind -= self.num_child\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "OpenAiSetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class OpenAiSetwiseLlmRanker(SetwiseLlmRanker):\n def __init__(self, model_name_or_path, api_key, num_child=3, method='heapsort', k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.num_child = num_child\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pool of passages based on their relevance to the query.\"\n openai.api_key = api_key\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage.'\n\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-Z])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. 
retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "PairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class PairwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path,\n tokenizer_name_or_path,\n device,\n method=\"allpair\",\n batch_size=2,\n k=10,\n cache_dir=None\n ):\n self.device = device\n self.method = method\n self.batch_size = batch_size\n self.k = k\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n\nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device)\n self.decoder_input_ids = self.decoder_input_ids.repeat(self.batch_size, 1)\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.tokenizer.pad_token = \"[PAD]\"\n self.tokenizer.padding_side = \"left\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_texts,\n padding='longest',\n return_tensors=\"pt\").input_ids.to(self.llm.device)\n\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n elif self.config.model_type == 'llama':\n conversation0 = [{\"role\": \"user\", \"content\": input_texts[0]}]\n conversation1 = [{\"role\": \"user\", \"content\": input_texts[1]}]\n\n prompt0 = self.tokenizer.apply_chat_template(conversation0, tokenize=False, add_generation_prompt=True)\n prompt0 += \" Passage:\"\n prompt1 = self.tokenizer.apply_chat_template(conversation1, tokenize=False, add_generation_prompt=True)\n prompt1 += \" Passage:\"\n\n input_ids = self.tokenizer([prompt0, prompt1], return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output0 = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n output1 = self.tokenizer.decode(output_ids[1][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n return [f'Passage {output0}', f'Passage {output1}']\n else:\n raise NotImplementedError\n\n return output\n\n def heapify(self, arr, n, i):\n # Find largest among root and children\n largest = i\n l = 2 * i + 1\n r = 2 * i + 2\n if l < n and arr[l] > arr[i]:\n largest = l\n\n if r < n and arr[r] > arr[largest]:\n largest = r\n\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest)\n\n def heapSort(self, arr, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // 2, -1, -1):\n self.heapify(arr, n, i)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0)\n\n def rerank(self, query: str, ranking: 
List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"allpair\":\n doc_pairs = list(combinations(ranking, 2))\n allpairs = []\n for doc1, doc2 in tqdm(doc_pairs):\n allpairs.append(self.prompt.format(query=query, doc1=doc1.text, doc2=doc2.text))\n allpairs.append(self.prompt.format(query=query, doc1=doc2.text, doc2=doc1.text))\n\n allpairs_dataset = Text2TextGenerationDataset(allpairs, self.tokenizer)\n\n loader = DataLoader(\n allpairs_dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n outputs = []\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_outputs = self.llm.generate(batch_inputs['input_ids'].to(self.llm.device),\n decoder_input_ids=self.decoder_input_ids\n if self.decoder_input_ids.shape[0] == len(batch_inputs['input_ids'])\n else self.decoder_input_ids[:len(batch_inputs['input_ids']), :], # last batch might be smaller\n max_new_tokens=2)\n self.total_completion_tokens += batch_outputs.shape[0] * batch_outputs.shape[1]\n outputs.extend(batch_outputs.cpu().numpy())\n\n outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)\n scores = defaultdict(float)\n for i in range(0, len(outputs), 2):\n doc1, doc2 = doc_pairs[i//2]\n output1 = outputs[i]\n output2 = outputs[i + 1]\n if output1 == \"Passage A\" and output2 == \"Passage B\":\n scores[doc1.docid] += 1\n elif output1 == \"Passage B\" and output2 == \"Passage A\":\n scores[doc2.docid] += 1\n else: # conflict\n scores[doc1.docid] += 0.5\n scores[doc2.docid] += 0.5\n\n ranking = sorted([SearchResult(docid=docid, score=score, text=None) for docid, score in scores.items()],\n key=lambda x: x.score, reverse=True)\n\n elif self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n out = self.ranker.compare(query, [self.text, other.text])\n if out[0] == \"Passage A\" and out[1] == \"Passage B\":\n return True\n else:\n return False\n\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n #\n # elif self.method == \"bubblesort\":\n # k = min(k, len(ranking))\n # for i in range(k):\n # current_ind = len(ranking) - 1\n # while True:\n # if current_ind == i:\n # break\n # doc1 = ranking[current_ind]\n # doc2 = ranking[current_ind - 1]\n # output = self.compare(query, [doc1.text, doc2.text])\n # if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n # ranking[current_ind - 1], ranking[current_ind] = ranking[current_ind], ranking[current_ind - 1]\n # current_ind -= 1\n elif self.method == \"bubblesort\":\n k = min(self.k, len(ranking))\n\n last_end = len(ranking) - 1\n for i in range(k):\n current_ind = last_end\n is_change = False\n while True:\n if current_ind <= i:\n break\n doc1 = ranking[current_ind]\n doc2 = ranking[current_ind - 1]\n output = self.compare(query, [doc1.text, doc2.text])\n if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n ranking[current_ind - 1], ranking[current_ind] = 
ranking[current_ind], ranking[current_ind - 1]\n\n if not is_change:\n is_change = True\n if last_end != len(ranking) - 1: # skip unchanged pairs at the bottom\n last_end += 1\n if not is_change:\n last_end -= 1\n current_ind -= 1\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "DuoT5LlmRanker", "path": "rankers/pairwise.py", "snippet": "class DuoT5LlmRanker(PairwiseLlmRanker):\n def compare(self, query: str, docs: List[str]) -> bool:\n self.total_compare += 1\n self.prompt = 'Query: {query} Document0: {doc1} Document1: {doc2} Relevant:'\n\n inputs = [self.prompt.format(query=query, doc1=docs[0], doc2=docs[1]),\n self.prompt.format(query=query, doc1=docs[1], doc2=docs[0])]\n inputs = self.tokenizer(inputs, padding=True, truncation=True, return_tensors=\"pt\").to(self.llm.device)\n decode_ids = torch.full((2, 1),\n self.llm.config.decoder_start_token_id,\n dtype=torch.long, device=self.llm.device)\n\n self.total_prompt_tokens += inputs['input_ids'].shape[0] * inputs['input_ids'].shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n decoder_input_ids=decode_ids).logits\n # 6136 and 1176 are the indexes of the tokens false and true in T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n batch_probs = batch_scores[:, 1]\n return batch_probs[0] > batch_probs[1]\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n return self.ranker.compare(query, [self.text, other.text])\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results" }, { "identifier": "OpenAiPairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class OpenAiPairwiseLlmRanker(PairwiseLlmRanker):\n def __init__(self,\n model_name_or_path,\n api_key,\n method=\"heapsort\",\n batch_size=2,\n k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 
0\n self.total_completion_tokens = 0\n self.CHARACTERS = [\"A\", \"B\"]\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pair of passages based on their relevance to the query.\"\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n \nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n openai.api_key = api_key\n\n def _get_response(self, input_text):\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-B])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n\n return [f'Passage {self._get_response(input_texts[0])}', f'Passage {self._get_response(input_texts[1])}']\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "OpenAiListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class OpenAiListwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path, api_key, window_size, step_size, num_repeat):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n openai.api_key = api_key\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n messages = create_permutation_instruction_chat(query, docs, 
self.llm)\n while True:\n try:\n completion = openai.ChatCompletion.create(\n model=self.llm,\n messages=messages,\n temperature=0.0,\n request_timeout=15)\n self.total_completion_tokens += int(completion['usage']['completion_tokens'])\n self.total_prompt_tokens += int(completion['usage']['prompt_tokens'])\n return completion['choices'][0]['message']['content']\n except Exception as e:\n print(str(e))\n if \"This model's maximum context length is\" in str(e):\n print('reduce_length')\n return 'ERROR::reduce_length'\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n for _ in range(self.num_repeat):\n ranking = copy.deepcopy(ranking)\n end_pos = len(ranking)\n start_pos = end_pos - self.window_size\n while start_pos >= 0:\n start_pos = max(start_pos, 0)\n result = self.compare(query, ranking[start_pos: end_pos])\n ranking = receive_permutation(ranking, result, start_pos, end_pos)\n end_pos = end_pos - self.step_size\n start_pos = start_pos - self.step_size\n\n for i, doc in enumerate(ranking):\n doc.score = -i\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "ListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class ListwiseLlmRanker(OpenAiListwiseLlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\",\n \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, window_size, step_size,\n scoring='generation', num_repeat=1, cache_dir=None):\n\n self.scoring = scoring\n self.device = device\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n input_text = create_permutation_instruction_complete(query, docs)\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\", truncation=True).input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n elif self.config.model_type == 'llama':\n input_text = create_permutation_instruction_chat(query, docs, model_name=None)\n input_ids = self.tokenizer.apply_chat_template(input_text, return_tensors=\"pt\",\n add_generation_prompt=True).to(self.device)\n\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip()\n\n elif self.scoring == 'likelihood':\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip([f\"[{str(i+1)}]\" for i in range(len(docs))], scores), key=lambda x: x[1], reverse=True)\n output = '>'.join(ranked[i][0] for i in range(len(ranked)))\n\n return output\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" } ]
import_statement:

import logging
import ir_datasets
import argparse
import sys
import json
import time
import random
from pyserini.search.lucene import LuceneSearcher
from pyserini.search._base import get_topics
from rankers.rankers import SearchResult
from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker
from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker
from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker
from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker
from tqdm import tqdm

token_num: 13,420
cropped_code:

random.seed(929)

logger = logging.getLogger(__name__)


def parse_args(parser, commands):
    # Divide argv by commands
    split_argv = [[]]
    for c in sys.argv[1:]:
        if c in commands.choices:
            split_argv.append([c])
        else:
            split_argv[-1].append(c)

    # Initialize namespace
    args = argparse.Namespace()
    for c in commands.choices:
        setattr(args, c, None)

    # Parse each command
    parser.parse_args(split_argv[0], namespace=args)  # Without command
    for argv in split_argv[1:]:  # Commands
        n = argparse.Namespace()
        setattr(args, argv[0], n)
        parser.parse_args(argv, namespace=n)
    return args


def write_run_file(path, results, tag):
    with open(path, 'w') as f:
        for qid, _, ranking in results:
            rank = 1
            for doc in ranking:
                docid = doc.docid
                score = doc.score
                f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n")
                rank += 1


def main(args):
    if args.pointwise:
        if 'monot5' in args.run.model_name_or_path:
            ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path,
                                     tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                     device=args.run.device,
                                     cache_dir=args.run.cache_dir,
                                     method=args.pointwise.method,
                                     batch_size=args.pointwise.batch_size)
        else:

all_code:

random.seed(929)

logger = logging.getLogger(__name__)


def parse_args(parser, commands):
    # Divide argv by commands
    split_argv = [[]]
    for c in sys.argv[1:]:
        if c in commands.choices:
            split_argv.append([c])
        else:
            split_argv[-1].append(c)

    # Initialize namespace
    args = argparse.Namespace()
    for c in commands.choices:
        setattr(args, c, None)

    # Parse each command
    parser.parse_args(split_argv[0], namespace=args)  # Without command
    for argv in split_argv[1:]:  # Commands
        n = argparse.Namespace()
        setattr(args, argv[0], n)
        parser.parse_args(argv, namespace=n)
    return args


def write_run_file(path, results, tag):
    with open(path, 'w') as f:
        for qid, _, ranking in results:
            rank = 1
            for doc in ranking:
                docid = doc.docid
                score = doc.score
                f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n")
                rank += 1


def main(args):
    if args.pointwise:
        if 'monot5' in args.run.model_name_or_path:
            ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path,
                                     tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                     device=args.run.device,
                                     cache_dir=args.run.cache_dir,
                                     method=args.pointwise.method,
                                     batch_size=args.pointwise.batch_size)
        else:
next_line:

ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,

gold_snippet_index: 1
created_at: 2023-10-14 01:39:38+00:00
level: 16k
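The row above pairs cropped_code with a next_line target and a gold_snippet_index into its context list; the exact prompting and scoring scheme is not given in this dump, so the following is only an illustrative sketch of how those fields might be combined for next-line prediction. build_prompt and is_correct are hypothetical helpers, and using gold_snippet_index to pick the context snippet is an assumption.

```python
# Illustrative sketch only: combining a row's fields for next-line prediction.
# build_prompt and is_correct are hypothetical helpers, not part of the dataset.

def build_prompt(row: dict) -> str:
    # Assumption: gold_snippet_index points into the context list.
    gold = row["context"][row["gold_snippet_index"]]
    context_block = f'# {gold["path"]}: {gold["identifier"]}\n{gold["snippet"]}\n\n'
    # Follow with the file's imports and the in-file prefix that ends just before next_line.
    return context_block + row["import_statement"] + "\n" + row["cropped_code"]

def is_correct(prediction: str, row: dict) -> bool:
    # Exact match on the stripped target line; real evaluations may prefer
    # edit-similarity or other metrics.
    return prediction.strip() == row["next_line"].strip()
```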
Example row 2:

repo_name: amazon-science/tabsyn
file_path: baselines/tabddpm/train.py
context:
[ { "identifier": "make_dataset", "path": "utils_train.py", "snippet": "def make_dataset(\n data_path: str,\n T: src.Transformations,\n task_type,\n change_val: bool,\n concat = True,\n):\n\n # classification\n if task_type == 'binclass' or task_type == 'multiclass':\n X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None\n X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None\n y = {} if os.path.exists(os.path.join(data_path, 'y_train.npy')) else None\n\n for split in ['train', 'test']:\n X_num_t, X_cat_t, y_t = src.read_pure_data(data_path, split)\n if X_num is not None:\n X_num[split] = X_num_t\n if X_cat is not None:\n if concat:\n X_cat_t = concat_y_to_X(X_cat_t, y_t)\n X_cat[split] = X_cat_t \n if y is not None:\n y[split] = y_t\n else:\n # regression\n X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None\n X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None\n y = {} if os.path.exists(os.path.join(data_path, 'y_train.npy')) else None\n\n for split in ['train', 'test']:\n X_num_t, X_cat_t, y_t = src.read_pure_data(data_path, split)\n\n if X_num is not None:\n if concat:\n X_num_t = concat_y_to_X(X_num_t, y_t)\n X_num[split] = X_num_t\n if X_cat is not None:\n X_cat[split] = X_cat_t\n if y is not None:\n y[split] = y_t\n\n info = src.load_json(os.path.join(data_path, 'info.json'))\n\n D = src.Dataset(\n X_num,\n X_cat,\n y,\n y_info={},\n task_type=src.TaskType(info['task_type']),\n n_classes=info.get('n_classes')\n )\n\n if change_val:\n D = src.change_val(D)\n\n # def categorical_to_idx(feature):\n # unique_categories = np.unique(feature)\n # idx_mapping = {category: index for index, category in enumerate(unique_categories)}\n # idx_feature = np.array([idx_mapping[category] for category in feature])\n # return idx_feature\n\n # for split in ['train', 'val', 'test']:\n # D.y[split] = categorical_to_idx(D.y[split].squeeze(1))\n\n return src.transform_dataset(D, T, None)" }, { "identifier": "update_ema", "path": "utils_train.py", "snippet": "def update_ema(target_params, source_params, rate=0.999):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for target, source in zip(target_params, source_params):\n target.detach().mul_(rate).add_(source.detach(), alpha=1 - rate)" }, { "identifier": "MLPDiffusion", "path": "baselines/tabddpm/models/modules.py", "snippet": "class MLPDiffusion(nn.Module):\n def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 1024):\n super().__init__()\n self.dim_t = dim_t\n self.num_classes = num_classes\n self.is_y_cond = is_y_cond\n\n # d0 = rtdl_params['d_layers'][0]\n\n rtdl_params['d_in'] = dim_t\n rtdl_params['d_out'] = d_in\n\n self.mlp = MLP.make_baseline(**rtdl_params)\n\n if self.num_classes > 0 and is_y_cond:\n self.label_emb = nn.Embedding(self.num_classes, dim_t)\n elif self.num_classes == 0 and is_y_cond:\n self.label_emb = nn.Linear(1, dim_t)\n \n self.proj = nn.Linear(d_in, dim_t)\n self.time_embed = nn.Sequential(\n nn.Linear(dim_t, dim_t),\n nn.SiLU(),\n nn.Linear(dim_t, dim_t)\n )\n \n def forward(self, x, timesteps, y=None):\n emb = self.time_embed(timestep_embedding(timesteps, self.dim_t))\n if self.is_y_cond and y is not None:\n if self.num_classes > 0:\n y = 
y.squeeze()\n else:\n y = y.resize(y.size(0), 1).float()\n emb += F.silu(self.label_emb(y))\n x = self.proj(x) + emb\n\n return self.mlp(x)" }, { "identifier": "GaussianMultinomialDiffusion", "path": "baselines/tabddpm/models/gaussian_multinomial_distribution.py", "snippet": "class GaussianMultinomialDiffusion(torch.nn.Module):\n def __init__(\n self,\n num_classes: np.array,\n num_numerical_features: int,\n denoise_fn,\n num_timesteps=1000,\n gaussian_loss_type='mse',\n gaussian_parametrization='eps',\n multinomial_loss_type='vb_stochastic',\n parametrization='x0',\n scheduler='cosine',\n device=torch.device('cpu')\n ):\n\n super(GaussianMultinomialDiffusion, self).__init__()\n assert multinomial_loss_type in ('vb_stochastic', 'vb_all')\n assert parametrization in ('x0', 'direct')\n\n if multinomial_loss_type == 'vb_all':\n print('Computing the loss using the bound on _all_ timesteps.'\n ' This is expensive both in terms of memory and computation.')\n\n self.num_numerical_features = num_numerical_features\n self.num_classes = num_classes # it as a vector [K1, K2, ..., Km]\n self.num_classes_expanded = torch.from_numpy(\n np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))])\n ).to(device)\n\n self.slices_for_classes = [np.arange(self.num_classes[0])]\n offsets = np.cumsum(self.num_classes)\n for i in range(1, len(offsets)):\n self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i]))\n self.offsets = torch.from_numpy(np.append([0], offsets)).to(device)\n\n self._denoise_fn = denoise_fn\n self.gaussian_loss_type = gaussian_loss_type\n self.gaussian_parametrization = gaussian_parametrization\n self.multinomial_loss_type = multinomial_loss_type\n self.num_timesteps = num_timesteps\n self.parametrization = parametrization\n self.scheduler = scheduler\n\n alphas = 1. - get_named_beta_schedule(scheduler, num_timesteps)\n alphas = torch.tensor(alphas.astype('float64')) # alpha2_t\n betas = 1. 
- alphas # beta2_t\n\n log_alpha = np.log(alphas)\n log_cumprod_alpha = np.cumsum(log_alpha)\n\n log_1_min_alpha = log_1_min_a(log_alpha)\n log_1_min_cumprod_alpha = log_1_min_a(log_cumprod_alpha)\n\n alphas_cumprod = np.cumprod(alphas, axis=0) # tilde_alpha2_t\n alphas_cumprod_prev = torch.tensor(np.append(1.0, alphas_cumprod[:-1])) # tilde_alpha2_{t-1}\n alphas_cumprod_next = torch.tensor(np.append(alphas_cumprod[1:], 0.0)) # tilde_alpha2_{t+1}\n sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) # tilde_alpha_t\n sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod) # tilde_beta_t\n sqrt_recip_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod) # sqrt(1 / tilde_alpha_t)\n sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod - 1) # sqrt(tilde_beta_t / tilde_alpha_t )\n\n # Gaussian diffusion\n\n self.posterior_variance = (\n betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)\n )\n self.posterior_log_variance_clipped = torch.from_numpy(\n np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))\n ).float().to(device)\n self.posterior_mean_coef1 = (\n betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)\n ).float().to(device)\n self.posterior_mean_coef2 = (\n (1.0 - alphas_cumprod_prev)\n * np.sqrt(alphas.numpy())\n / (1.0 - alphas_cumprod)\n ).float().to(device)\n\n assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.e-5\n assert log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() < 1e-5\n assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.e-5\n\n # Convert to float32 and register buffers.\n self.register_buffer('alphas', alphas.float().to(device))\n self.register_buffer('log_alpha', log_alpha.float().to(device))\n self.register_buffer('log_1_min_alpha', log_1_min_alpha.float().to(device))\n self.register_buffer('log_1_min_cumprod_alpha', log_1_min_cumprod_alpha.float().to(device))\n self.register_buffer('log_cumprod_alpha', log_cumprod_alpha.float().to(device))\n self.register_buffer('alphas_cumprod', alphas_cumprod.float().to(device))\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.float().to(device))\n self.register_buffer('alphas_cumprod_next', alphas_cumprod_next.float().to(device))\n self.register_buffer('sqrt_alphas_cumprod', sqrt_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', sqrt_one_minus_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_recip_alphas_cumprod', sqrt_recip_alphas_cumprod.float().to(device))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', sqrt_recipm1_alphas_cumprod.float().to(device))\n\n self.register_buffer('Lt_history', torch.zeros(num_timesteps))\n self.register_buffer('Lt_count', torch.zeros(num_timesteps))\n \n # Gaussian part\n def gaussian_q_mean_variance(self, x_start, t):\n mean = (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n )\n variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape)\n log_variance = extract(\n self.log_1_min_cumprod_alpha, t, x_start.shape\n )\n return mean, variance, log_variance\n \n def gaussian_q_sample(self, x_start, t, noise=None):\n if noise is None:\n noise = torch.randn_like(x_start)\n assert noise.shape == x_start.shape\n return (\n extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def gaussian_q_posterior_mean_variance(self, x_start, x_t, t):\n assert x_start.shape == x_t.shape\n posterior_mean = (\n 
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start\n + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t\n )\n posterior_variance = extract(self.posterior_variance, t, x_t.shape)\n posterior_log_variance_clipped = extract(\n self.posterior_log_variance_clipped, t, x_t.shape\n )\n assert (\n posterior_mean.shape[0]\n == posterior_variance.shape[0]\n == posterior_log_variance_clipped.shape[0]\n == x_start.shape[0]\n )\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def gaussian_p_mean_variance(\n self, model_output, x, t, clip_denoised=False, denoised_fn=None, model_kwargs=None\n ):\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n\n model_variance = torch.cat([self.posterior_variance[1].unsqueeze(0).to(x.device), (1. - self.alphas)[1:]], dim=0)\n # model_variance = self.posterior_variance.to(x.device)\n model_log_variance = torch.log(model_variance)\n\n model_variance = extract(model_variance, t, x.shape)\n model_log_variance = extract(model_log_variance, t, x.shape)\n\n\n if self.gaussian_parametrization == 'eps':\n pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n elif self.gaussian_parametrization == 'x0':\n pred_xstart = model_output\n else:\n raise NotImplementedError\n \n model_mean, _, _ = self.gaussian_q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n ), f'{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}'\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def _vb_terms_bpd(\n self, model_output, x_start, x_t, t, clip_denoised=False, model_kwargs=None\n ):\n true_mean, _, true_log_variance_clipped = self.gaussian_q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )\n out = self.gaussian_p_mean_variance(\n model_output, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs\n )\n kl = normal_kl(\n true_mean, true_log_variance_clipped, out[\"mean\"], out[\"log_variance\"]\n )\n kl = mean_flat(kl) / np.log(2.0)\n\n decoder_nll = -discretized_gaussian_log_likelihood(\n x_start, means=out[\"mean\"], log_scales=0.5 * out[\"log_variance\"]\n )\n assert decoder_nll.shape == x_start.shape\n decoder_nll = mean_flat(decoder_nll) / np.log(2.0)\n\n # At the first timestep return the decoder NLL,\n # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))\n output = torch.where((t == 0), decoder_nll, kl)\n return {\"output\": output, \"pred_xstart\": out[\"pred_xstart\"], \"out_mean\": out[\"mean\"], \"true_mean\": true_mean}\n \n def _prior_gaussian(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n\n This term can't be optimized, as it only depends on the encoder.\n\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n \n def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None):\n if model_kwargs is None:\n model_kwargs = {}\n\n terms = {}\n if self.gaussian_loss_type == 'mse':\n terms[\"loss\"] = mean_flat((noise - model_out) ** 2)\n elif self.gaussian_loss_type == 'kl':\n terms[\"loss\"] = self._vb_terms_bpd(\n model_output=model_out,\n x_start=x_start,\n x_t=x_t,\n t=t,\n clip_denoised=False,\n model_kwargs=model_kwargs,\n )[\"output\"]\n\n\n return terms['loss']\n \n def _predict_xstart_from_eps(self, x_t, t, eps):\n assert x_t.shape == eps.shape\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps\n )\n \n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def gaussian_p_sample(\n self,\n model_out,\n x,\n t,\n clip_denoised=False,\n denoised_fn=None,\n model_kwargs=None,\n ):\n out = self.gaussian_p_mean_variance(\n model_out,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n )\n noise = torch.randn_like(x)\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * torch.exp(0.5 * out[\"log_variance\"]) * noise\n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n\n # Multinomial part\n\n def multinomial_kl(self, log_prob1, log_prob2):\n\n kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1)\n\n return kl\n\n def q_pred_one_timestep(self, log_x_t, t):\n log_alpha_t = extract(self.log_alpha, t, log_x_t.shape)\n log_1_min_alpha_t = extract(self.log_1_min_alpha, t, log_x_t.shape)\n\n # alpha_t * E[xt] + (1 - alpha_t) 1 / K\n log_probs = log_add_exp(\n log_x_t + log_alpha_t,\n log_1_min_alpha_t - torch.log(self.num_classes_expanded)\n )\n\n return log_probs\n\n def q_pred(self, log_x_start, t):\n log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape)\n log_1_min_cumprod_alpha = extract(self.log_1_min_cumprod_alpha, t, log_x_start.shape)\n\n log_probs = log_add_exp(\n log_x_start + log_cumprod_alpha_t,\n log_1_min_cumprod_alpha - torch.log(self.num_classes_expanded)\n )\n\n return log_probs\n\n def predict_start(self, model_out, log_x_t, t):\n\n\n assert model_out.size(0) == log_x_t.size(0)\n assert model_out.size(1) == self.num_classes.sum(), f'{model_out.size()}'\n\n log_pred = torch.empty_like(model_out)\n for ix in self.slices_for_classes:\n log_pred[:, ix] = F.log_softmax(model_out[:, ix], dim=1)\n return log_pred\n\n def q_posterior(self, log_x_start, log_x_t, t):\n # q(xt-1 | xt, x0) = q(xt | xt-1, x0) * q(xt-1 | x0) / q(xt | x0)\n # where q(xt | xt-1, x0) = q(xt | xt-1).\n\n # EV_log_qxt_x0 = self.q_pred(log_x_start, t)\n\n # print('sum exp', EV_log_qxt_x0.exp().sum(1).mean())\n # assert False\n\n # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1)\n t_minus_1 = t - 1\n # Remove negative values, will not be used anyway for final 
decoder\n t_minus_1 = torch.where(t_minus_1 < 0, torch.zeros_like(t_minus_1), t_minus_1)\n log_EV_qxtmin_x0 = self.q_pred(log_x_start, t_minus_1)\n\n num_axes = (1,) * (len(log_x_start.size()) - 1)\n t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like(log_x_start)\n log_EV_qxtmin_x0 = torch.where(t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32))\n\n # unnormed_logprobs = log_EV_qxtmin_x0 +\n # log q_pred_one_timestep(x_t, t)\n # Note: _NOT_ x_tmin1, which is how the formula is typically used!!!\n # Not very easy to see why this is true. But it is :)\n unnormed_logprobs = log_EV_qxtmin_x0 + self.q_pred_one_timestep(log_x_t, t)\n\n sliced = sliced_logsumexp(unnormed_logprobs, self.offsets)\n log_EV_xtmin_given_xt_given_xstart = unnormed_logprobs - sliced\n\n return log_EV_xtmin_given_xt_given_xstart\n\n def p_pred(self, model_out, log_x, t):\n if self.parametrization == 'x0':\n log_x_recon = self.predict_start(model_out, log_x, t=t)\n log_model_pred = self.q_posterior(\n log_x_start=log_x_recon, log_x_t=log_x, t=t)\n elif self.parametrization == 'direct':\n log_model_pred = self.predict_start(model_out, log_x, t=t)\n else:\n raise ValueError\n\n\n return log_model_pred\n\n @torch.no_grad()\n def p_sample(self, model_out, log_x, t):\n model_log_prob = self.p_pred(model_out, log_x=log_x, t=t)\n out = self.log_sample_categorical(model_log_prob)\n return out\n\n @torch.no_grad()\n def p_sample_loop(self, shape):\n device = self.log_alpha.device\n\n b = shape[0]\n # start with random normal image.\n img = torch.randn(shape, device=device)\n\n for i in reversed(range(1, self.num_timesteps)):\n img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))\n return img\n\n @torch.no_grad()\n def _sample(self, image_size, batch_size = 16):\n return self.p_sample_loop((batch_size, 3, image_size, image_size))\n\n @torch.no_grad()\n def interpolate(self, x1, x2, t = None, lam = 0.5):\n b, *_, device = *x1.shape, x1.device\n t = default(t, self.num_timesteps - 1)\n\n assert x1.shape == x2.shape\n\n t_batched = torch.stack([torch.tensor(t, device=device)] * b)\n xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2))\n\n img = (1 - lam) * xt1 + lam * xt2\n for i in reversed(range(0, t)):\n img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))\n\n return img\n\n def log_sample_categorical(self, logits):\n full_sample = []\n for i in range(len(self.num_classes)):\n one_class_logits = logits[:, self.slices_for_classes[i]]\n uniform = torch.rand_like(one_class_logits)\n gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)\n sample = (gumbel_noise + one_class_logits).argmax(dim=1)\n full_sample.append(sample.unsqueeze(1))\n full_sample = torch.cat(full_sample, dim=1)\n log_sample = index_to_log_onehot(full_sample, self.num_classes)\n return log_sample\n\n def q_sample(self, log_x_start, t):\n log_EV_qxt_x0 = self.q_pred(log_x_start, t)\n\n log_sample = self.log_sample_categorical(log_EV_qxt_x0)\n\n return log_sample\n\n def nll(self, log_x_start):\n b = log_x_start.size(0)\n device = log_x_start.device\n loss = 0\n for t in range(0, self.num_timesteps):\n t_array = (torch.ones(b, device=device) * t).long()\n\n kl = self.compute_Lt(\n log_x_start=log_x_start,\n log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array),\n t=t_array)\n\n loss += kl\n\n loss += self.kl_prior(log_x_start)\n\n return loss\n\n def kl_prior(self, log_x_start):\n b = log_x_start.size(0)\n device = log_x_start.device\n ones = 
torch.ones(b, device=device).long()\n\n log_qxT_prob = self.q_pred(log_x_start, t=(self.num_timesteps - 1) * ones)\n log_half_prob = -torch.log(self.num_classes_expanded * torch.ones_like(log_qxT_prob))\n\n kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob)\n\n return sum_except_batch(kl_prior)\n\n def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False):\n log_true_prob = self.q_posterior(\n log_x_start=log_x_start, log_x_t=log_x_t, t=t)\n log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t)\n\n if detach_mean:\n log_model_prob = log_model_prob.detach()\n\n kl = self.multinomial_kl(log_true_prob, log_model_prob)\n\n # if torch.isinf(kl).nonzero().shape[0] != 0:\n # idx = torch.isinf(kl).nonzero()[0]\n # print('KL 0 :', kl[idx])\n\n kl = sum_except_batch(kl)\n\n decoder_nll = -log_categorical(log_x_start, log_model_prob)\n decoder_nll = sum_except_batch(decoder_nll)\n\n mask = (t == torch.zeros_like(t)).float()\n loss = mask * decoder_nll + (1. - mask) * kl \n\n return loss\n\n def sample_time(self, b, device, method='uniform'):\n if method == 'importance':\n if not (self.Lt_count > 10).all():\n return self.sample_time(b, device, method='uniform')\n\n Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001\n Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1.\n pt_all = (Lt_sqrt / Lt_sqrt.sum()).to(device)\n\n t = torch.multinomial(pt_all, num_samples=b, replacement=True).to(device)\n\n pt = pt_all.gather(dim=0, index=t)\n\n return t, pt\n\n elif method == 'uniform':\n t = torch.randint(0, self.num_timesteps, (b,), device=device).long()\n\n pt = torch.ones_like(t).float() / self.num_timesteps\n return t, pt\n else:\n raise ValueError\n\n def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt):\n\n if self.multinomial_loss_type == 'vb_stochastic':\n\n kl = self.compute_Lt(\n model_out, log_x_start, log_x_t, t\n )\n kl_prior = self.kl_prior(log_x_start)\n # Upweigh loss term of the kl\n\n vb_loss = kl / pt + kl_prior\n\n\n return vb_loss\n\n elif self.multinomial_loss_type == 'vb_all':\n # Expensive, dont do it ;).\n # DEPRECATED\n return -self.nll(log_x_start)\n else:\n raise ValueError()\n\n def log_prob(self, x):\n b, device = x.size(0), x.device\n if self.training:\n return self._multinomial_loss(x)\n\n else:\n log_x_start = index_to_log_onehot(x, self.num_classes)\n\n t, pt = self.sample_time(b, device, 'importance')\n\n kl = self.compute_Lt(\n log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t)\n\n kl_prior = self.kl_prior(log_x_start)\n\n # Upweigh loss term of the kl\n loss = kl / (pt + 1e-6) + kl_prior\n\n return -loss\n \n @torch.no_grad()\n def loss_at_step_t(self, x, step):\n\n b = x.shape[0]\n device = x.device\n\n t = (torch.ones((b,)) * step).long().to(device)\n pt = torch.ones_like(t).float() / self.num_timesteps\n\n x_num = x[:, :self.num_numerical_features]\n x_cat = x[:, self.num_numerical_features:]\n \n x_num_t = x_num\n log_x_cat_t = x_cat\n if x_num.shape[1] > 0:\n noise = torch.randn_like(x_num)\n x_num_t = self.gaussian_q_sample(x_num, t, noise=noise)\n if x_cat.shape[1] > 0:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes)\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t)\n \n x_in = torch.cat([x_num_t, log_x_cat_t], dim=1)\n\n model_out = self._denoise_fn(\n x_in,\n t\n )\n\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n loss_multi = torch.zeros((1,)).float()\n loss_gauss = torch.zeros((1,)).float()\n if 
x_cat.shape[1] > 0:\n loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt) / len(self.num_classes)\n \n if x_num.shape[1] > 0:\n loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise)\n\n recon_x0_num = self.recon_x0(x_in, model_out, t)[:,:self.num_numerical_features]\n\n recon_loss = self._gaussian_loss(recon_x0_num, x_num, x_num_t, t, x_num)\n\n return loss_multi.mean(), loss_gauss.mean(), recon_loss.mean()\n \n @torch.no_grad()\n def recon_x0(self, x, model_out, t):\n # x_num = x[:, :self.num_numerical_features]\n\n x0 = extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * (x - model_out * extract(self.sqrt_one_minus_alphas_cumprod, t, x.shape))\n \n return x0\n\n def mixed_loss(self, x):\n b = x.shape[0]\n device = x.device\n t, pt = self.sample_time(b, device, 'uniform')\n\n x_num = x[:, :self.num_numerical_features]\n x_cat = x[:, self.num_numerical_features:]\n \n x_num_t = x_num\n log_x_cat_t = x_cat\n if x_num.shape[1] > 0:\n noise = torch.randn_like(x_num)\n x_num_t = self.gaussian_q_sample(x_num, t, noise=noise)\n if x_cat.shape[1] > 0:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes)\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t)\n \n x_in = torch.cat([x_num_t, log_x_cat_t], dim=1)\n\n model_out = self._denoise_fn(\n x_in,\n t\n )\n\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n loss_multi = torch.zeros((1,)).float()\n loss_gauss = torch.zeros((1,)).float()\n\n if x_cat.shape[1] > 0:\n loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt) / len(self.num_classes)\n \n if x_num.shape[1] > 0:\n loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise)\n\n\n return loss_multi.mean(), loss_gauss.mean()\n \n @torch.no_grad()\n def mixed_elbo(self, x0):\n b = x0.size(0)\n device = x0.device\n\n x_num = x0[:, :self.num_numerical_features]\n x_cat = x0[:, self.num_numerical_features:]\n has_cat = x_cat.shape[1] > 0\n if has_cat:\n log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes).to(device)\n\n gaussian_loss = []\n xstart_mse = []\n mse = []\n mu_mse = []\n out_mean = []\n true_mean = []\n multinomial_loss = []\n for t in range(self.num_timesteps):\n t_array = (torch.ones(b, device=device) * t).long()\n noise = torch.randn_like(x_num)\n\n x_num_t = self.gaussian_q_sample(x_start=x_num, t=t_array, noise=noise)\n if has_cat:\n log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t_array)\n else:\n log_x_cat_t = x_cat\n\n model_out = self._denoise_fn(\n torch.cat([x_num_t, log_x_cat_t], dim=1),\n t_array\n )\n \n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n\n kl = torch.tensor([0.0])\n if has_cat:\n kl = self.compute_Lt(\n model_out=model_out_cat,\n log_x_start=log_x_cat,\n log_x_t=log_x_cat_t,\n t=t_array\n )\n\n out = self._vb_terms_bpd(\n model_out_num,\n x_start=x_num,\n x_t=x_num_t,\n t=t_array,\n clip_denoised=False\n )\n\n multinomial_loss.append(kl)\n gaussian_loss.append(out[\"output\"])\n xstart_mse.append(mean_flat((out[\"pred_xstart\"] - x_num) ** 2))\n # mu_mse.append(mean_flat(out[\"mean_mse\"]))\n out_mean.append(mean_flat(out[\"out_mean\"]))\n true_mean.append(mean_flat(out[\"true_mean\"]))\n\n eps = self._predict_eps_from_xstart(x_num_t, t_array, out[\"pred_xstart\"])\n mse.append(mean_flat((eps - noise) ** 2))\n\n gaussian_loss = torch.stack(gaussian_loss, dim=1)\n multinomial_loss = 
torch.stack(multinomial_loss, dim=1)\n xstart_mse = torch.stack(xstart_mse, dim=1)\n mse = torch.stack(mse, dim=1)\n # mu_mse = torch.stack(mu_mse, dim=1)\n out_mean = torch.stack(out_mean, dim=1)\n true_mean = torch.stack(true_mean, dim=1)\n\n\n\n prior_gauss = self._prior_gaussian(x_num)\n\n prior_multin = torch.tensor([0.0])\n if has_cat:\n prior_multin = self.kl_prior(log_x_cat)\n\n total_gauss = gaussian_loss.sum(dim=1) + prior_gauss\n total_multin = multinomial_loss.sum(dim=1) + prior_multin\n return {\n \"total_gaussian\": total_gauss,\n \"total_multinomial\": total_multin,\n \"losses_gaussian\": gaussian_loss,\n \"losses_multinimial\": multinomial_loss,\n \"xstart_mse\": xstart_mse,\n \"mse\": mse,\n # \"mu_mse\": mu_mse\n \"out_mean\": out_mean,\n \"true_mean\": true_mean\n }\n\n @torch.no_grad()\n def gaussian_ddim_step(\n self,\n model_out_num,\n x,\n t,\n t_prev,\n clip_denoised=False,\n denoised_fn=None,\n eta=1.0\n ):\n out = self.gaussian_p_mean_variance(\n model_out_num,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=None,\n )\n\n eps = self._predict_eps_from_xstart(x, t, out[\"pred_xstart\"])\n\n alpha_bar = extract(self.alphas_cumprod, t, x.shape)\n \n if t[0] != 0:\n alpha_bar_prev = extract(self.alphas_cumprod, t_prev, x.shape)\n else:\n alpha_bar_prev = extract(self.alphas_cumprod_prev, t_prev, x.shape)\n \n sigma = (\n eta\n * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * torch.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n\n noise = torch.randn_like(x)\n mean_pred = (\n out[\"pred_xstart\"] * torch.sqrt(alpha_bar_prev)\n + torch.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps\n )\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n sample = mean_pred + nonzero_mask * sigma * noise\n\n return sample\n\n \n @torch.no_grad()\n def gaussian_ddim_sample(\n self,\n noise,\n T,\n eta=0.0\n ):\n x = noise\n b = x.shape[0]\n device = x.device\n for t in reversed(range(T)):\n print(f'Sample timestep {t:4d}', end='\\r')\n t_array = (torch.ones(b, device=device) * t).long()\n out_num = self._denoise_fn(x, t_array)\n x = self.gaussian_ddim_step(\n out_num,\n x,\n t_array\n )\n print()\n return x\n\n\n @torch.no_grad()\n def gaussian_ddim_reverse_step(\n self,\n model_out_num,\n x,\n t,\n clip_denoised=False,\n eta=0.0\n ):\n assert eta == 0.0, \"Eta must be zero.\"\n out = self.gaussian_p_mean_variance(\n model_out_num,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=None,\n model_kwargs=None,\n )\n\n eps = (\n extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x\n - out[\"pred_xstart\"]\n ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x.shape)\n alpha_bar_next = extract(self.alphas_cumprod_next, t, x.shape)\n\n mean_pred = (\n out[\"pred_xstart\"] * torch.sqrt(alpha_bar_next)\n + torch.sqrt(1 - alpha_bar_next) * eps\n )\n\n return mean_pred\n\n @torch.no_grad()\n def gaussian_ddim_reverse_sample(\n self,\n x,\n T\n ):\n b = x.shape[0]\n device = x.device\n for t in range(T):\n print(f'Reverse timestep {t:4d}', end='\\r')\n t_array = (torch.ones(b, device=device) * t).long()\n out_num = self._denoise_fn(x, t_array)\n x = self.gaussian_ddim_reverse_step(\n out_num,\n x,\n t_array,\n eta=0.0\n )\n print()\n\n return x\n\n\n @torch.no_grad()\n def multinomial_ddim_step(\n self,\n model_out_cat,\n log_x_t,\n t,\n t_prev,\n eta=1.0\n ):\n # not ddim, essentially\n log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t, t=t)\n\n alpha_bar = extract(self.alphas_cumprod, t, 
log_x_t.shape)\n\n if t[0] != 0:\n alpha_bar_prev = extract(self.alphas_cumprod, t_prev, log_x_t.shape)\n else:\n alpha_bar_prev = extract(self.alphas_cumprod_prev, t_prev, log_x_t.shape)\n \n sigma = (\n eta\n * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))\n * torch.sqrt(1 - alpha_bar / alpha_bar_prev)\n )\n\n coef1 = sigma\n coef2 = alpha_bar_prev - sigma * alpha_bar\n coef3 = 1 - coef1 - coef2\n\n\n log_ps = torch.stack([\n torch.log(coef1) + log_x_t,\n torch.log(coef2) + log_x0,\n torch.log(coef3) - torch.log(self.num_classes_expanded)\n ], dim=2) \n\n log_prob = torch.logsumexp(log_ps, dim=2)\n\n out = self.log_sample_categorical(log_prob)\n\n return out\n\n @torch.no_grad()\n def sample_ddim(self, num_samples, steps = 1000):\n b = num_samples\n device = self.log_alpha.device\n z_norm = torch.randn((b, self.num_numerical_features), device=device)\n\n has_cat = self.num_classes[0] != 0\n log_z = torch.zeros((b, 0), device=device).float()\n if has_cat:\n uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device)\n log_z = self.log_sample_categorical(uniform_logits)\n \n interval = 1000 // steps\n timesteps = list(np.arange(999, -1, -interval))\n\n if timesteps[-1] != 0:\n timesteps.append(0)\n \n for i in range(0, len(timesteps)):\n\n print(f'Sample timestep {i:4d}', end='\\r')\n \n t = torch.full((b,), timesteps[i], device=device, dtype=torch.long)\n \n \n if i != len(timesteps) -1 :\n t_prev = torch.full((b,), timesteps[i+1], device=device, dtype=torch.long)\n else:\n t_prev = torch.full((b,), 0, device=device, dtype=torch.long)\n \n model_out = self._denoise_fn(\n torch.cat([z_norm, log_z], dim=1).float(),\n t\n )\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t, t_prev, clip_denoised=False)\n if has_cat:\n log_z = self.multinomial_ddim_step(model_out_cat, log_z, t, t_prev)\n\n print()\n z_ohe = torch.exp(log_z).round()\n z_cat = log_z\n if has_cat:\n z_cat = ohe_to_categories(z_ohe, self.num_classes)\n sample = torch.cat([z_norm, z_cat], dim=1).cpu()\n return sample\n\n\n @torch.no_grad()\n def sample(self, num_samples):\n b = num_samples\n device = self.log_alpha.device\n z_norm = torch.randn((b, self.num_numerical_features), device=device)\n\n has_cat = self.num_classes[0] != 0\n log_z = torch.zeros((b, 0), device=device).float()\n if has_cat:\n uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device)\n print(uniform_logits.shape)\n log_z = self.log_sample_categorical(uniform_logits)\n\n for i in reversed(range(0, self.num_timesteps)):\n print(f'Sample timestep {i:4d}', end='\\r')\n t = torch.full((b,), i, device=device, dtype=torch.long)\n model_out = self._denoise_fn(\n torch.cat([z_norm, log_z], dim=1).float(),\n t\n )\n model_out_num = model_out[:, :self.num_numerical_features]\n model_out_cat = model_out[:, self.num_numerical_features:]\n z_norm = self.gaussian_p_sample(model_out_num, z_norm, t, clip_denoised=False)['sample']\n if has_cat:\n log_z = self.p_sample(model_out_cat, log_z, t)\n\n print()\n z_ohe = torch.exp(log_z).round()\n z_cat = log_z\n if has_cat:\n z_cat = ohe_to_categories(z_ohe, self.num_classes)\n sample = torch.cat([z_norm, z_cat], dim=1).cpu()\n return sample\n \n def sample_all(self, num_samples, batch_size, ddim=False, steps = 1000):\n if ddim:\n print('Sample using DDIM.')\n sample_fn = self.sample_ddim\n else:\n sample_fn = self.sample\n \n b = batch_size\n\n 
all_samples = []\n num_generated = 0\n while num_generated < num_samples:\n if not ddim:\n sample = sample_fn(b)\n else:\n sample = sample_fn(b, steps=steps)\n mask_nan = torch.any(sample.isnan(), dim=1)\n sample = sample[~mask_nan]\n\n all_samples.append(sample)\n \n if sample.shape[0] != b:\n raise FoundNANsError\n num_generated += sample.shape[0]\n\n x_gen = torch.cat(all_samples, dim=0)[:num_samples]\n\n return x_gen" } ]
import os
import sys
import time
import torch
import numpy as np
import pandas as pd
import src
from copy import deepcopy
from utils_train import make_dataset, update_ema
from baselines.tabddpm.models.modules import MLPDiffusion
from baselines.tabddpm.models.gaussian_multinomial_distribution import GaussianMultinomialDiffusion
11,647
def get_model(
    model_name,
    model_params,
    n_num_features,
    category_sizes
):
    print(model_name)
    if model_name == 'mlp':
model = MLPDiffusion(**model_params)
2
2023-10-10 18:06:31+00:00
16k
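For readability, the sketch below shows how the cropped get_model stub in the record above plausibly continues. Only the MLPDiffusion import and the line model = MLPDiffusion(**model_params) come from this record's own import_statement and next_line fields; the fallback branch and the return statement are assumptions added purely for illustration and are not part of the recorded file.

from baselines.tabddpm.models.modules import MLPDiffusion  # taken from this record's import_statement

def get_model(
    model_name,
    model_params,
    n_num_features,
    category_sizes
):
    # Prefix reproduced from the record's cropped_code field.
    print(model_name)
    if model_name == 'mlp':
        model = MLPDiffusion(**model_params)  # this record's gold next_line
    else:
        # Assumed fallback: the recorded file is cut before this point.
        raise NotImplementedError(f"unknown model_name: {model_name}")
    return model  # assumed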
ThomasMrY/DisDiff
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n \n def kl_splits(self, latent_unit=6):\n mean_splits = self.mean.chunk(latent_unit, dim=-1)\n var_splits = self.var.chunk(latent_unit, dim=-1)\n logvar_splits = self.logvar.chunk(latent_unit, dim=-1)\n kl_loss = 0\n for mean, var, logvar in zip(mean_splits, var_splits, logvar_splits):\n kl_split = 0.5 * torch.sum(torch.pow(mean, 2)\n + var - 1.0 - logvar,\n dim=-1)\n kl_loss += torch.sum(kl_split) / kl_split.shape[0]\n return kl_loss/latent_unit\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n # h = self.encoder(x)\n # h = self.quant_conv(h)\n # quant, emb_loss, info = self.quantize(h)\n # return quant, emb_loss, info\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": 
"ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n log_dict_ae[\"train/epoch_num\"] = self.current_epoch\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n 
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev, ddim_coef = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_coef', ddim_coef)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(cond = conditioning, shape=size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim(x = img, c=cond, t=ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,**kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t = return_wrap(e_t, torch.full((b, 1, 1, 1), self.ddim_coef[index], device=device))\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # p.savez(\"data.npz\", z=z, x = x, xrec = xrec, x_T = x_T, time = time, alphas = alphas, alphas_prev = alphas_prev, sqrt_one_minus_alphas = sqrt_one_minus_alphas, sigmas = sigmas.cpu().numpy(),e_t = e_t)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "return_wrap", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def return_wrap(inp, coef):\n if isinstance(inp, Return):\n return inp.pred\n elif isinstance(inp, Return_grad) or isinstance(inp, Return_grad_full):\n # return inp.out_grad\n return inp.pred + coef * inp.out_grad" } ]
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import copy
import os
import pandas as pd
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.diffusionmodules.util import return_wrap
11,455
def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None, sampled_index= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) 
print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, sampled_index = sampled_index, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) self.ce_loss = nn.CrossEntropyLoss(reduction = "none") if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) self.register_buffer("shift_coef", - to_torch(np.sqrt(alphas)) * (1. - self.alphas_cumprod_prev) / torch.sqrt(1. - self.alphas_cumprod)) self.register_buffer("ddim_coef", -self.sqrt_one_minus_alphas_cumprod) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") self.load_epoch = sd['epoch'] self.load_step = sd["global_step"] if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.ddim_coef, t, x.shape)) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred if clip_denoised: x_recon.clamp_(-1., 1.) 
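        # Plug the (optionally clipped) x0 estimate into the closed-form Gaussian posterior
        # q(x_{t-1} | x_t, x_0); its mean and (log-)variance are what p_sample draws from below.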
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(eps_pred, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): pass # _, loss_dict_no_ema = self.shared_step(batch) # with self.ema_scope(): # _, loss_dict_ema = self.shared_step(batch) # loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} # self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) # self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, dis_loss_flag = False, detach_flag = False, train_enc_flag = False, dis_weight = 1.0, dis_loss_type = "IM", *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = 
concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key self.dis_loss_flag = dis_loss_flag self.detach_flag = detach_flag self.train_enc_flag = train_enc_flag self.dis_weight = dis_weight self.dis_loss_type = dis_loss_type try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if hasattr(self.model.diffusion_model,"scale_factor"): del self.scale_factor self.register_buffer('scale_factor', self.model.diffusion_model.scale_factor) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING Pre-Trained STD-RESCALING ###") else: del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; 
pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] else: c = None xc = None out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None, sampled_index= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 
'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not 
isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, sampled_index = sampled_index, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
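        # mean_flat averages the KL over every non-batch dimension, and dividing by log(2)
        # converts nats to bits, giving the bits-per-dim value promised in the docstring.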
return mean_flat(kl_prior) / np.log(2.0)
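The schedule buffers registered in register_schedule reduce the forward process to a single closed-form draw, x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * noise, which is exactly what q_sample evaluates. The stand-alone sketch below reproduces that draw with a plain linear beta schedule for illustration; the endpoints mirror the linear_start/linear_end defaults above, but it is not the repository's make_beta_schedule or its API.

```python
import torch

# Illustrative linear schedule (assumption): same endpoints as linear_start/linear_end above.
T = 1000
betas = torch.linspace(1e-4, 2e-2, T, dtype=torch.float64)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def q_sample(x0: torch.Tensor, t: int) -> torch.Tensor:
    """Draw x_t ~ q(x_t | x_0) = N(sqrt(abar_t) * x_0, (1 - abar_t) * I)."""
    abar_t = alphas_cumprod[t].to(x0.dtype)
    noise = torch.randn_like(x0)
    return torch.sqrt(abar_t) * x0 + torch.sqrt(1.0 - abar_t) * noise

x0 = torch.randn(2, 3, 32, 32)   # toy batch standing in for images or latents
xt = q_sample(x0, t=500)         # heavily noised sample midway through the schedule
```

Because alphas_cumprod shrinks toward zero, larger t pushes x_t toward pure noise, which is why the training loss above can simply regress the injected noise (eps-parameterization) at a uniformly sampled t.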
5
2023-10-07 09:58:07+00:00
16k
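As a companion to the forward-process sketch above, one reverse (denoising) step can be reconstructed from the quantities the DDPM class registers as posterior_mean_coef1/2 and posterior_variance. The sketch below is an illustrative, self-contained version of that standard update under the same assumed linear schedule (and v_posterior = 0); it is not the repository's p_sample, and the random eps_pred merely stands in for a U-Net noise prediction.

```python
import torch

# Assumed linear schedule, matching the illustrative sketch earlier in this section.
T = 1000
betas = torch.linspace(1e-4, 2e-2, T, dtype=torch.float64)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = torch.cat([torch.ones(1, dtype=torch.float64), alphas_cumprod[:-1]])

def p_sample_step(x_t: torch.Tensor, eps_pred: torch.Tensor, t: int) -> torch.Tensor:
    """One ancestral step x_t -> x_{t-1} via the posterior q(x_{t-1} | x_t, x_0)."""
    g = lambda v: v[t].to(x_t.dtype)                      # scalar schedule lookup at step t
    # Recover x_0 from the predicted noise (eps-parameterization) and clip it.
    x0 = (x_t - torch.sqrt(1.0 - g(alphas_cumprod)) * eps_pred) / torch.sqrt(g(alphas_cumprod))
    x0 = x0.clamp(-1.0, 1.0)
    # Posterior mean and variance, mirroring posterior_mean_coef1/2 and posterior_variance.
    coef1 = g(betas) * torch.sqrt(g(alphas_cumprod_prev)) / (1.0 - g(alphas_cumprod))
    coef2 = (1.0 - g(alphas_cumprod_prev)) * torch.sqrt(g(alphas)) / (1.0 - g(alphas_cumprod))
    mean = coef1 * x0 + coef2 * x_t
    var = g(betas) * (1.0 - g(alphas_cumprod_prev)) / (1.0 - g(alphas_cumprod))
    noise = torch.randn_like(x_t) if t > 0 else torch.zeros_like(x_t)  # no noise at t == 0
    return mean + torch.sqrt(var) * noise

x_t = torch.randn(2, 3, 32, 32)
eps_pred = torch.randn_like(x_t)                 # stand-in for the model's noise output
x_prev = p_sample_step(x_t, eps_pred, t=500)
```

Iterating this step from t = T - 1 down to 0, with eps_pred supplied by the trained model, is essentially p_sample_loop above; the LatentDiffusion subclass adds conditioning and first-stage decoding on top.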
wiio12/LEGO-Prover
lego_prover/prover.py
[ { "identifier": "IsabelleEnv", "path": "lego_prover/env/isa_bridge.py", "snippet": "class IsabelleEnv(gym.Env):\n def __init__(\n self,\n logger=None,\n isabelle_path=\"/Users/wiio/Isabelle2022\",\n working_dir=\"miniF2F\",\n interactive_file=\"miniF2F/interactive.thy\",\n server_host=\"http://127.0.0.1\",\n server_port=8000,\n request_timeout=600,\n log_path=\"./logs\",\n ):\n self.logger = logger\n self.isabelle_path = isabelle_path\n self.working_dir = os.path.abspath(working_dir)\n self.interactive_file = os.path.abspath(interactive_file)\n self.server = f\"{server_host}:{server_port}\"\n self.server_port = server_port\n self.request_timeout = request_timeout\n self.log_path = log_path\n self.isabelle_server = self.get_isabelle_process(server_port)\n self.isabelle_server.run()\n self.stub = None\n \n # wait for isabelle server to run\n time.sleep(3)\n\n self.has_reset = False\n self.reset_options = None\n self.connected = False\n\n def get_isabelle_process(self, server_port):\n self.logger.info(f\"Starting isabelle server at port {server_port}\")\n U.f_mkdir(self.log_path, \"isabelle_server\")\n return SubprocessMonitor(\n commands=[\n \"bash\",\n \"run_server.sh\",\n str(server_port),\n ],\n name=\"isabelle_server\",\n ready_match=r\"Server is running. Press Ctrl-C to stop.\",\n log_path=U.f_join(self.log_path, \"isabelle_server\"),\n cwd=os.path.abspath(\"lego_prover/env/Portal-to-ISAbelle\"),\n server_port=server_port,\n )\n \n def step(\n self,\n code: str,\n formal_statement: str = None,\n quick_check: bool = False,\n ) -> Tuple[ObsType, SupportsFloat, bool, bool, Dict[str, Any]]:\n # if \"theory\" in code:\n # assert \"begin\" in code and \"end\" in code, \\\n # \"Outer syntax error: not complete theorem file\"\n # code = code[code.index(\"begin\") + len(\"begin\"): code.index(\"end\")].strip()\n \n # step 0: replace special token\n for symbol, value in SPECIAL_SYMBOL.items():\n if symbol in code:\n code = code.replace(symbol, value)\n\n # step 1: parse code\n parsed_code = self._get_parsed_code(code)\n\n # step 2: step by step verification\n verified_result = self._verify_step_by_step(parsed_code, quick_check=quick_check)\n if quick_check:\n return verified_result, None, None, None\n\n # step 3: post process error message\n verified_result, code, correct_partial_code, incorrect_code = self._post_process_error_msg(code, parsed_code, verified_result)\n\n # step 4: get skill code\n skill_codes = self._post_process_skill_code(correct_partial_code)\n\n # step 5: get request\n requests = self._get_request(code, skill_codes)\n \n return verified_result, code, skill_codes, requests\n\n def render(self):\n raise NotImplementedError(\"render is not implemented\")\n\n def reset(self, imports=None, hard_reset=False):\n # TODO: we fix the imports for now, we support update imports later.\n if self.stub is None or hard_reset:\n self.stub = create_stub(self.server_port)\n try:\n self.logger.info(self.stub.InitialiseIsabelle(server_pb2.IsaPath(path=self.isabelle_path)).message)\n self.logger.info(self.stub.IsabelleWorkingDirectory(server_pb2.IsaPath(path=self.working_dir)).message)\n self.logger.info(self.stub.IsabelleContext(server_pb2.IsaContext(context=self.interactive_file)).message)\n self.successful_starting = True\n except Exception as e:\n self.logger.info(\"Failure at initializing Isabelle process.\\n\"\n \"Make sure the path your provide is where the Isabelle executable is.\")\n self.logger.info(e)\n # This will reset all state\n self._post(f\"<initialise>\")\n return f\"Starting is 
successful: {self.successful_starting}\"\n else:\n self._post(\"reset_problem\")\n return f\"soft reset problem successful\"\n \n def close(self):\n if self.stub is not None:\n self._exit()\n self.isabelle_server.stop()\n return not self.connected\n \n # @func_set_timeout(1800, allowOverride=True)\n def _post(self, action):\n reset_retry_cnt = 3\n while reset_retry_cnt > 0:\n try:\n result = self.stub.IsabelleCommand(server_pb2.IsaCommand(command=action)).state\n return result\n except Exception as e:\n self.logger.info(f\"Isabelle environment exception: {e}\")\n self.isabelle_server.terminate()\n self.isabelle_server = self.get_isabelle_process(self.server_port)\n self.isabelle_server.run()\n time.sleep(3)\n self.reset(hard_reset=True)\n reset_retry_cnt -= 1\n assert False, \"Isabelle enviroment fail to reboot!\"\n \n\n def _exit(self):\n try:\n self._post('exit')\n except:\n self.logger.info(\"Post('exit') timed out, kill from system...\")\n os.system(\"ps aux | grep Isabelle | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1\")\n os.system(\"ps aux | grep poly | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1\")\n\n\n def _get_parsed_code(self, theory, tls_name='default') -> List[str]:\n steps = self._post(f\"<parse text> ${theory}\")\n steps = steps.split('<SEP>')\n steps = [s for s in steps if s.strip() != '']\n # remove weird '$' step and whitespace steps\n steps = [s for s in steps if s != '$' and s.strip() != '']\n return steps\n \n def _parse_hammer_output(self, obs):\n \"\"\"Parse the sledgehammer output, otherwise return an empty string\"\"\"\n if '<hammer>' in obs:\n output = obs.split('<hammer>')[1]\n else:\n output = ''\n return output\n\n def _verify_step_by_step(self, steps, quick_check=False):\n done = False\n reason = ''\n success = False\n step_results = []\n tls_name = 'default'\n error_step_index = None\n corrected_step = {}\n for i, step in enumerate(steps):\n try:\n step_time = time.time()\n if \"sledgehammer\" not in step:\n obs, reward, done, metadata, error = self._run_step(step, i, tls_name)\n strip_step = step.strip()\n\n if error is not None and quick_check is True:\n self._post(\"reset_problem\")\n return False\n \n # only fix \"by\" step\n if error is not None and strip_step.startswith(\"by\"):\n old_status = copy((obs, reward, done, metadata, error))\n # try correct the step with sledgehammer step\n one_line_error = error.replace('\\n', ' ')\n self.logger.info(f\"Error with step: [{step}], error: [{one_line_error}]\")\n self.logger.info(\"Trying hammer methods...\")\n obs, reward, done, metadata, error = self._run_sledgehammer(step, i, tls_name)\n if obs is not None:\n actual_step, obs = obs.split(\"<hammer>\")\n actual_step, obs = actual_step.strip(), obs.strip()\n corrected_step[i] = (step, actual_step)\n else:\n obs, reward, done, metadata, error = old_status\n else:\n if quick_check is True:\n self._post(\"reset_problem\")\n return False\n self.logger.info(\"Model use sledgehammer, Trying hammer methods...\")\n obs, reward, done, metadata, error = self._run_sledgehammer(step, i, tls_name)\n if obs is not None:\n actual_step, obs = obs.split(\"<hammer>\")\n actual_step, obs = actual_step.strip(), obs.strip()\n corrected_step[i] = (step, actual_step)\n\n step_time = time.time() - step_time\n step_results.append({\n \"index\": i,\n \"step\": step,\n \"output\": obs,\n \"step_time\": step_time,\n })\n if error is not None:\n reason = error\n success = False\n done = False\n error_step_index = i\n break\n except Exception as e:\n # Timeout - end the proof 
attempt\n success = False\n done = False\n reason = f'Python exception with error {str(e)}, at command \"{step}\" (line 1)'\n error_step_index = i\n step_results.append(dict(index=i, step=step, output=''))\n break\n\n # Change when successful\n tls_name = 'default_%d' % i\n\n if done and reward == 1.0:\n success = True\n\n result = {\n 'success': success,\n 'reason': reason,\n 'num_steps': len(steps),\n 'last_step': len(step_results),\n 'error_step_index': error_step_index,\n 'step_results': step_results,\n 'corrected_steps': corrected_step,\n }\n\n # This will reset all the problem status\n self._post(\"reset_problem\")\n if quick_check is True:\n return success\n return result\n\n def _run_sledgehammer(self, step, i, tls_name):\n # First try heuristics\n for heuristic in ['by auto', 'by simp', 'by blast', 'by fastforce', 'by force', 'by eval', 'by presburger', 'by sos', 'by arith', 'by linarith', 'by (auto simp: field_simps)', \"sledgehammer\"]:\n step_ = heuristic\n obs, reward, done, metadata, error = self._run_step(step_, i, tls_name) \n if error is None:\n if \"<hammer>\" not in obs:\n obs = '%s <hammer> %s' % (heuristic, obs)\n actual_step = obs.split(\"<hammer>\")[0].strip()\n self.logger.info(f\"Tried step: {step_}, success, replace step: [{step}] with step: [{actual_step}]\")\n return obs, reward, done, metadata, error\n else:\n if step_ == \"sledgehammer\":\n one_line_error = error.replace('\\n', ' ')\n self.logger.info(f\"Tried step: {step_} with error [{one_line_error}]\")\n if 'At command \"<malformed>\"' in one_line_error:\n error = \"Sledgehammer error (line 1): fail to finish the proof with sledgehammer\"\n return None, reward, done, metadata, error\n # Try sledgehammer\n # if error.replace('\\n', ' ').startswith(\"Step error: Outer syntax error (line 1): command expected\"):\n # error = \"Sledgehammer error (line 1): fail to finish the proof with sledgehammer\"\n return obs, reward, done, metadata, error\n\n def _run_step(self, step, i, tls_name):\n obs, reward, done, metadata = self.step_to_top_level_state(\n action=step,\n tls_name=tls_name,\n new_name='default_%d' % i\n )\n error = None\n if 'error:' in obs or 'Step error' in obs or 'Unknown error' in obs:\n error = obs\n return obs, reward, done, metadata, error\n\n def step_to_top_level_state(self, action, tls_name, new_name):\n # last_obs_string = self.stub.IsabelleCommand(server_pb2.IsaCommand(command=f\"<get state> {tls_name}\")).state\n obs_string = \"Step error\"\n try:\n obs_string = self._post(f\"<apply to top level state> {tls_name} <apply to top level state> {action} <apply to top level state> {new_name}\")\n # print(obs_string)\n except Exception as e:\n self.logger.info(\"***Something went wrong***\")\n self.logger.info(e)\n\n if \"error\" in obs_string:\n done = False\n else:\n done = self.is_finished(new_name)\n # done = True if (\"subgoal\" in last_obs_string and \"subgoal\" not in obs_string) else False\n return obs_string, self.reward(done), done, {}\n\n def reward(self, done):\n return 1 if done else 0\n\n def is_finished(self, name_of_tls):\n ret = self._post(f\"<is finished> {name_of_tls}\").strip()\n return ret.startswith(\"t\")\n \n def get_marker_statement(self, code):\n parsed = self._get_parsed_code(code)\n sl = []\n for code in parsed:\n code = code.strip()\n if code.startswith(\"lemma\") or code.startswith(\"theorem\") or code.startswith(\"fun\") or code.startswith(\"definition\"):\n sl.append(code)\n return sl[-1]\n\n \n def _post_process_error_msg(self, code, parsed_code, 
verified_result):\n old_code = copy(code)\n only_refresh_code = False\n if \"Timeout after\" in verified_result[\"reason\"]:\n verified_result[\"reason\"] = \\\n 'Step timeout error (line 1): the step takes more than 10 seconds to run. At command \"<cmd>\" (line 1)'\n if verified_result[\"success\"] is True:\n only_refresh_code = True\n elif re.search(r\"\\(line [0-9]+\\)\", verified_result[\"reason\"]) is None and \\\n re.search(r'At command \"(.?)+\"', verified_result[\"reason\"]) is None:\n self.logger.info(\"No line number or at command, skip...\")\n self.logger.info(\"The error is:\")\n self.logger.info(verified_result[\"reason\"])\n only_refresh_code = True\n \n matched_codes = []\n for ix, step in enumerate(verified_result[\"step_results\"]):\n step_code = step[\"step\"].strip()\n if step_code not in code:\n # This error is too complicated, I give up\n if len(step[\"output\"]) != 0:\n return verified_result, old_code, \"\".join(matched_codes), code\n else:\n if step_code.startswith(\"(*\"):\n start_index = code.index(\"(*\")\n self.logger.info(f\"Parsed code: {step_code}\")\n self.logger.info(f\"ori code: {code}\")\n for i in range(len(step_code)):\n if code[i+start_index] != step_code[i]:\n assert step_code[i] == \"?\"\n code = code[:i+start_index] + step_code[i] + code[i+start_index+1:]\n self.logger.info(f\"new code: {code}\")\n else:\n self.logger.info(f\"Parsed code: {step_code}\")\n self.logger.info(f\"ori code: {code}\")\n assert False, \"You should add the list!\"\n new_step = None\n if ix in verified_result[\"corrected_steps\"]:\n old_step, new_step = verified_result[\"corrected_steps\"][ix]\n assert old_step == step_code\n matched_code = code[:code.index(step_code) + len(step_code)]\n code = code[code.index(step_code) + len(step_code):]\n if new_step is not None:\n matched_code = matched_code.replace(step_code.strip(), new_step.strip())\n matched_codes.append(matched_code)\n \n correct_code = \"\".join(matched_codes)\n incorrect_code = code\n\n if not only_refresh_code:\n previous_code = \"\".join(matched_codes)\n line_number = previous_code.strip().count(\"\\n\") + 1\n\n error_msg = re.sub(r\"\\(line [0-9]+\\)\", f\"(line {line_number})\", verified_result[\"reason\"])\n error_msg = re.sub(r'At command \"(.?)+\"', f'At command \"{repr(step_code)}\"', error_msg)\n\n verified_result[\"reason\"] = error_msg\n \n new_code = \"\".join(matched_codes + [code])\n\n return verified_result, new_code, correct_code, incorrect_code\n \n def get_lemma_name(self, code):\n name = \"no_name\"\n try:\n if code.startswith('lemma'):\n name = re.findall(r\"lemma (.+):\", code)[0].strip()\n elif code.startswith('theorem'):\n name = re.findall(r\"theorem (.+):\", code)\n if len(name) == 0:\n name = \"theorem_with_no_name\"\n else:\n name = name[0].strip()\n elif code.startswith('fun') and not code.startswith('function'):\n name = re.findall(r\"fun (.+) ::\", code)[0].strip()\n elif code.startswith('function'):\n name = re.findall(r\"function (.+) ::\", code)[0].strip()\n elif code.startswith('definition'):\n name = re.findall(r\"definition (.+) ::\", code)[0].strip()\n else:\n assert False, f\"new code type: {code}\"\n except Exception as e:\n self.logger.info(f\"Error get lemma name, error: {e}, code: {code}\")\n return name\n \n def _post_process_skill_code(self, correct_partial_code):\n start_keyword = [\"lemma\", \"theorem\", \"definition\", \"fun\", \"end\"]\n \n parsed_code = self._get_parsed_code(correct_partial_code)\n all_codes = []\n current_code_set = []\n for code in parsed_code:\n 
if code.startswith(tuple(start_keyword)):\n if len(current_code_set) > 0:\n skill_code = \"\\n\".join(current_code_set)\n all_codes.append(skill_code.strip())\n current_code_set = [code]\n else:\n assert len(all_codes) == 0 or len(current_code_set) > 0\n if len(current_code_set) != 0:\n current_code_set.append(code)\n \n # remove empty code:\n tmp_code = []\n for code in all_codes:\n code = self._beautify(code, correct_partial_code)\n if len(code) == 0:\n continue\n tmp_code.append(code)\n all_codes = tmp_code\n\n # resolve dependence\n all_names = []\n for code in all_codes:\n all_names.append(self.get_lemma_name(code))\n \n name_and_codes = list(zip(all_names, all_codes))\n name_and_codes = sorted(name_and_codes, key=lambda x: len(x[0]), reverse=True)\n if len(name_and_codes) > 0:\n all_names, all_codes = list(zip(*name_and_codes))\n else:\n all_names, all_codes = [], []\n \n new_codes = []\n for ix, code in enumerate(all_codes):\n current_code = code\n escape_names = [all_names[ix]]\n while True:\n updated = False\n for jx, name in enumerate(all_names):\n if name in escape_names:\n continue\n if name in current_code:\n current_code = f\"{all_codes[jx]}\\n\\n{current_code}\"\n escape_names.append(name)\n updated = True\n if updated is False:\n break\n new_codes.append(current_code)\n \n return list(zip(all_codes, new_codes))\n\n def _beautify(self, ori_code, correct_partial_code):\n parsed_code = self._get_parsed_code(ori_code)\n if ori_code.startswith(\"lemma\") or ori_code.startswith(\"theorem\"):\n if len(parsed_code) <= 1:\n return \"\"\n else:\n return ori_code\n if parsed_code[0].strip() not in correct_partial_code:\n return ori_code\n\n formatted_code = correct_partial_code[correct_partial_code.index(parsed_code[0]):]\n matched_codes = []\n for ix, step_code in enumerate(parsed_code):\n step_code = step_code.strip()\n if step_code not in formatted_code:\n # This error is too complicated, I give up\n return ori_code\n matched_code = formatted_code[:formatted_code.index(step_code) + len(step_code)]\n formatted_code = formatted_code[formatted_code.index(step_code) + len(step_code):]\n matched_codes.append(matched_code)\n \n new_code = \"\".join(matched_codes)\n \n # remove all the comments\n # This regular expression pattern will find all comments in the Isabelle code\n pattern = re.compile(r\"\\(\\*(.*?)\\*\\)\", re.DOTALL)\n\n # Substitute found comments with an empty string\n new_code = re.sub(pattern, '', new_code).strip()\n new_code = '\\n'.join(line for line in new_code.splitlines() if line.strip())\n\n if len(self._get_parsed_code(new_code)) <= 1:\n return \"\"\n return new_code\n\n def _get_request(self, code, skill_codes):\n parsed = self._get_parsed_code(code)\n requests = []\n for line in parsed:\n if line.strip().startswith(\"lemma\"):\n requests.append(line)\n full_codes = [k[1] for k in skill_codes]\n full_code = \"\\n\\n\".join(full_codes)\n requests = list(filter(lambda x: x not in full_code, requests))\n return requests" }, { "identifier": "ActionAgent", "path": "lego_prover/agents/action.py", "snippet": "class ActionAgent:\n def __init__(\n self,\n logger=None,\n model_name=\"gpt-3.5-turbo\",\n temperature=0,\n request_timeout=120,\n ckpt_dir=\"ckpt\",\n ):\n self.logger = logger\n self.ckpt_dir = ckpt_dir\n U.f_mkdir(f\"{ckpt_dir}/action\")\n self.llm = LLMMixture(\n model_name=model_name,\n temperature=temperature,\n request_timeout=request_timeout,\n )\n\n # load decomposer examples:\n self.decomposer_examples = {}\n for file in 
os.listdir(\"data/decomposer_examples\"):\n with open(os.path.join(\"data/decomposer_examples\", file), \"r\") as f:\n text = f.read()\n self.decomposer_examples[file[:-4]] = text\n \n self.formalizer_examples = {}\n for file in os.listdir(\"data/formalizer_examples\"):\n with open(os.path.join(\"data/formalizer_examples\", file), \"r\") as f:\n text = f.read()\n self.formalizer_examples[file[:-4]] = text\n \n def retrieved_example_skills(self, retrieved_skills):\n random.shuffle(retrieved_skills)\n prompt_examples = []\n for ix, skills in enumerate(retrieved_skills):\n skill_code = skills[\"code\"]\n prompt_example = f\"\"\"###### useful skill {ix+1}: ######\n```isabelle\n{skill_code}\n```\n\"\"\"\n prompt_examples.append(prompt_example)\n \n example_programmes = \"\\n\\n\".join(prompt_examples)\n return example_programmes\n \n def decomposer(self, context):\n system_prompt_template = load_prompt(\"decomposer\")\n system_message = SystemMessage(content=system_prompt_template)\n\n human_prompt_template = load_prompt(\"decomposer_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n # post-process in-context-learning examples\n decomposer_examples = copy(self.decomposer_examples)\n if context[\"problem_name\"] in decomposer_examples:\n decomposer_examples.pop(context[\"problem_name\"])\n icl_examples = random.sample(list(decomposer_examples.values()), 3)\n icl_examples = \"\\n\\n####################\\n\\n\".join(icl_examples)\n\n context[\"informal_statement\"] = context[\"informal_statement\"].replace(\"\\n\", ' ').strip()\n context[\"informal_proof\"] = context[\"informal_proof\"].replace(\"\\n\", \" \").strip()\n\n human_message = human_prompt_template.format(\n examples=icl_examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"],\n formal_statement=context[\"formal_statement\"],\n )\n\n conversation = {\n \"sys0\": system_message.content,\n \"human0\": human_message.content,\n }\n\n self.logger.info(\n f\"****decomposer system message****\\n{system_message.content}\"\n )\n\n self.logger.info(\n f\"****decomposer human message****\\n{human_message.content}\"\n )\n\n n_retry = 3\n informal_proof = context[\"informal_proof\"]\n skill_requests = []\n while n_retry > 0:\n try:\n ai_message = self.llm([system_message, human_message], temperature=0)\n self.logger.info(\n f\"****decomposer ai message****\\n{ai_message.content}\"\n )\n conversation[f\"ai{3-n_retry}\"] = ai_message.content\n message = ai_message.content\n if \"####################\" in message:\n message = message[:message.index(\"####################\")]\n # Extracting Error Analysis content\n informal_proof = re.search(r'## Structured informal proof\\n(.*?)\\n\\n#', message, re.DOTALL).group(1).strip()\n\n # Extracting each skill request's name and its content\n skill_requests = re.findall(r\"```isabelle\\n(.*?)\\n```\", message, re.DOTALL)\n break\n except AssertionError as e:\n if \"query too long\" in str(e):\n self.logger.warn(str(e))\n break\n except Exception as e:\n self.logger.info(f\"Error occur in decomposer: {str(e)}\")\n n_retry -= 1\n examples = random.sample(list(decomposer_examples.values()), 3)\n examples = \"\\n\\n####################\\n\\n\".join(examples)\n human_message = human_prompt_template.format(\n examples=examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"],\n formal_statement=context[\"formal_statement\"],\n )\n time.sleep(5)\n ret_request = 
[]\n for skill in skill_requests:\n if \"N/A\" in skill:\n continue\n ret_request.append(skill)\n\n if len(ret_request) > 5:\n self.logger.info(f\"skill request more than 5, with len {len(ret_request)}\")\n ret_request = random.sample(ret_request, 5)\n\n return informal_proof, ret_request, conversation\n\n def critic(self, context, code_last_round=None, error_last_round=None):\n system_prompt_template = load_prompt(\"critic_request\")\n system_prompt_template = SystemMessagePromptTemplate.from_template(system_prompt_template)\n system_message = system_prompt_template.format(examples=\"\")\n\n human_prompt_template = load_prompt(\"critic_request_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n if code_last_round is None:\n code_last_round = \"No code from last round...\"\n else:\n code_last_round = code_last_round.split('\\n')\n new_code = []\n for ix, line in enumerate(code_last_round):\n line = f\"#{ix+1} \" + line\n new_code.append(line)\n code_last_round = \"\\n\".join(new_code)\n \n if error_last_round is None:\n error_last_round = \"No error from last round...\"\n\n human_message = human_prompt_template.format(\n code=code_last_round,\n error=error_last_round,\n )\n\n # self.logger.info(\n # f\"****critic agent system message****\\n{system_message.content}\"\n # )\n\n self.logger.info(\n f\"****critic agent human message****\\n{human_message.content}\"\n )\n\n n_retry = 3\n error_analysis = \"No error analysis...\"\n skill_requests = []\n while n_retry > 0:\n try:\n ai_message = self.llm([system_message, human_message])\n self.logger.info(\n f\"****critic agent ai message****\\n{ai_message.content}\"\n )\n message = ai_message.content\n # Extracting Error Analysis content\n error_analysis = re.search(r'# Error analysis:\\n(.*?)\\n\\n#', message, re.DOTALL).group(1).strip()\n\n # Extracting each skill request's name and its content\n skill_requests = re.findall(r'## Skill \\d+: ([\\w_]+)\\n```isabelle\\n(.*?)\\n```', message, re.DOTALL)\n break\n except AssertionError as e:\n if \"query too long\" in str(e):\n self.logger.warn(str(e))\n break\n except Exception as e:\n self.logger.info(f\"Error occur in auto_formal_pre: {str(e)}\")\n n_retry -= 1\n time.sleep(5)\n\n return error_analysis, skill_requests\n \n def render_formalizer_system_message(self):\n system_template = load_prompt(\"formalizer\")\n return SystemMessage(content=system_template)\n \n def render_formalizer_human_message(\n self,\n skills,\n context,\n informal_proof=None,\n n_example=3,\n ) -> HumanMessage:\n human_prompt_template = load_prompt(\"formalizer_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n formalizer_examples = copy(self.formalizer_examples)\n if context[\"problem_name\"] in formalizer_examples:\n formalizer_examples.pop(context[\"problem_name\"])\n\n examples = random.sample(list(formalizer_examples.values()), n_example)\n examples = \"\\n\\n####################\\n\\n\".join(examples)\n context[\"informal_statement\"] = context[\"informal_statement\"].replace(\"\\n\", ' ').strip()\n context[\"informal_proof\"] = context[\"informal_proof\"].replace(\"\\n\", \" \").strip()\n\n skills = self.retrieved_example_skills(skills)\n \n human_message = human_prompt_template.format(\n skill_examples = skills,\n examples=examples,\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"] if informal_proof is None else informal_proof,\n 
formal_statement=context[\"formal_statement\"],\n )\n\n return human_message\n\n\n def render_human_message(\n self, \n context, \n code=None,\n error=None,\n error_analysis=None,\n informal_proof=None,\n ) -> HumanMessage:\n human_prompt_template = load_prompt(\"auto_formal2_human\")\n human_prompt_template = HumanMessagePromptTemplate.from_template(human_prompt_template)\n\n if code is None:\n code = \"No code from last round...\"\n else:\n code = code.split('\\n')\n new_code = []\n for ix, line in enumerate(code):\n line = f\"#{ix+1} \" + line\n new_code.append(line)\n code = \"\\n\".join(new_code)\n \n if error is None:\n error = \"No error from last round...\"\n if error_analysis is None:\n error_analysis = \"No analysis...\"\n\n human_message = human_prompt_template.format(\n informal_statement=context[\"informal_statement\"],\n informal_proof=context[\"informal_proof\"] if informal_proof is None else informal_proof,\n formal_statement=context[\"formal_statement\"],\n code_last_round=code,\n error_last_round=error,\n error_analysis=error_analysis,\n )\n\n return human_message\n\n def process_ai_message(self, message, context):\n assert isinstance(message, AIMessage)\n\n retry = 3\n error = None\n while retry > 0:\n try:\n code_pattern = re.compile(r\"```(?:[i|I]sabelle)(.*?)```\", re.DOTALL)\n text = message.content[message.content.index(\"# Formalized Code\"):]\n code = \"\\n\".join(code_pattern.findall(text)).strip()\n return code\n except Exception as e:\n retry -= 1\n error = e\n time.sleep(1)\n self.logger.info(f\"Error parsing action response (before program execution): {error}\")\n return False" }, { "identifier": "CurriculumAgent", "path": "lego_prover/agents/curriculum.py", "snippet": "class CurriculumAgent:\n def __init__(\n self,\n logger=None,\n ckpt_dir=\"ckpt\",\n resume=False,\n miniF2F_tasks : mp.Queue = None,\n curriculum_task_type : str = \"simple_curriculum\",\n curriculum_agent_lock = U.WithEmpty()\n ):\n self.logger=logger\n self.miniF2F_tasks = miniF2F_tasks\n self.curriculum_task_type = curriculum_task_type\n self.curriculum_agent_lock = curriculum_agent_lock\n self.ckpt_dir = ckpt_dir\n U.f_mkdir(f\"{ckpt_dir}/curriculum/vectordb\")\n if resume:\n self.logger.info(f\"Loading Curriculum Agent from {ckpt_dir}/curriculum\")\n self.sync_checkpoint()\n else:\n self.completed_tasks = []\n self.failed_tasks = []\n \n def sync_checkpoint(self,):\n if os.path.exists(f\"{self.ckpt_dir}/curriculum/completed_tasks.json\"):\n self.completed_tasks = U.load_json(f\"{self.ckpt_dir}/curriculum/completed_tasks.json\")\n if os.path.exists(f\"{self.ckpt_dir}/curriculum/failed_tasks.json\"):\n self.failed_tasks = U.load_json(f\"{self.ckpt_dir}/curriculum/failed_tasks.json\")\n\n @property\n def easy_to_hard_curriculum(self):\n result = []\n for name in os.listdir(\"data/full_data/valid\"):\n path = os.path.join(\"data/full_data/valid\", name)\n context = U.load_json(path)\n result.append((path, len(context[\"informal_proof\"])))\n result = sorted(result, key=lambda x: x[1])\n result = [x[0] for x in result]\n return result\n\n @property\n def progress(self):\n return len(self.completed_tasks)\n\n def propose_next_task(self, max_retries=5, idx=None):\n if self.curriculum_task_type == \"example\":\n filename = os.listdir(\"data/examples\")[self.progress]\n task = filename[:-5]\n context = load_context(problem_name=os.path.join(\"data/examples\", filename))\n return task, context\n elif self.curriculum_task_type == \"simple_curriculum\":\n assert idx is not None\n file_path = 
self.easy_to_hard_curriculum[idx]\n task = file_path\n context = load_context(file_path)\n return task, context\n elif self.curriculum_task_type == \"queue_curriculum\":\n while True:\n if self.miniF2F_tasks.qsize() == 0:\n return \"\", None\n file_path = self.miniF2F_tasks.get()\n context = load_context(file_path)\n if file_path not in self.completed_tasks:\n break\n return file_path, context\n else:\n raise NotImplementedError\n\n def get_task_retry_count(self, task):\n cnt = 0\n for t in self.failed_tasks:\n if t == task:\n cnt += 1\n return cnt\n\n def propose_next_manual_task(self):\n confirmed = False\n task = \"\"\n while not confirmed:\n task = input(\"Enter task: \")\n print(f\"Task: {task}\")\n confirmed = input(\"Confirm? (y/n)\").lower() in [\"y\", \"\"]\n context = load_context(task)\n return task, context\n\n def update_exploration_progress(self, info):\n with self.curriculum_agent_lock:\n self.sync_checkpoint()\n\n task = info[\"task\"]\n if info[\"success\"]:\n self.logger.info(f\"Completed task {task}.\")\n self.completed_tasks.append(task)\n else:\n self.logger.info(\n f\"Failed to complete task {task}. Skipping to next task.\"\n )\n self.failed_tasks.append(task)\n\n # clean up tasks and dump to disk\n self.clean_up_tasks()\n\n def clean_up_tasks(self):\n updated_completed_tasks = []\n # record repeated failed tasks\n updated_failed_tasks = self.failed_tasks\n # dedup but keep order\n for task in self.completed_tasks:\n if task not in updated_completed_tasks:\n updated_completed_tasks.append(task)\n\n # remove completed tasks from failed tasks\n for task in updated_completed_tasks:\n while task in updated_failed_tasks:\n updated_failed_tasks.remove(task)\n\n self.completed_tasks = updated_completed_tasks\n self.failed_tasks = updated_failed_tasks\n\n # dump to json\n U.dump_json(\n self.completed_tasks, f\"{self.ckpt_dir}/curriculum/completed_tasks.json\"\n )\n U.dump_json(self.failed_tasks, f\"{self.ckpt_dir}/curriculum/failed_tasks.json\")" }, { "identifier": "SkillManager", "path": "lego_prover/agents/skill.py", "snippet": "class SkillManager:\n def __init__(\n self,\n rank = None,\n logger = None,\n ckpt_dir=\"ckpt\",\n skill_manager_lock=U.WithEmpty(),\n chroma_bridge: ChromaBridge = None\n ):\n self.rank = rank\n self.logger = logger\n self.skill_manager_lock = skill_manager_lock\n self.chroma_bridge = chroma_bridge\n U.f_mkdir(f\"{ckpt_dir}/skill/code\")\n U.f_mkdir(f\"{ckpt_dir}/skill/history_problem\")\n U.f_mkdir(f\"{ckpt_dir}/skill/requests\")\n U.f_mkdir(f\"{ckpt_dir}/skill/description\")\n U.f_mkdir(f\"{ckpt_dir}/skill/vectordb\")\n self.ckpt_dir = ckpt_dir\n self.encoder = tiktoken.encoding_for_model(\"gpt-4\")\n with self.skill_manager_lock:\n self.sync_checkpoint()\n \n def sync_checkpoint(self):\n if os.path.exists(f\"{self.ckpt_dir}/skill/skills.json\"):\n self.skills = U.load_json(f\"{self.ckpt_dir}/skill/skills.json\")\n else:\n self.skills = {}\n if os.path.exists(f\"{self.ckpt_dir}/skill/codes.json\"):\n self.codes = U.load_json(f\"{self.ckpt_dir}/skill/codes.json\")\n else:\n self.codes = {}\n if os.path.exists(f\"{self.ckpt_dir}/skill/skill_request.json\"):\n self.skill_requests = U.load_json(f\"{self.ckpt_dir}/skill/skill_request.json\")\n else:\n self.skill_requests = {}\n \n def add_new_problem(self, problem_name, formal_statement):\n data = (\"problem_add_text\", {\n \"add_text\": formal_statement,\n \"problem_name\": problem_name,\n })\n output = self.chroma_bridge.run_cmd(data)\n assert output[\"error\"] is None, \"error is not None\"\n 
print(output[\"output\"])\n\n def add_new_request(self, problem_name, formal_statement, init_update_count=0):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n exists_formal_statements = [value['formal_statement'] for value in self.skill_requests.values()]\n if len(get_close_matches(formal_statement, exists_formal_statements, n=1, cutoff=0.85)) != 0:\n return\n\n with self.skill_manager_lock:\n self.sync_checkpoint()\n request_name = f\"request_{len(self.skill_requests)}\"\n self.skill_requests[request_name] = {\n \"request_name\": request_name,\n \"problem_name\": problem_name,\n \"formal_statement\": formal_statement,\n \"update_count\": init_update_count,\n }\n \n\n data = (\"request_add_text\", {\n \"add_text\": formal_statement,\n \"request_name\": request_name,\n })\n \n assert self.chroma_bridge is not None\n output = self.chroma_bridge.run_cmd(data)\n if output[\"error\"] is None:\n # print(\"There are\", output[\"output\"], \"code\")\n assert output[\"output\"] == len(\n self.skill_requests\n ), (\"requestdb is not synced with skill_request.json, \"\n f\"there are {output['output']} in requestdb but {len(self.skill_requests)} in skill_request.json\")\n \n U.dump_text(\n formal_statement, f\"{self.ckpt_dir}/skill/requests/{request_name}.thy\"\n )\n U.dump_json(self.skill_requests, f\"{self.ckpt_dir}/skill/skill_request.json\")\n self.logger.info(f\"Added skill, marker:\\n ```isabelle\\n{formal_statement}```\\n\") \n\n def add_new_skill(self, skill_name, description, marker, full_code, origin=\"\", init_update_count=0):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n exists_markers = [value['marker'] for value in self.skills.values()]\n if len(self.encoder.encode(marker)) > 650:\n return\n if len(get_close_matches(marker, exists_markers, n=1, cutoff=0.85)) != 0:\n return\n\n if not bool(re.match(\"^[a-zA-Z0-9_']+$\", skill_name)):\n skill_name = f\"skill_{len(self.skills)}\"\n\n skill_name = skill_name.lower().strip().replace(\" \", \"_\")\n if skill_name in self.skills:\n i = 2\n while f\"{skill_name}V{i}\" in self.skills:\n i += 1\n skill_name = f\"{skill_name}V{i}\"\n\n with self.skill_manager_lock:\n self.sync_checkpoint()\n\n self.skills[skill_name] = {\n \"skill_name\": skill_name,\n \"marker\": marker,\n \"description\": description,\n \"full_code\": full_code,\n \"origin\": origin,\n \"update_count\": init_update_count,\n }\n\n # add_text = f\"code: {marker}, skill: {skill_name}, description: {description},\"\n add_text = marker\n \n # use chroma bridge to add skill to the chromadb\n assert self.chroma_bridge is not None\n data = (\"skill_add_text\",{\n \"skill_name\": skill_name,\n \"add_text\": add_text,\n })\n output = self.chroma_bridge.run_cmd(data)\n if output[\"error\"] is None:\n assert output[\"output\"] == len(\n self.skills\n ), (\"vectordb is not synced with skill.json\"\n f\"there are {output['output']} in skilldb but {len(self.skills)} in skills.json\")\n \n U.dump_text(\n marker, f\"{self.ckpt_dir}/skill/code/{skill_name}.thy\"\n )\n U.dump_text(\n description,\n f\"{self.ckpt_dir}/skill/description/{skill_name}.txt\",\n )\n U.dump_json(self.skills, f\"{self.ckpt_dir}/skill/skills.json\")\n self.logger.info(f\"Added skill, marker:\\n ```isabelle\\n{marker}```\\nfull_code:\\nisabelle\\n{full_code}\\n\")\n\n def update_count(self, skill_name):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n self.skills[skill_name][\"update_count\"] += 1\n U.dump_json(self.skills, f\"{self.ckpt_dir}/skill/skills.json\")\n \n def 
update_count_request(self, request_name):\n with self.skill_manager_lock:\n self.sync_checkpoint()\n self.skill_requests[request_name][\"update_count\"] += 1\n U.dump_json(self.skill_requests, f\"{self.ckpt_dir}/skill/skill_request.json\")\n\n def retrieve_skills(self, query, k):\n ret_skill = []\n k = min(len(self.skills), k)\n if k != 0:\n self.logger.info(f\"Skill Manager retrieving for {k} skills\")\n with self.skill_manager_lock:\n # query = f\"informal statement: {context['informal_statement']}, informal proof: {context['informal_proof']}, formal_statement: {context['formal_statement']}\"\n data = (\"skill_query\", {\"query\": query, \"k\": k})\n outputs = self.chroma_bridge.run_cmd(data)\n ret_skill_name = []\n if outputs[\"error\"] is None:\n ret_skill_name = outputs[\"output\"]\n self.sync_checkpoint()\n self.logger.info(\n f\"Skill Manager retrieved skills for query:\\n ```\\n\"\n f\"{query}\\n```\\n\"\n f\"{', '.join(ret_skill_name)}\"\n )\n\n for skill_name in ret_skill_name:\n retrieved_skill = {\n \"skill\": skill_name,\n \"description\": self.skills[skill_name][\"description\"],\n \"code\": self.skills[skill_name][\"full_code\"],\n \"marker\": self.skills[skill_name][\"marker\"],\n }\n ret_skill.append(retrieved_skill)\n return ret_skill\n\n def retrieve_skills_with_context(self, context):\n ret_skill = []\n\n k = min(len(self.skills), 6)\n if k != 0:\n self.logger.info(f\"Skill Manager retrieving for {k} skills\")\n with self.skill_manager_lock:\n query = context['formal_statement']\n data = (\"skill_query\", {\"query\": query, \"k\": k})\n outputs = self.chroma_bridge.run_cmd(data)\n ret_skill_name = []\n if outputs[\"error\"] is None:\n ret_skill_name = outputs[\"output\"]\n self.sync_checkpoint()\n self.logger.info(\n f\"Skill Manager retrieved skills for query:\\n ```\\n\"\n f\"{query}\\n```\\n\"\n f\"{', '.join(ret_skill_name)}\"\n )\n \n for skill_name in ret_skill_name:\n retrieved_skill = {\n \"skill\": skill_name,\n \"description\": self.skills[skill_name][\"description\"],\n \"code\": self.skills[skill_name][\"full_code\"],\n \"marker\": self.skills[skill_name][\"marker\"],\n }\n ret_skill.append(retrieved_skill)\n\n return ret_skill" } ]
import os
import random
import re
import time
import multiprocessing as mp
import tiktoken
import lego_prover.utils as U
import logging
from lego_prover.env.isa_bridge import IsabelleEnv
from .agents import ActionAgent
from .agents import CurriculumAgent
from .agents import SkillManager
from langchain.schema import HumanMessage
11622
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name self.env = IsabelleEnv( logger=self.logger, isabelle_path=isabelle_path, server_port=server_port ) self.action_agent_model_name = model_name self.tokenizer_encoder = tiktoken.encoding_for_model( self.action_agent_model_name) self.ckpt_dir = ckpt_dir self.temperature = temperature # init agents self.action_agent = ActionAgent( logger=self.logger, model_name=model_name, temperature=temperature, request_timeout=openai_api_request_timeout, ckpt_dir=ckpt_dir, ) self.action_agent_task_max_retries = action_agent_task_max_retries self.curriculum_agent = CurriculumAgent( logger=self.logger, ckpt_dir=ckpt_dir, resume=resume, miniF2F_tasks=miniF2F_tasks, curriculum_task_type=curriculum_task_type, curriculum_agent_lock=curriculum_agent_lock, )
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name self.env = IsabelleEnv( logger=self.logger, isabelle_path=isabelle_path, server_port=server_port ) self.action_agent_model_name = model_name self.tokenizer_encoder = tiktoken.encoding_for_model( self.action_agent_model_name) self.ckpt_dir = ckpt_dir self.temperature = temperature # init agents self.action_agent = ActionAgent( logger=self.logger, model_name=model_name, temperature=temperature, request_timeout=openai_api_request_timeout, ckpt_dir=ckpt_dir, ) self.action_agent_task_max_retries = action_agent_task_max_retries self.curriculum_agent = CurriculumAgent( logger=self.logger, ckpt_dir=ckpt_dir, resume=resume, miniF2F_tasks=miniF2F_tasks, curriculum_task_type=curriculum_task_type, curriculum_agent_lock=curriculum_agent_lock, )
self.skill_manager = SkillManager(
3
2023-10-09 04:23:43+00:00
16k
YingqingHe/ScaleCrafter-ptl
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False,\n tiled=False,\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.tiled = tiled\n\n if tiled:\n from ldm.modules.diffusionmodules.model_tiled import Decoder\n else:\n from ldm.modules.diffusionmodules.model import Decoder\n\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n \n if tiled:\n self.post_quant_conv = make_conv(embed_dim, ddconfig[\"z_channels\"], tiled=tiled, kernel_size=1)\n else:\n # original post_quant_conv\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n \n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = 
ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. < ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n \n def decode_tiles(self, z):\n assert(self.tiled)\n return self.decode(z)\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n 
self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"squaredcos_cap_v2\": # used for karlo prior\n # return early\n return betas_for_alpha_bar(\n n_timestep,\n lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,\n )\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], 
*((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", device=torch.device(\"cuda\"), **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.device = device\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != self.device:\n attr = attr.to(self.device)\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n **kwargs\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n timestep_index=i,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, \n # redilation\n dilate=None, dilate_tau=None, dilate_skip=None, \n progress_dilate=False,\n dilate_cfg=None, dilate_cfg_skip=None,\n timestep_index=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n \n # redilation\n enable_dilate = (dilate is not None)\n if enable_dilate:\n if (self.ddim_timesteps.shape[0]-index) > dilate_tau:\n # close dilation in later denoising\n enable_dilate = False\n else:\n if progress_dilate:\n # adjust the dilation factor progressively\n assert(timestep_index is not None)\n dilate_list = list(range(2, math.ceil(dilate)+1))[::-1]\n n_stage = len(dilate_list)\n n_times_stage = math.ceil(dilate_tau / n_stage)\n stage_index = (timestep_index+1) // n_times_stage\n if stage_index > n_stage-1:\n stage_index = n_stage-1\n dilate = dilate_list[stage_index]\n make_dilate_model(self.model, enable_dilate=enable_dilate, dilate=dilate, nskip=dilate_skip)\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if 
use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = 
self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
12845
@torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
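The cropped code above breaks off inside p_sample, right after the per-step noise is drawn. Below is a minimal, self-contained sketch of how a DDPM ancestral sampling step is typically completed from the quantities the snippet already computes (model_mean, model_log_variance, noise); the helper name finish_p_sample_step and the toy tensors are illustrative assumptions, not taken from the source file.

import torch

def finish_p_sample_step(model_mean, model_log_variance, t, noise):
    # Illustrative completion of one ancestral sampling step (not the original
    # implementation). No noise is added at t == 0; otherwise the posterior
    # standard deviation exp(0.5 * log_var) scales a fresh Gaussian draw.
    nonzero_mask = (t != 0).float().reshape(-1, *([1] * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

# Toy usage with dummy tensors.
b, c, h, w = 2, 3, 8, 8
t = torch.tensor([5, 0])                      # second sample is at the final step
model_mean = torch.zeros(b, c, h, w)
model_log_variance = torch.zeros(b, c, h, w)  # unit variance for the toy
noise = torch.randn(b, c, h, w)
x_prev = finish_p_sample_step(model_mean, model_log_variance, t, noise)
assert torch.equal(x_prev[1], model_mean[1])  # no noise injected where t == 0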
15
2023-10-11 10:57:55+00:00
16k
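The register_schedule code listed above notes in a comment that the registered posterior variance equals 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t). A minimal NumPy check of that identity follows; it assumes a plain linear beta schedule and v_posterior = 0 purely for illustration, which need not match what make_beta_schedule actually produces.

import numpy as np

timesteps = 1000
betas = np.linspace(1e-4, 2e-2, timesteps)  # assumed linear schedule
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

# Registered form (with v_posterior = 0): beta_t * (1 - abar_{t-1}) / (1 - abar_t)
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
# Harmonic form quoted in the in-code comment, skipping t = 0 where
# abar_{t-1} = 1 makes it degenerate.
harmonic = 1.0 / (1.0 / (1.0 - alphas_cumprod_prev[1:]) + alphas[1:] / betas[1:])
assert np.allclose(posterior_variance[1:], harmonic)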
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"u\",\n \"m\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"y\",\n \"b\",\n \"v\",\n \"k\",\n \"x\",\n \"j\",\n \"z\",\n \"q\",\n ],\n \"English—\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"h\",\n \"l\",\n \"d\",\n \"c\",\n \"m\",\n \"u\",\n \"f\",\n \"p\",\n \"g\",\n \"w\",\n \"b\",\n \"y\",\n \"v\",\n \"k\",\n \"j\",\n \"x\",\n \"z\",\n \"q\",\n ],\n \"German\": [\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"s\",\n \"t\",\n \"a\",\n \"d\",\n \"h\",\n \"u\",\n \"l\",\n \"g\",\n \"o\",\n \"c\",\n \"m\",\n \"b\",\n \"f\",\n \"k\",\n \"w\",\n \"z\",\n \"p\",\n \"v\",\n \"ü\",\n \"ä\",\n \"ö\",\n \"j\",\n ],\n \"French\": [\n \"e\",\n \"a\",\n \"s\",\n \"n\",\n \"i\",\n \"t\",\n \"r\",\n \"l\",\n \"u\",\n \"o\",\n \"d\",\n \"c\",\n \"p\",\n \"m\",\n \"é\",\n \"v\",\n \"g\",\n \"f\",\n \"b\",\n \"h\",\n \"q\",\n \"à\",\n \"x\",\n \"è\",\n \"y\",\n \"j\",\n ],\n \"Dutch\": [\n \"e\",\n \"n\",\n \"a\",\n \"i\",\n \"r\",\n \"t\",\n \"o\",\n \"d\",\n \"s\",\n \"l\",\n \"g\",\n \"h\",\n \"v\",\n \"m\",\n \"u\",\n \"k\",\n \"c\",\n \"p\",\n \"b\",\n \"w\",\n \"j\",\n \"z\",\n \"f\",\n \"y\",\n \"x\",\n \"ë\",\n ],\n \"Italian\": [\n \"e\",\n \"i\",\n \"a\",\n \"o\",\n \"n\",\n \"l\",\n \"t\",\n \"r\",\n \"s\",\n \"c\",\n \"d\",\n \"u\",\n \"p\",\n \"m\",\n \"g\",\n \"v\",\n \"f\",\n \"b\",\n \"z\",\n \"h\",\n \"q\",\n \"è\",\n \"à\",\n \"k\",\n \"y\",\n \"ò\",\n ],\n \"Polish\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"z\",\n \"w\",\n \"s\",\n \"c\",\n \"t\",\n \"k\",\n \"y\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"l\",\n \"j\",\n \"ł\",\n \"g\",\n \"b\",\n \"h\",\n \"ą\",\n \"ę\",\n \"ó\",\n ],\n \"Spanish\": [\n \"e\",\n \"a\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n \"i\",\n \"l\",\n \"d\",\n \"t\",\n \"c\",\n \"u\",\n \"m\",\n \"p\",\n \"b\",\n \"g\",\n \"v\",\n \"f\",\n \"y\",\n \"ó\",\n \"h\",\n \"q\",\n \"í\",\n \"j\",\n \"z\",\n \"á\",\n ],\n \"Russian\": [\n \"о\",\n \"а\",\n \"е\",\n \"и\",\n \"н\",\n \"с\",\n \"т\",\n \"р\",\n \"в\",\n \"л\",\n \"к\",\n \"м\",\n \"д\",\n \"п\",\n \"у\",\n \"г\",\n \"я\",\n \"ы\",\n \"з\",\n \"б\",\n \"й\",\n \"ь\",\n \"ч\",\n \"х\",\n \"ж\",\n \"ц\",\n ],\n # Jap-Kanji\n \"Japanese\": [\n \"人\",\n \"一\",\n \"大\",\n \"亅\",\n \"丁\",\n \"丨\",\n \"竹\",\n \"笑\",\n \"口\",\n \"日\",\n \"今\",\n \"二\",\n \"彳\",\n \"行\",\n \"十\",\n \"土\",\n \"丶\",\n \"寸\",\n \"寺\",\n \"時\",\n \"乙\",\n \"丿\",\n \"乂\",\n \"气\",\n \"気\",\n \"冂\",\n \"巾\",\n \"亠\",\n \"市\",\n \"目\",\n \"儿\",\n \"見\",\n \"八\",\n \"小\",\n \"凵\",\n \"県\",\n \"月\",\n \"彐\",\n \"門\",\n \"間\",\n \"木\",\n \"東\",\n \"山\",\n \"出\",\n \"本\",\n \"中\",\n \"刀\",\n \"分\",\n \"耳\",\n \"又\",\n \"取\",\n \"最\",\n \"言\",\n \"田\",\n \"心\",\n \"思\",\n \"刂\",\n \"前\",\n \"京\",\n \"尹\",\n \"事\",\n \"生\",\n \"厶\",\n \"云\",\n \"会\",\n \"未\",\n \"来\",\n \"白\",\n \"冫\",\n \"楽\",\n \"灬\",\n \"馬\",\n \"尸\",\n \"尺\",\n \"駅\",\n \"明\",\n \"耂\",\n \"者\",\n \"了\",\n \"阝\",\n \"都\",\n \"高\",\n \"卜\",\n \"占\",\n \"厂\",\n \"广\",\n \"店\",\n \"子\",\n \"申\",\n \"奄\",\n \"亻\",\n \"俺\",\n \"上\",\n \"方\",\n \"冖\",\n \"学\",\n \"衣\",\n \"艮\",\n \"食\",\n \"自\",\n ],\n # Jap-Katakana\n \"Japanese—\": [\n \"ー\",\n \"ン\",\n \"ス\",\n \"・\",\n \"ル\",\n \"ト\",\n \"リ\",\n \"イ\",\n \"ア\",\n \"ラ\",\n \"ッ\",\n \"ク\",\n \"ド\",\n 
\"シ\",\n \"レ\",\n \"ジ\",\n \"タ\",\n \"フ\",\n \"ロ\",\n \"カ\",\n \"テ\",\n \"マ\",\n \"ィ\",\n \"グ\",\n \"バ\",\n \"ム\",\n \"プ\",\n \"オ\",\n \"コ\",\n \"デ\",\n \"ニ\",\n \"ウ\",\n \"メ\",\n \"サ\",\n \"ビ\",\n \"ナ\",\n \"ブ\",\n \"ャ\",\n \"エ\",\n \"ュ\",\n \"チ\",\n \"キ\",\n \"ズ\",\n \"ダ\",\n \"パ\",\n \"ミ\",\n \"ェ\",\n \"ョ\",\n \"ハ\",\n \"セ\",\n \"ベ\",\n \"ガ\",\n \"モ\",\n \"ツ\",\n \"ネ\",\n \"ボ\",\n \"ソ\",\n \"ノ\",\n \"ァ\",\n \"ヴ\",\n \"ワ\",\n \"ポ\",\n \"ペ\",\n \"ピ\",\n \"ケ\",\n \"ゴ\",\n \"ギ\",\n \"ザ\",\n \"ホ\",\n \"ゲ\",\n \"ォ\",\n \"ヤ\",\n \"ヒ\",\n \"ユ\",\n \"ヨ\",\n \"ヘ\",\n \"ゼ\",\n \"ヌ\",\n \"ゥ\",\n \"ゾ\",\n \"ヶ\",\n \"ヂ\",\n \"ヲ\",\n \"ヅ\",\n \"ヵ\",\n \"ヱ\",\n \"ヰ\",\n \"ヮ\",\n \"ヽ\",\n \"゠\",\n \"ヾ\",\n \"ヷ\",\n \"ヿ\",\n \"ヸ\",\n \"ヹ\",\n \"ヺ\",\n ],\n # Jap-Hiragana\n \"Japanese——\": [\n \"の\",\n \"に\",\n \"る\",\n \"た\",\n \"と\",\n \"は\",\n \"し\",\n \"い\",\n \"を\",\n \"で\",\n \"て\",\n \"が\",\n \"な\",\n \"れ\",\n \"か\",\n \"ら\",\n \"さ\",\n \"っ\",\n \"り\",\n \"す\",\n \"あ\",\n \"も\",\n \"こ\",\n \"ま\",\n \"う\",\n \"く\",\n \"よ\",\n \"き\",\n \"ん\",\n \"め\",\n \"お\",\n \"け\",\n \"そ\",\n \"つ\",\n \"だ\",\n \"や\",\n \"え\",\n \"ど\",\n \"わ\",\n \"ち\",\n \"み\",\n \"せ\",\n \"じ\",\n \"ば\",\n \"へ\",\n \"び\",\n \"ず\",\n \"ろ\",\n \"ほ\",\n \"げ\",\n \"む\",\n \"べ\",\n \"ひ\",\n \"ょ\",\n \"ゆ\",\n \"ぶ\",\n \"ご\",\n \"ゃ\",\n \"ね\",\n \"ふ\",\n \"ぐ\",\n \"ぎ\",\n \"ぼ\",\n \"ゅ\",\n \"づ\",\n \"ざ\",\n \"ぞ\",\n \"ぬ\",\n \"ぜ\",\n \"ぱ\",\n \"ぽ\",\n \"ぷ\",\n \"ぴ\",\n \"ぃ\",\n \"ぁ\",\n \"ぇ\",\n \"ぺ\",\n \"ゞ\",\n \"ぢ\",\n \"ぉ\",\n \"ぅ\",\n \"ゐ\",\n \"ゝ\",\n \"ゑ\",\n \"゛\",\n \"゜\",\n \"ゎ\",\n \"ゔ\",\n \"゚\",\n \"ゟ\",\n \"゙\",\n \"ゕ\",\n \"ゖ\",\n ],\n \"Portuguese\": [\n \"a\",\n \"e\",\n \"o\",\n \"s\",\n \"i\",\n \"r\",\n \"d\",\n \"n\",\n \"t\",\n \"m\",\n \"u\",\n \"c\",\n \"l\",\n \"p\",\n \"g\",\n \"v\",\n \"b\",\n \"f\",\n \"h\",\n \"ã\",\n \"q\",\n \"é\",\n \"ç\",\n \"á\",\n \"z\",\n \"í\",\n ],\n \"Swedish\": [\n \"e\",\n \"a\",\n \"n\",\n \"r\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"d\",\n \"o\",\n \"m\",\n \"k\",\n \"g\",\n \"v\",\n \"h\",\n \"f\",\n \"u\",\n \"p\",\n \"ä\",\n \"c\",\n \"b\",\n \"ö\",\n \"å\",\n \"y\",\n \"j\",\n \"x\",\n ],\n \"Chinese\": [\n \"的\",\n \"一\",\n \"是\",\n \"不\",\n \"了\",\n \"在\",\n \"人\",\n \"有\",\n \"我\",\n \"他\",\n \"这\",\n \"个\",\n \"们\",\n \"中\",\n \"来\",\n \"上\",\n \"大\",\n \"为\",\n \"和\",\n \"国\",\n \"地\",\n \"到\",\n \"以\",\n \"说\",\n \"时\",\n \"要\",\n \"就\",\n \"出\",\n \"会\",\n \"可\",\n \"也\",\n \"你\",\n \"对\",\n \"生\",\n \"能\",\n \"而\",\n \"子\",\n \"那\",\n \"得\",\n \"于\",\n \"着\",\n \"下\",\n \"自\",\n \"之\",\n \"年\",\n \"过\",\n \"发\",\n \"后\",\n \"作\",\n \"里\",\n \"用\",\n \"道\",\n \"行\",\n \"所\",\n \"然\",\n \"家\",\n \"种\",\n \"事\",\n \"成\",\n \"方\",\n \"多\",\n \"经\",\n \"么\",\n \"去\",\n \"法\",\n \"学\",\n \"如\",\n \"都\",\n \"同\",\n \"现\",\n \"当\",\n \"没\",\n \"动\",\n \"面\",\n \"起\",\n \"看\",\n \"定\",\n \"天\",\n \"分\",\n \"还\",\n \"进\",\n \"好\",\n \"小\",\n \"部\",\n \"其\",\n \"些\",\n \"主\",\n \"样\",\n \"理\",\n \"心\",\n \"她\",\n \"本\",\n \"前\",\n \"开\",\n \"但\",\n \"因\",\n \"只\",\n \"从\",\n \"想\",\n \"实\",\n ],\n \"Ukrainian\": [\n \"о\",\n \"а\",\n \"н\",\n \"і\",\n \"и\",\n \"р\",\n \"в\",\n \"т\",\n \"е\",\n \"с\",\n \"к\",\n \"л\",\n \"у\",\n \"д\",\n \"м\",\n \"п\",\n \"з\",\n \"я\",\n \"ь\",\n \"б\",\n \"г\",\n \"й\",\n \"ч\",\n \"х\",\n \"ц\",\n \"ї\",\n ],\n \"Norwegian\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"s\",\n \"i\",\n \"o\",\n \"l\",\n \"d\",\n \"g\",\n \"k\",\n \"m\",\n \"v\",\n \"f\",\n \"p\",\n \"u\",\n \"b\",\n \"h\",\n \"å\",\n \"y\",\n \"j\",\n \"ø\",\n 
\"c\",\n \"æ\",\n \"w\",\n ],\n \"Finnish\": [\n \"a\",\n \"i\",\n \"n\",\n \"t\",\n \"e\",\n \"s\",\n \"l\",\n \"o\",\n \"u\",\n \"k\",\n \"ä\",\n \"m\",\n \"r\",\n \"v\",\n \"j\",\n \"h\",\n \"p\",\n \"y\",\n \"d\",\n \"ö\",\n \"g\",\n \"c\",\n \"b\",\n \"f\",\n \"w\",\n \"z\",\n ],\n \"Vietnamese\": [\n \"n\",\n \"h\",\n \"t\",\n \"i\",\n \"c\",\n \"g\",\n \"a\",\n \"o\",\n \"u\",\n \"m\",\n \"l\",\n \"r\",\n \"à\",\n \"đ\",\n \"s\",\n \"e\",\n \"v\",\n \"p\",\n \"b\",\n \"y\",\n \"ư\",\n \"d\",\n \"á\",\n \"k\",\n \"ộ\",\n \"ế\",\n ],\n \"Czech\": [\n \"o\",\n \"e\",\n \"a\",\n \"n\",\n \"t\",\n \"s\",\n \"i\",\n \"l\",\n \"v\",\n \"r\",\n \"k\",\n \"d\",\n \"u\",\n \"m\",\n \"p\",\n \"í\",\n \"c\",\n \"h\",\n \"z\",\n \"á\",\n \"y\",\n \"j\",\n \"b\",\n \"ě\",\n \"é\",\n \"ř\",\n ],\n \"Hungarian\": [\n \"e\",\n \"a\",\n \"t\",\n \"l\",\n \"s\",\n \"n\",\n \"k\",\n \"r\",\n \"i\",\n \"o\",\n \"z\",\n \"á\",\n \"é\",\n \"g\",\n \"m\",\n \"b\",\n \"y\",\n \"v\",\n \"d\",\n \"h\",\n \"u\",\n \"p\",\n \"j\",\n \"ö\",\n \"f\",\n \"c\",\n ],\n \"Korean\": [\n \"이\",\n \"다\",\n \"에\",\n \"의\",\n \"는\",\n \"로\",\n \"하\",\n \"을\",\n \"가\",\n \"고\",\n \"지\",\n \"서\",\n \"한\",\n \"은\",\n \"기\",\n \"으\",\n \"년\",\n \"대\",\n \"사\",\n \"시\",\n \"를\",\n \"리\",\n \"도\",\n \"인\",\n \"스\",\n \"일\",\n ],\n \"Indonesian\": [\n \"a\",\n \"n\",\n \"e\",\n \"i\",\n \"r\",\n \"t\",\n \"u\",\n \"s\",\n \"d\",\n \"k\",\n \"m\",\n \"l\",\n \"g\",\n \"p\",\n \"b\",\n \"o\",\n \"h\",\n \"y\",\n \"j\",\n \"c\",\n \"w\",\n \"f\",\n \"v\",\n \"z\",\n \"x\",\n \"q\",\n ],\n \"Turkish\": [\n \"a\",\n \"e\",\n \"i\",\n \"n\",\n \"r\",\n \"l\",\n \"ı\",\n \"k\",\n \"d\",\n \"t\",\n \"s\",\n \"m\",\n \"y\",\n \"u\",\n \"o\",\n \"b\",\n \"ü\",\n \"ş\",\n \"v\",\n \"g\",\n \"z\",\n \"h\",\n \"c\",\n \"p\",\n \"ç\",\n \"ğ\",\n ],\n \"Romanian\": [\n \"e\",\n \"i\",\n \"a\",\n \"r\",\n \"n\",\n \"t\",\n \"u\",\n \"l\",\n \"o\",\n \"c\",\n \"s\",\n \"d\",\n \"p\",\n \"m\",\n \"ă\",\n \"f\",\n \"v\",\n \"î\",\n \"g\",\n \"b\",\n \"ș\",\n \"ț\",\n \"z\",\n \"h\",\n \"â\",\n \"j\",\n ],\n \"Farsi\": [\n \"ا\",\n \"ی\",\n \"ر\",\n \"د\",\n \"ن\",\n \"ه\",\n \"و\",\n \"م\",\n \"ت\",\n \"ب\",\n \"س\",\n \"ل\",\n \"ک\",\n \"ش\",\n \"ز\",\n \"ف\",\n \"گ\",\n \"ع\",\n \"خ\",\n \"ق\",\n \"ج\",\n \"آ\",\n \"پ\",\n \"ح\",\n \"ط\",\n \"ص\",\n ],\n \"Arabic\": [\n \"ا\",\n \"ل\",\n \"ي\",\n \"م\",\n \"و\",\n \"ن\",\n \"ر\",\n \"ت\",\n \"ب\",\n \"ة\",\n \"ع\",\n \"د\",\n \"س\",\n \"ف\",\n \"ه\",\n \"ك\",\n \"ق\",\n \"أ\",\n \"ح\",\n \"ج\",\n \"ش\",\n \"ط\",\n \"ص\",\n \"ى\",\n \"خ\",\n \"إ\",\n ],\n \"Danish\": [\n \"e\",\n \"r\",\n \"n\",\n \"t\",\n \"a\",\n \"i\",\n \"s\",\n \"d\",\n \"l\",\n \"o\",\n \"g\",\n \"m\",\n \"k\",\n \"f\",\n \"v\",\n \"u\",\n \"b\",\n \"h\",\n \"p\",\n \"å\",\n \"y\",\n \"ø\",\n \"æ\",\n \"c\",\n \"j\",\n \"w\",\n ],\n \"Serbian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"р\",\n \"с\",\n \"у\",\n \"т\",\n \"к\",\n \"ј\",\n \"в\",\n \"д\",\n \"м\",\n \"п\",\n \"л\",\n \"г\",\n \"з\",\n \"б\",\n \"a\",\n \"i\",\n \"e\",\n \"o\",\n \"n\",\n \"ц\",\n \"ш\",\n ],\n \"Lithuanian\": [\n \"i\",\n \"a\",\n \"s\",\n \"o\",\n \"r\",\n \"e\",\n \"t\",\n \"n\",\n \"u\",\n \"k\",\n \"m\",\n \"l\",\n \"p\",\n \"v\",\n \"d\",\n \"j\",\n \"g\",\n \"ė\",\n \"b\",\n \"y\",\n \"ų\",\n \"š\",\n \"ž\",\n \"c\",\n \"ą\",\n \"į\",\n ],\n \"Slovene\": [\n \"e\",\n \"a\",\n \"i\",\n \"o\",\n \"n\",\n \"r\",\n \"s\",\n \"l\",\n \"t\",\n \"j\",\n \"v\",\n \"k\",\n \"d\",\n \"p\",\n \"m\",\n \"u\",\n \"z\",\n \"b\",\n \"g\",\n 
\"h\",\n \"č\",\n \"c\",\n \"š\",\n \"ž\",\n \"f\",\n \"y\",\n ],\n \"Slovak\": [\n \"o\",\n \"a\",\n \"e\",\n \"n\",\n \"i\",\n \"r\",\n \"v\",\n \"t\",\n \"s\",\n \"l\",\n \"k\",\n \"d\",\n \"m\",\n \"p\",\n \"u\",\n \"c\",\n \"h\",\n \"j\",\n \"b\",\n \"z\",\n \"á\",\n \"y\",\n \"ý\",\n \"í\",\n \"č\",\n \"é\",\n ],\n \"Hebrew\": [\n \"י\",\n \"ו\",\n \"ה\",\n \"ל\",\n \"ר\",\n \"ב\",\n \"ת\",\n \"מ\",\n \"א\",\n \"ש\",\n \"נ\",\n \"ע\",\n \"ם\",\n \"ד\",\n \"ק\",\n \"ח\",\n \"פ\",\n \"ס\",\n \"כ\",\n \"ג\",\n \"ט\",\n \"צ\",\n \"ן\",\n \"ז\",\n \"ך\",\n ],\n \"Bulgarian\": [\n \"а\",\n \"и\",\n \"о\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"с\",\n \"в\",\n \"л\",\n \"к\",\n \"д\",\n \"п\",\n \"м\",\n \"з\",\n \"г\",\n \"я\",\n \"ъ\",\n \"у\",\n \"б\",\n \"ч\",\n \"ц\",\n \"й\",\n \"ж\",\n \"щ\",\n \"х\",\n ],\n \"Croatian\": [\n \"a\",\n \"i\",\n \"o\",\n \"e\",\n \"n\",\n \"r\",\n \"j\",\n \"s\",\n \"t\",\n \"u\",\n \"k\",\n \"l\",\n \"v\",\n \"d\",\n \"m\",\n \"p\",\n \"g\",\n \"z\",\n \"b\",\n \"c\",\n \"č\",\n \"h\",\n \"š\",\n \"ž\",\n \"ć\",\n \"f\",\n ],\n \"Hindi\": [\n \"क\",\n \"र\",\n \"स\",\n \"न\",\n \"त\",\n \"म\",\n \"ह\",\n \"प\",\n \"य\",\n \"ल\",\n \"व\",\n \"ज\",\n \"द\",\n \"ग\",\n \"ब\",\n \"श\",\n \"ट\",\n \"अ\",\n \"ए\",\n \"थ\",\n \"भ\",\n \"ड\",\n \"च\",\n \"ध\",\n \"ष\",\n \"इ\",\n ],\n \"Estonian\": [\n \"a\",\n \"i\",\n \"e\",\n \"s\",\n \"t\",\n \"l\",\n \"u\",\n \"n\",\n \"o\",\n \"k\",\n \"r\",\n \"d\",\n \"m\",\n \"v\",\n \"g\",\n \"p\",\n \"j\",\n \"h\",\n \"ä\",\n \"b\",\n \"õ\",\n \"ü\",\n \"f\",\n \"c\",\n \"ö\",\n \"y\",\n ],\n \"Thai\": [\n \"า\",\n \"น\",\n \"ร\",\n \"อ\",\n \"ก\",\n \"เ\",\n \"ง\",\n \"ม\",\n \"ย\",\n \"ล\",\n \"ว\",\n \"ด\",\n \"ท\",\n \"ส\",\n \"ต\",\n \"ะ\",\n \"ป\",\n \"บ\",\n \"ค\",\n \"ห\",\n \"แ\",\n \"จ\",\n \"พ\",\n \"ช\",\n \"ข\",\n \"ใ\",\n ],\n \"Greek\": [\n \"α\",\n \"τ\",\n \"ο\",\n \"ι\",\n \"ε\",\n \"ν\",\n \"ρ\",\n \"σ\",\n \"κ\",\n \"η\",\n \"π\",\n \"ς\",\n \"υ\",\n \"μ\",\n \"λ\",\n \"ί\",\n \"ό\",\n \"ά\",\n \"γ\",\n \"έ\",\n \"δ\",\n \"ή\",\n \"ω\",\n \"χ\",\n \"θ\",\n \"ύ\",\n ],\n \"Tamil\": [\n \"க\",\n \"த\",\n \"ப\",\n \"ட\",\n \"ர\",\n \"ம\",\n \"ல\",\n \"ன\",\n \"வ\",\n \"ற\",\n \"ய\",\n \"ள\",\n \"ச\",\n \"ந\",\n \"இ\",\n \"ண\",\n \"அ\",\n \"ஆ\",\n \"ழ\",\n \"ங\",\n \"எ\",\n \"உ\",\n \"ஒ\",\n \"ஸ\",\n ],\n \"Kazakh\": [\n \"а\",\n \"ы\",\n \"е\",\n \"н\",\n \"т\",\n \"р\",\n \"л\",\n \"і\",\n \"д\",\n \"с\",\n \"м\",\n \"қ\",\n \"к\",\n \"о\",\n \"б\",\n \"и\",\n \"у\",\n \"ғ\",\n \"ж\",\n \"ң\",\n \"з\",\n \"ш\",\n \"й\",\n \"п\",\n \"г\",\n \"ө\",\n ],\n}" }, { "identifier": "KO_NAMES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "KO_NAMES: Set[str] = {\"johab\", \"cp949\", \"euc_kr\"}" }, { "identifier": "LANGUAGE_SUPPORTED_COUNT", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES)" }, { "identifier": "TOO_SMALL_SEQUENCE", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "TOO_SMALL_SEQUENCE: int = 32" }, { "identifier": "ZH_NAMES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "ZH_NAMES: Set[str] = {\"big5\", \"cp950\", \"big5hkscs\", \"hz\"}" }, { "identifier": "is_suspiciously_successive_range", "path": "dist/py/Python38/site-packages/charset_normalizer/md.py", "snippet": "@lru_cache(maxsize=1024)\ndef is_suspiciously_successive_range(\n unicode_range_a: Optional[str], unicode_range_b: Optional[str]\n) 
-> bool:\n \"\"\"\n Determine if two Unicode range seen next to each other can be considered as suspicious.\n \"\"\"\n if unicode_range_a is None or unicode_range_b is None:\n return True\n\n if unicode_range_a == unicode_range_b:\n return False\n\n if \"Latin\" in unicode_range_a and \"Latin\" in unicode_range_b:\n return False\n\n if \"Emoticons\" in unicode_range_a or \"Emoticons\" in unicode_range_b:\n return False\n\n # Latin characters can be accompanied with a combining diacritical mark\n # eg. Vietnamese.\n if (\"Latin\" in unicode_range_a or \"Latin\" in unicode_range_b) and (\n \"Combining\" in unicode_range_a or \"Combining\" in unicode_range_b\n ):\n return False\n\n keywords_range_a, keywords_range_b = unicode_range_a.split(\n \" \"\n ), unicode_range_b.split(\" \")\n\n for el in keywords_range_a:\n if el in UNICODE_SECONDARY_RANGE_KEYWORD:\n continue\n if el in keywords_range_b:\n return False\n\n # Japanese Exception\n range_a_jp_chars, range_b_jp_chars = (\n unicode_range_a\n in (\n \"Hiragana\",\n \"Katakana\",\n ),\n unicode_range_b in (\"Hiragana\", \"Katakana\"),\n )\n if (range_a_jp_chars or range_b_jp_chars) and (\n \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b\n ):\n return False\n if range_a_jp_chars and range_b_jp_chars:\n return False\n\n if \"Hangul\" in unicode_range_a or \"Hangul\" in unicode_range_b:\n if \"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b:\n return False\n if unicode_range_a == \"Basic Latin\" or unicode_range_b == \"Basic Latin\":\n return False\n\n # Chinese/Japanese use dedicated range for punctuation and/or separators.\n if (\"CJK\" in unicode_range_a or \"CJK\" in unicode_range_b) or (\n unicode_range_a in [\"Katakana\", \"Hiragana\"]\n and unicode_range_b in [\"Katakana\", \"Hiragana\"]\n ):\n if \"Punctuation\" in unicode_range_a or \"Punctuation\" in unicode_range_b:\n return False\n if \"Forms\" in unicode_range_a or \"Forms\" in unicode_range_b:\n return False\n\n return True" }, { "identifier": "CoherenceMatches", "path": "dist/py/Python38/site-packages/charset_normalizer/models.py", "snippet": "class CharsetMatch:\nclass CharsetMatches:\nclass CliDetectionResult:\n def __init__(\n self,\n payload: bytes,\n guessed_encoding: str,\n mean_mess_ratio: float,\n has_sig_or_bom: bool,\n languages: \"CoherenceMatches\",\n decoded_payload: Optional[str] = None,\n ):\n def __eq__(self, other: object) -> bool:\n def __lt__(self, other: object) -> bool:\n def multi_byte_usage(self) -> float:\n def __str__(self) -> str:\n def __repr__(self) -> str:\n def add_submatch(self, other: \"CharsetMatch\") -> None:\n def encoding(self) -> str:\n def encoding_aliases(self) -> List[str]:\n def bom(self) -> bool:\n def byte_order_mark(self) -> bool:\n def languages(self) -> List[str]:\n def language(self) -> str:\n def chaos(self) -> float:\n def coherence(self) -> float:\n def percent_chaos(self) -> float:\n def percent_coherence(self) -> float:\n def raw(self) -> bytes:\n def submatch(self) -> List[\"CharsetMatch\"]:\n def has_submatch(self) -> bool:\n def alphabets(self) -> List[str]:\n def could_be_from_charset(self) -> List[str]:\n def output(self, encoding: str = \"utf_8\") -> bytes:\n def fingerprint(self) -> str:\n def __init__(self, results: Optional[List[CharsetMatch]] = None):\n def __iter__(self) -> Iterator[CharsetMatch]:\n def __getitem__(self, item: Union[int, str]) -> CharsetMatch:\n def __len__(self) -> int:\n def __bool__(self) -> bool:\n def append(self, item: CharsetMatch) -> None:\n def best(self) -> 
Optional[\"CharsetMatch\"]:\n def first(self) -> Optional[\"CharsetMatch\"]:\n def __init__(\n self,\n path: str,\n encoding: Optional[str],\n encoding_aliases: List[str],\n alternative_encodings: List[str],\n language: str,\n alphabets: List[str],\n has_sig_or_bom: bool,\n chaos: float,\n coherence: float,\n unicode_path: Optional[str],\n is_preferred: bool,\n ):\n def __dict__(self) -> Dict[str, Any]: # type: ignore\n def to_json(self) -> str:" }, { "identifier": "is_accentuated", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_accentuated(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return (\n \"WITH GRAVE\" in description\n or \"WITH ACUTE\" in description\n or \"WITH CEDILLA\" in description\n or \"WITH DIAERESIS\" in description\n or \"WITH CIRCUMFLEX\" in description\n or \"WITH TILDE\" in description\n )" }, { "identifier": "is_latin", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef is_latin(character: str) -> bool:\n try:\n description: str = unicodedata.name(character)\n except ValueError:\n return False\n return \"LATIN\" in description" }, { "identifier": "is_multi_byte_encoding", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=128)\ndef is_multi_byte_encoding(name: str) -> bool:\n \"\"\"\n Verify is a specific encoding is a multi byte one based on it IANA name\n \"\"\"\n return name in {\n \"utf_8\",\n \"utf_8_sig\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_32\",\n \"utf_32_le\",\n \"utf_32_be\",\n \"utf_7\",\n } or issubclass(\n importlib.import_module(\"encodings.{}\".format(name)).IncrementalDecoder,\n MultibyteIncrementalDecoder,\n )" }, { "identifier": "is_unicode_range_secondary", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))\ndef is_unicode_range_secondary(range_name: str) -> bool:\n return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)" }, { "identifier": "unicode_range", "path": "dist/py/Python38/site-packages/charset_normalizer/utils.py", "snippet": "@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)\ndef unicode_range(character: str) -> Optional[str]:\n \"\"\"\n Retrieve the Unicode range official name from a single character.\n \"\"\"\n character_ord: int = ord(character)\n\n for range_name, ord_range in UNICODE_RANGES_COMBINED.items():\n if character_ord in ord_range:\n return range_name\n\n return None" } ]
import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple

from .constant import (
    FREQUENCIES,
    KO_NAMES,
    LANGUAGE_SUPPORTED_COUNT,
    TOO_SMALL_SEQUENCE,
    ZH_NAMES,
)
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
    is_accentuated,
    is_latin,
    is_multi_byte_encoding,
    is_unicode_range_secondary,
    unicode_range,
)
11,265
if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
is_suspiciously_successive_range(discovered_range, character_range)
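The docstring of characters_popularity_compare above stresses that the match is deliberately fuzzy rather than strict. The toy below restates only its core rank-projection test, using an assumed English-like ordering instead of the real FREQUENCIES table; the helper name and table are illustrative, and the neighbourhood-overlap fallbacks of the real function are omitted.

# Assumed ordering for illustration; not charset_normalizer's FREQUENCIES.
TOY_FREQ = list("etaoinshrdlcumwfgypbvkjxqz")

def rank_projection_ratio(observed, tolerance=4):
    # Fraction of observed characters whose projected rank lands within
    # `tolerance` of their rank in TOY_FREQ (small-alphabet branch only).
    ratio = len(TOY_FREQ) / len(observed)
    matches = 0
    for observed_rank, character in enumerate(observed):
        if character not in TOY_FREQ:
            continue
        projected_rank = int(observed_rank * ratio)
        if abs(projected_rank - TOY_FREQ.index(character)) <= tolerance:
            matches += 1
    return matches / len(observed)

print(rank_projection_ratio(list("eatonishrdlucmwfgypbvkjxqz")))  # 1.0 for a near-English ordering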
5
2023-10-11 09:08:57+00:00
16k
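Assuming charset_normalizer is installed and the helpers above are importable from charset_normalizer.cd as in the listed file, the language-association functions can be exercised directly; the expected values below follow from the branches and constants shown in the snippet, not from separate documentation.

from charset_normalizer.cd import encoding_languages, mb_encoding_languages

print(mb_encoding_languages("cp932"))   # ['Japanese']  -- explicit cp932 branch above
print(mb_encoding_languages("euc_kr"))  # ['Korean']    -- euc_kr is in KO_NAMES
print(mb_encoding_languages("big5"))    # ['Chinese']   -- big5 is in ZH_NAMES
print(encoding_languages("cp1251"))     # languages inferred from the Cyrillic range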
MTgeophysics/mtpy-v2
mtpy/modeling/modem/residual.py
[ { "identifier": "Data", "path": "mtpy/modeling/modem/data.py", "snippet": "class Data:\n \"\"\"\n Data will read and write .dat files for ModEM and convert a WS data file\n to ModEM format.\n\n ..note: :: the data is interpolated onto the given periods such that all\n stations invert for the same periods. The interpolation is\n a linear interpolation of each of the real and imaginary parts\n of the impedance tensor and induction tensor.\n See mtpy.core.mt.MT.interpolate for more details\n \n :param edi_list: list of edi files to read\n\n ====================== ====================================================\n Attributes Description\n ====================== ====================================================\n _dtype internal variable defining the data type of\n data_array\n _logger python logging object that put messages in logging\n format defined in logging configure file, see MtPyLog\n for more information\n _t_shape internal variable defining shape of tipper array in\n _dtype\n _z_shape internal variable defining shape of Z array in\n _dtype\n center_position (east, north, evel) for center point of station\n array. All stations are relative to this location\n for plotting purposes.\n comp_index_dict dictionary for index values of component of Z and T\n station_locations Stations object\n data_array numpy.ndarray (num_stations) structured to store\n data. keys are:\n * station --> station name\n * lat --> latitude in decimal degrees\n * lon --> longitude in decimal degrees\n * elev --> elevation (m)\n * rel_east -- > relative east location to\n center_position (m)\n * rel_north --> relative north location to\n center_position (m)\n * east --> UTM east (m)\n * north --> UTM north (m)\n * zone --> UTM zone\n * z --> impedance tensor array with shape\n (num_freq, 2, 2)\n * z_err --> impedance tensor error array with\n shape (num_freq, 2, 2)\n * tip --> Tipper array with shape\n (num_freq, 1, 2)\n * tipperr --> Tipper array with shape\n (num_freq, 1, 2)\n data_fn full path to data file\n data_period_list period list from all the data\n edi_list list of full paths to edi files\n error_type_tipper [ 'abs' | 'floor' ]\n *default* is 'abs'\n error_type_z [ 'egbert' | 'mean_od' | 'eigen' | 'median']\n *default* is 'egbert_floor'\n * add '_floor' to any of the above to set the\n error as an error floor, otherwise all\n components are give weighted the same\n\n * 'egbert' sets error to\n error_value_z * sqrt(abs(zxy*zyx))\n * 'mean_od' sets error to\n error_value_z * mean([Zxy, Zyx])\n (non zeros)\n * 'eigen' sets error to\n error_value_z * eigenvalues(Z[ii])\n * 'median' sets error to\n error_value_z * median([Zxx, Zxy, Zyx, Zyy])\n (non zeros)\n A 2x2 numpy array of error_type_z can be specified to\n explicitly set the error_type_z for each component.\n\n error_value_z percentage to multiply Z by to set error\n *default* is 5 for 5% of Z as error\n A 2x2 numpy array of values can be specified to\n explicitly set the error_value_z for each component.\n\n error_value_tipper absolute error between 0 and 1.\n fn_basename basename of data file. 
*default* is 'ModEM_Data.dat'\n formatting ['1' | '2'], format of the output data file, *default* is '1'\n header_strings strings for header of data file following the format\n outlined in the ModEM documentation\n inv_comp_dict dictionary of inversion components\n inv_mode inversion mode, options are: *default* is '1'\n * '1' --> for 'Full_Impedance' and\n 'Full_Vertical_Components'\n * '2' --> 'Full_Impedance'\n * '3' --> 'Off_Diagonal_Impedance' and\n 'Full_Vertical_Components'\n * '4' --> 'Off_Diagonal_Impedance'\n * '5' --> 'Full_Vertical_Components'\n * '6' --> 'Full_Interstation_TF'\n * '7' --> 'Off_Diagonal_Rho_Phase'\n\n inv_mode_dict dictionary for inversion modes\n max_num_periods maximum number of periods\n model_epsg epsg code for model projection, provide this to\n project model to non-utm coordinates. Find the epsg\n code for your projection on\n http://spatialreference.org/ref/ or google search\n epsg \"your projection\"\n model_utm_zone alternative to model_epsg, choose a utm zone to\n project all sites to (e.g. '55S')\n mt_dict dictionary of mtpy.core.mt.MT objects with keys\n being station names\n period_buffer float or int\n if specified, apply a buffer so that interpolation doesn't\n stretch too far over periods\n period_dict dictionary of period index for period_list\n period_list list of periods to invert for\n period_max maximum value of period to invert for\n period_min minimum value of period to invert for\n period_buffer buffer so that interpolation doesn't stretch too far\n over periods. Provide a float or integer factor, \n greater than which interpolation will not stretch.\n e.g. 1.5 means only interpolate to a maximum of\n 1.5 times each side of each frequency value\n rotate_angle Angle to rotate data to assuming 0 is N and E is 90\n save_path path to save data file to\n units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z\n *default* is [mV/km]/[nT]\n wave_sign_impedance [ + | - ] sign of time dependent wave.\n *default* is '+' as positive downwards.\n wave_sign_tipper [ + | - ] sign of time dependent wave.\n *default* is '+' as positive downwards.\n ====================== ====================================================\n\n\n :Example 1 --> create inversion period list: ::\n\n >>> from pathlib import Path\n >>> import mtpy.modeling.modem as modem\n >>> edi_path = Path(r\"/home/mt/edi_files\")\n >>> edi_list = list(edi_path.glob(\"*.edi\"))\n >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\\\n >>> ... 
max_num_periods=12)\n >>> md.write_data_file(save_path=r\"/home/modem/inv1\")\n >>> md\n \n\n :Example 2 --> set inverions period list from data: ::\n\n >>> md = modem.Data(edi_list)\n >>> #get period list from an .edi file\n >>> inv_period_list = 1./md.mt_dict[\"mt01\"].Z.freq\n >>> #invert for every third period in inv_period_list\n >>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))]\n >>> md.period_list = inv_period_list\n >>> md.write_data_file(save_path=r\"/home/modem/inv1\")\n\n :Example 3 --> change error values: ::\n\n >>> mdr.error_type = 'floor'\n >>> mdr.error_floor = 10\n >>> mdr.error_tipper = .03\n >>> mdr.write_data_file(save_path=r\"/home/modem/inv2\")\n\n :Example 4 --> change inversion type: ::\n\n >>> mdr.inv_mode = '3'\n >>> mdr.write_data_file(save_path=r\"/home/modem/inv2\")\n\n :Example 5 --> rotate data: ::\n\n >>> md.rotation_angle = 60\n >>> md.write_data_file(save_path=r\"/home/modem/Inv1\")\n >>> # or\n >>> md.write_data_file(save_path=r\"/home/modem/Inv1\", \\\n rotation_angle=60)\n\n\n \"\"\"\n\n def __init__(self, dataframe=None, center_point=None, **kwargs):\n\n self.logger = logger\n\n self.dataframe = dataframe\n\n if center_point is None:\n self.center_point = MTLocation()\n else:\n self.center_point = center_point\n\n self.wave_sign_impedance = \"+\"\n self.wave_sign_tipper = \"+\"\n self.z_units = \"[mV/km]/[nT]\"\n self.t_units = \"\"\n self.inv_mode = \"1\"\n self.formatting = \"1\"\n self.rotation_angle = 0\n\n self.z_model_error = ModelErrors(\n error_value=5,\n error_type=\"geometric_mean\",\n floor=True,\n mode=\"impedance\",\n )\n self.t_model_error = ModelErrors(\n error_value=0.02,\n error_type=\"absolute\",\n floor=True,\n mode=\"tipper\",\n )\n\n self.fn_basename = \"ModEM_Data.dat\"\n self.save_path = Path.cwd()\n\n self.topography = True\n\n self.inv_mode_dict = {\n \"1\": [\"Full_Impedance\", \"Full_Vertical_Components\"],\n \"2\": [\"Full_Impedance\"],\n \"3\": [\"Off_Diagonal_Impedance\", \"Full_Vertical_Components\"],\n \"4\": [\"Off_Diagonal_Impedance\"],\n \"5\": [\"Full_Vertical_Components\"],\n \"6\": [\"Full_Interstation_TF\"],\n \"7\": [\"Off_Diagonal_Rho_Phase\"],\n }\n self.inv_comp_dict = {\n \"Full_Impedance\": [\"zxx\", \"zxy\", \"zyx\", \"zyy\"],\n \"Off_Diagonal_Impedance\": [\"zxy\", \"zyx\"],\n \"Full_Vertical_Components\": [\"tzx\", \"tzy\"],\n }\n\n self.header_string = \" \".join(\n [\n \"# Period(s)\",\n \"Code\",\n \"GG_Lat\",\n \"GG_Lon\",\n \"X(m)\",\n \"Y(m)\",\n \"Z(m)\",\n \"Component\",\n \"Real\",\n \"Imag\",\n \"Error\",\n ]\n )\n\n self._df_keys = [\n \"period\",\n \"station\",\n \"latitude\",\n \"longitude\",\n \"model_north\",\n \"model_east\",\n \"model_elevation\",\n \"comp\",\n \"real\",\n \"imag\",\n \"error\",\n ]\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def __str__(self):\n lines = [\"ModEM Data Object:\"]\n if self.dataframe is not None:\n lines += [\n f\"\\tNumber of impedance stations: {self.get_n_stations('impedance')}\"\n ]\n lines += [\n f\"\\tNumber of tipper stations: {self.get_n_stations('vertical')}\"\n ]\n lines += [\n f\"\\tNumber of phase tensor stations: {self.get_n_stations('phase_tensor')}\"\n ]\n lines += [f\"\\tNumber of periods: {self.n_periods}\"]\n lines += [\"\\tPeriod range (s): \"]\n lines += [f\"\\t\\tMin: {self.period.min():.5g}\"]\n lines += [f\"\\t\\tMax: {self.period.max():.5g}\"]\n lines += [f\"\\tRotation angle: {self.rotation_angle}\"]\n lines += [\"\\tData center: \"]\n lines += [\n f\"\\t\\tLatitude: 
{self.center_point.latitude:>8.4f} deg \"\n f\"\\tNorthing: {self.center_point.north:.4f} m\"\n ]\n lines += [\n f\"\\t\\tLongitude: {self.center_point.longitude:>8.4f} deg \"\n f\"\\tEasting: {self.center_point.east:.4f} m\"\n ]\n lines += [\n f\"\\t\\tDatum epsg: {self.center_point.datum_epsg}\"\n f\"\\t\\t\\tUTM epsg: {self.center_point.utm_epsg}\"\n ]\n lines += [f\"\\t\\tElevation: {self.center_point.elevation:.1f} m\"]\n\n lines += [\n f\"\\tImpedance data: {self.dataframe.zxy.mean() != 0.0}\"\n ]\n lines += [\n f\"\\tTipper data: {self.dataframe.tzx.mean() != 0.0}\"\n ]\n lines += [\n f\"\\tInversion Mode: {', '.join(self.inv_mode_dict[self.inv_mode])}\"\n ]\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def dataframe(self):\n return self._mt_dataframe.dataframe\n\n @dataframe.setter\n def dataframe(self, df):\n \"\"\"\n Set dataframe to an MTDataframe\n :param df: DESCRIPTION\n :type df: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if df is None:\n self._mt_dataframe = MTDataFrame()\n\n elif isinstance(df, (pd.DataFrame, MTDataFrame, np.ndarray)):\n self._mt_dataframe = MTDataFrame(df)\n\n else:\n raise TypeError(\n f\"Input must be a dataframe or MTDataFrame object not {type(df)}\"\n )\n\n self._mt_dataframe.dataframe.reset_index(drop=True, inplace=True)\n\n @property\n def model_parameters(self):\n params = {\n \"wave_sign_impedance\": self.wave_sign_impedance,\n \"wave_sign_tipper\": self.wave_sign_tipper,\n \"z_units\": self.z_units,\n \"t_units\": self.t_units,\n \"inv_mode\": self.inv_mode,\n \"formatting\": self.formatting,\n \"data_filename\": self.data_filename,\n \"topography\": self.topography,\n \"rotation_angle\": self.rotation_angle,\n \"center_point.latitude\": self.center_point.latitude,\n \"center_point.longitue\": self.center_point.longitude,\n \"center_point.elevation\": self.center_point.elevation,\n \"center_point.utm_epsg\": self.center_point.utm_epsg,\n \"center_point.datum_epsg\": self.center_point.datum_epsg,\n }\n\n for key, value in self.z_model_error.error_parameters.items():\n params[f\"z_model_error.{key}\"] = value\n for key, value in self.t_model_error.error_parameters.items():\n params[f\"t_model_error.{key}\"] = value\n\n return params\n\n @property\n def data_filename(self):\n return self.save_path.joinpath(self.fn_basename)\n\n @data_filename.setter\n def data_filename(self, value):\n if value is not None:\n value = Path(value)\n if value.parent == Path(\".\"):\n self.fn_basename = value.name\n else:\n self.save_path = value.parent\n self.fn_basename = value.name\n\n @property\n def period(self):\n if self.dataframe is not None:\n return np.sort(self.dataframe.period.unique())\n\n def get_n_stations(self, mode):\n if self.dataframe is not None:\n if \"impedance\" in mode.lower():\n return (\n self.dataframe.loc[\n (self.dataframe.zxx != 0)\n | (self.dataframe.zxy != 0)\n | (self.dataframe.zyx != 0)\n | (self.dataframe.zyy != 0),\n \"station\",\n ]\n .unique()\n .size\n )\n elif \"vertical\" in mode.lower():\n return (\n self.dataframe.loc[\n (self.dataframe.tzx != 0) | (self.dataframe.tzy != 0),\n \"station\",\n ]\n .unique()\n .size\n )\n elif \"phase_tensor\" in mode.lower():\n return (\n self.dataframe.loc[\n (self.dataframe.ptxx != 0)\n | (self.dataframe.ptxy != 0)\n | (self.dataframe.ptyx != 0)\n | (self.dataframe.ptyy != 0),\n \"station\",\n ]\n .unique()\n .size\n )\n\n @property\n def n_periods(self):\n return self.period.size\n\n def _get_components(self):\n \"\"\"\n get 
components to write out\n \"\"\"\n\n comps = []\n for inv_modes in self.inv_mode_dict[self.inv_mode]:\n comps += self.inv_comp_dict[inv_modes]\n\n return comps\n\n def _get_header_string(self, error_type, error_value):\n \"\"\"\n Create the header strings\n\n # Created using MTpy calculated egbert_floor error of 5% data rotated 0.0_deg\n clockwise from N\n\n :param error_type: The method to calculate the errors\n :type error_type: string\n :param error_value: value of error or error floor\n :type error_value: float\n :param rotation_angle: angle data have been rotated by\n :type rotation_angle: float\n\n \"\"\"\n\n h_str = []\n if np.atleast_1d(error_type).ndim == 2:\n h_str = (\n f\"# Creating_software: MTpy v2, \"\n f\"error: [{error_type[0, 0]}, {error_type[0, 1]}, \"\n f\"{error_type[1, 0]}, {error_type[1, 1]}], \"\n )\n else:\n h_str = f\"# Creating_software: MTpy v2, error: {error_type}, \"\n\n if np.atleast_1d(error_value).ndim == 2:\n h_str += (\n f\"error floors of {error_value[0, 0]:.0f}%, \"\n f\"{error_value[0, 1]:.0f}%, \"\n f\"{error_value[1, 0]:.0f}%, \"\n f\"{error_value[1, 1]:.0f}%, \"\n f\"data rotated {self.rotation_angle:.1f}_deg clockwise from N, \"\n f\"{self.center_point.utm_crs}\"\n )\n\n else:\n if error_value > 1:\n fmt = \".0f\"\n units = \"%\"\n elif error_value < 1:\n fmt = \".2f\"\n units = \"\"\n h_str += (\n f\"error_value: {error_value:{fmt}}{units}, data_rotation: \"\n f\"{self.rotation_angle:.1f} deg clockwise, \"\n f\"model_{self.center_point.utm_crs}\"\n )\n\n return h_str\n\n def _write_header(self, mode):\n \"\"\" \"\"\"\n d_lines = []\n if \"impedance\" in mode.lower():\n d_lines.append(\n self._get_header_string(\n self.z_model_error.error_type,\n self.z_model_error.error_value,\n )\n )\n d_lines.append(self.header_string)\n d_lines.append(f\"> {mode}\")\n d_lines.append(f\"> exp({self.wave_sign_impedance}i\\omega t)\")\n d_lines.append(f\"> {self.z_units}\")\n\n elif \"vertical\" in mode.lower():\n d_lines.append(\n self._get_header_string(\n self.t_model_error.error_type,\n self.t_model_error.error_value,\n )\n )\n d_lines.append(self.header_string)\n d_lines.append(f\"> {mode}\")\n d_lines.append(f\"> exp({self.wave_sign_tipper}i\\omega t)\")\n d_lines.append(f\"> [{self.t_units}]\")\n\n d_lines.append(\n f\"> {self.rotation_angle:.3g}\"\n ) # orientation, need to add at some point\n if self.topography:\n d_lines.append(\n f\"> {self.center_point.latitude:>10.6f} \"\n f\"{self.center_point.longitude:>10.6f} \"\n f\"{self.center_point.model_elevation:>10.2f}\"\n )\n else:\n d_lines.append(\n f\"> {self.center_point.latitude:>10.6f} \"\n f\"{self.center_point.longitude:>10.6f}\"\n )\n\n n_stations = self.get_n_stations(mode)\n d_lines.append(f\"> {self.n_periods} {n_stations}\")\n\n return d_lines\n\n def _write_comp(self, row, comp):\n \"\"\"\n write a single row\n\n :param row: DESCRIPTION\n :type row: TYPE\n :param comp: DESCRIPTION\n :type comp: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n value = np.nan_to_num(getattr(row, comp))\n err = getattr(row, f\"{comp}_model_error\")\n\n if (\n value.real != 0.0\n and value.imag != 0.0\n and value.real != 1e32\n and value.imag != 1e32\n ):\n if self.formatting == \"1\":\n per = f\"{row.period:<12.5e}\"\n sta = f\"{row.station:>7}\"\n lat = f\"{row.latitude:> 9.3f}\"\n lon = f\"{row.longitude:> 9.3f}\"\n eas = f\"{row.model_east:> 12.3f}\"\n nor = f\"{row.model_north:> 12.3f}\"\n if self.topography:\n ele = f\"{row.model_elevation:> 12.3f}\"\n else:\n ele = f\"{0:> 12.3f}\"\n if 
comp[1].lower() == \"z\":\n comp = comp.replace(\"z\", \"\")\n com = f\"{comp:>4}\".upper()\n if self.z_units.lower() == \"ohm\":\n rea = f\"{value.real / 796.:> 14.6e}\"\n ima = f\"{value.imag / 796.:> 14.6e}\"\n elif self.z_units.lower() not in (\n \"[v/m]/[t]\",\n \"[mv/km]/[nt]\",\n ):\n raise ValueError(f\"Unsupported unit '{self.z_units}'\")\n else:\n rea = f\"{value.real:> 14.6e}\"\n ima = f\"{value.imag:> 14.6e}\"\n\n elif self.formatting == \"2\":\n per = f\"{row.period:<14.6e}\"\n sta = f\"{row.station:>10}\"\n lat = f\"{row.latitude:> 14.6f}\"\n lon = f\"{row.longitude:> 14.6f}\"\n eas = f\"{row.model_east:> 15.3f}\"\n nor = f\"{row.model_north:> 15.3f}\"\n if self.topography:\n ele = f\"{row.model_elevation:> 10.3f}\"\n else:\n ele = f\"{0:> 10.3f}\"\n if comp[1].lower() == \"z\":\n comp = comp.replace(\"z\", \"\")\n com = f\"{comp:>4}\".upper()\n if self.z_units.lower() == \"ohm\":\n rea = f\"{value.real / 796.:> 17.6e}\"\n ima = f\"{value.imag / 796.:> 17.6e}\"\n elif self.z_units.lower() not in (\n \"[v/m]/[t]\",\n \"[mv/km]/[nt]\",\n ):\n raise ValueError(f\"Unsupported unit '{self.z_units}'\")\n else:\n rea = f\"{value.real:> 17.6e}\"\n ima = f\"{value.imag:> 17.6e}\"\n\n else:\n raise NotImplementedError(\n f\"format {self.formatting} ({type(self.formatting)}) is \"\n \"not supported.\"\n )\n\n if np.isinf(err) or np.isnan(err):\n err = 10 ** (\n np.floor(np.log10(abs(max([float(rea), float(ima)]))))\n )\n abs_err = f\"{err:> 14.6e}\"\n\n return \"\".join(\n [\n per,\n sta,\n lat,\n lon,\n nor,\n eas,\n ele,\n com,\n rea,\n ima,\n abs_err,\n ]\n )\n\n def _check_for_errors_of_zero(self):\n \"\"\"\n Need to check for any zeros in the error values which can prevent\n ModEM from running.\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n ## check for zeros in model error\n for comp in [\"zxx\", \"zxy\", \"zyx\", \"zyy\", \"tzx\", \"tzy\"]:\n find_zeros = np.where(self.dataframe[f\"{comp}_model_error\"] == 0)[\n 0\n ]\n if find_zeros.shape[0] > 0:\n if comp in [\"zxx\", \"zxy\", \"zyx\", \"zyy\"]:\n error_percent = self.z_model_error.error_value\n elif \"t\" in comp:\n error_percent = self.t_model_error.error_value\n\n self.logger.warning(\n f\"Found errors with values of 0 in {comp} \"\n f\"{len(find_zeros)} times. Setting error as {comp} x \"\n f\"{error_percent}.\"\n )\n\n self.dataframe.loc[\n find_zeros.tolist(), f\"{comp}_model_error\"\n ] = (\n abs(self.dataframe[f\"{comp}\"].iloc[list(find_zeros)])\n * error_percent\n )\n\n def _check_for_too_small_errors(self, tol=0.02):\n \"\"\"\n Check for too small of errors relative to the error floor\n \"\"\"\n\n for comp in [\"zxx\", \"zxy\", \"zyx\", \"zyy\", \"tzx\", \"tzy\"]:\n find_small = np.where(\n self.dataframe[f\"{comp}_model_error\"]\n / abs(self.dataframe[comp])\n < tol\n )[0]\n if find_small.shape[0] > 0:\n\n if comp.startswith(\"z\"):\n error_percent = self.z_model_error.error_value\n elif comp.startswith(\"t\"):\n error_percent = self.t_model_error.error_value\n\n self.logger.warning(\n f\"Found errors with values less than {tol} in {comp} \"\n f\"{len(find_small)} times. 
Setting error as {comp} x \"\n f\"{error_percent}.\"\n )\n self.dataframe.loc[\n find_small.tolist(), f\"{comp}_model_error\"\n ] = (\n abs(self.dataframe[f\"{comp}\"].iloc[list(find_small)])\n * error_percent\n )\n\n def write_data_file(\n self,\n file_name=None,\n save_path=None,\n fn_basename=None,\n elevation=False,\n ):\n \"\"\"\n \n :param save_path: full directory to save file to, defaults to None\n :type save_path: string or Path, optional\n :param fn_basename: Basename of the saved file, defaults to None\n :type fn_basename: string, optional\n :param elevation: If True adds in elevation from 'rel_elev' column in data\n array, defaults to False\n :type elevation: boolean, optional\n\n :raises NotImplementedError: If the inversion mode is not supported\n :raises ValueError: :class:`mtpy.utils.exceptions.ValueError` if a parameter\n is missing\n :return: full path to data file\n :rtype: Path\n\n .. code-block::\n :linenos:\n\n >>> from pathlib import Path\n >>> import mtpy.modeling.modem as modem\n >>> edi_path = Path(r\"/home/mt/edi_files\")\n >>> edi_list = list(edi_path.glob(\"*.ed\"))\n >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\\\n >>> ... max_num_periods=12)\n >>> md.write_data_file(save_path=r\"/home/modem/inv1\")\n /home/modem/inv1/ModemDataFile.dat\n \n \"\"\"\n\n if self.dataframe is None:\n raise ValueError(\n \"A DataFrame needs to be present to write a ModEM data file\"\n )\n\n if file_name is not None:\n self.data_filename = file_name\n\n if save_path is not None:\n self.save_path = Path(save_path)\n if fn_basename is not None:\n self.data_filename = fn_basename\n\n self._check_for_errors_of_zero()\n self._check_for_too_small_errors()\n\n for inv_mode in self.inv_mode_dict[self.inv_mode]:\n if \"impedance\" in inv_mode.lower():\n z_lines = self._write_header(inv_mode)\n\n elif \"vertical\" in inv_mode.lower():\n t_lines = self._write_header(inv_mode)\n\n else:\n # maybe error here\n raise NotImplementedError(\n f\"inv_mode {inv_mode} is not supported yet\"\n )\n\n comps = self._get_components()\n # Iterate over stations and sort by period\n for station in self.dataframe.station.unique():\n sdf = self.dataframe.loc[self.dataframe.station == station]\n sdf.sort_values(\"period\")\n\n for row in sdf.itertuples():\n for comp in comps:\n d_line = self._write_comp(row, comp)\n if d_line is None:\n continue\n\n if comp.startswith(\"z\"):\n z_lines.append(d_line)\n elif comp.startswith(\"t\"):\n t_lines.append(d_line)\n\n with open(self.data_filename, \"w\") as dfid:\n dfid.write(\"\\n\".join(z_lines + t_lines))\n\n self.logger.info(\n \"Wrote ModEM data file to {0}\".format(self.data_filename)\n )\n return self.data_filename\n\n def _read_header(self, header_lines):\n \"\"\"\n Read header lines\n\n :param header_lines: DESCRIPTION\n :type header_lines: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n mode = None\n inv_list = []\n header_list = []\n metadata_list = []\n n_periods = 0\n n_stations = 0\n self.center_point = MTLocation()\n for hline in header_lines:\n if hline.find(\"#\") == 0:\n if \"period\" not in hline.lower():\n header_list.append(hline.strip())\n elif hline.find(\">\") == 0:\n # modem outputs only 7 characters for the lat and lon\n # if there is a negative they merge together, need to split\n # them up\n hline = hline.replace(\"-\", \" -\")\n metadata_list.append(hline[1:].strip())\n if hline.lower().find(\"ohm\") > 0:\n self.z_units = \"ohm\"\n continue\n elif hline.lower().find(\"mv\") > 0:\n self.z_units = \"[mV/km]/[nT]\"\n 
continue\n elif hline.lower().find(\"vertical\") > 0:\n mode = \"vertical\"\n inv_list.append(\"Full_Vertical_Components\")\n continue\n elif hline.lower().find(\"impedance\") > 0:\n mode = \"impedance\"\n inv_list.append(\"Full_Impedance\")\n continue\n\n if hline.find(\"exp\") > 0:\n if mode in [\"impedance\"]:\n self.wave_sign_impedance = hline[hline.find(\"(\") + 1]\n elif mode in [\"vertical\"]:\n self.wave_sign_tipper = hline[hline.find(\"(\") + 1]\n\n elif (\n len(hline[1:].strip().split()) >= 2\n and hline.count(\".\") > 0\n ):\n value_list = [\n float(value) for value in hline[1:].strip().split()\n ]\n if value_list[0] != 0.0:\n self.center_point.latitude = value_list[0]\n if value_list[1] != 0.0:\n self.center_point.longitude = value_list[1]\n try:\n self.center_point.elevation = value_list[2]\n except IndexError:\n self.center_point.elevation = 0.0\n self.logger.debug(\n \"Did not find center elevation in data file\"\n )\n elif len(hline[1:].strip().split()) < 2:\n try:\n self.rotation_angle = float(hline[1:].strip())\n except ValueError:\n continue\n elif len(hline[1:].strip().split()) == 2:\n n_periods = int(hline[1:].strip().split()[0])\n n_stations = int(hline[1:].strip().split()[1])\n\n for head_line, inv_mode in zip(header_list, inv_list):\n self._parse_header_line(head_line, inv_mode)\n\n self._get_inv_mode(inv_list)\n\n return n_periods, n_stations\n\n def _parse_header_line(self, header_line, mode):\n \"\"\"\n Parse header line\n\n \"\"\"\n\n if header_line == self.header_string:\n return\n\n item_dict = {\n \"error\": \"error_type\",\n \"error_value\": \"error_value\",\n \"data_rotation\": \"rotation_angle\",\n \"model_epsg\": \"center_point.utm_epsg\",\n }\n\n if header_line.count(\",\") > 0:\n header_list = header_line.split(\",\")\n else:\n header_list = header_line.split()\n\n if \"impedance\" in mode.lower():\n obj = self.z_model_error\n\n elif \"vertical\" in mode.lower():\n obj = self.t_model_error\n\n for ii, item in enumerate(header_list):\n item = item.lower()\n if item.count(\":\") > 0:\n item_list = [k.strip() for k in item.split(\":\")]\n if len(item_list) == 2:\n key = item_list[0]\n value = item_list[1].replace(\"%\", \"\").split()[0]\n if key in [\"error_value\", \"data_rotation\"]:\n try:\n value = float(value)\n except ValueError:\n pass\n try:\n if key in [\"model_epsg\"]:\n setattr(self.center_point, \"utm_epsg\", value)\n elif \"error\" in key:\n\n setattr(\n obj,\n item_dict[key],\n value,\n )\n else:\n setattr(self, item_dict[\"key\"], value)\n except KeyError:\n continue\n\n ## Older files\n else:\n if item in [\"calculated\"]:\n value = header_list[ii + 1]\n\n if \"floor\" in value:\n setattr(obj, \"floor\", True)\n value = value.replace(\"_floor\", \"\")\n setattr(obj, \"error_type\", value)\n\n if item in [\"of\"]:\n value = float(header_list[ii + 1].replace(\"%\", \"\"))\n setattr(obj, item_dict[\"error_value\"], value)\n\n if \"deg\" in item:\n setattr(\n self,\n item_dict[\"data_rotation\"],\n float(item.split(\"_\")[0]),\n )\n\n def _get_rotation_angle(self, header_line):\n # try to find rotation angle\n h_list = header_line.split()\n for hh, h_str in enumerate(h_list):\n if h_str.find(\"_deg\") > 0:\n try:\n self.rotation_angle = float(h_str[0 : h_str.find(\"_deg\")])\n except ValueError:\n pass\n\n def _get_inv_mode(self, inv_list):\n # find inversion mode\n for inv_key in list(self.inv_mode_dict.keys()):\n inv_mode_list = self.inv_mode_dict[inv_key]\n if len(inv_mode_list) != inv_list:\n continue\n else:\n tf_arr = 
np.zeros(len(inv_list), dtype=bool)\n\n for tf, data_inv in enumerate(inv_list):\n if data_inv in self.inv_mode_dict[inv_key]:\n tf_arr[tf] = True\n\n if np.alltrue(tf_arr):\n self.inv_mode = inv_key\n break\n\n def _read_line(self, line):\n \"\"\"\n read a single line\n :param line: DESCRIPTION\n :type line: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n .. note:: Pandas Groupby does not play nice with complex numbers so\n we will be keeping the real and imaginary part separate for now.\n\n \"\"\"\n\n line_dict = dict(\n [(key, value) for key, value in zip(self._df_keys, line.split())]\n )\n for key in [\n \"period\",\n \"latitude\",\n \"longitude\",\n \"model_east\",\n \"model_north\",\n \"model_elevation\",\n \"real\",\n \"imag\",\n \"error\",\n ]:\n line_dict[key] = float(line_dict[key])\n\n comp = line_dict.pop(\"comp\").lower()\n if comp.startswith(\"t\"):\n comp = comp.replace(\"t\", \"tz\")\n line_dict[f\"{comp}_real\"] = line_dict.pop(\"real\")\n line_dict[f\"{comp}_imag\"] = line_dict.pop(\"imag\")\n line_dict[f\"{comp}_model_error\"] = line_dict.pop(\"error\")\n if line_dict[f\"{comp}_model_error\"] > 1e10:\n line_dict[f\"{comp}_model_error\"] = np.nan\n\n return line_dict\n\n def read_data_file(self, data_fn):\n \"\"\"\n\n :param data_fn: full path to data file name\n :type data_fn: string or Path\n :raises ValueError: If cannot compute component\n\n Fills attributes:\n * data_array\n * period_list\n * mt_dict\n\n .. code-block::\n\n >>> md = Data()\n >>> md.read_data_file(r\"/home/modem_data.dat\")\n >>> md\n ModEM Data Object:\n Number of stations: 169\n Number of periods: 22\n Period range:\n Min: 0.01 s\n Max: 15230.2 s\n Rotation angle: 0.0\n Data center:\n latitude: 39.6351 deg\n longitude: -119.8039 deg\n Elevation: 0.0 m\n Easting: 259368.9746 m\n Northing: 4391021.1981 m\n UTM zone: 11S\n Model EPSG: None\n Model UTM zone: None\n Impedance data: True\n Tipper data: True\n\n\n \"\"\"\n\n self.data_filename = Path(data_fn)\n\n if self.data_filename is None:\n raise ValueError(\"data_fn is None, enter a data file to read.\")\n elif not self.data_filename.is_file():\n raise ValueError(\n \"Could not find {0}, check path\".format(self.data_filename)\n )\n\n self.center_point = MTLocation()\n\n # open file get lines\n with open(self.data_filename, \"r\") as dfid:\n dlines = dfid.readlines()\n\n # read header information\n n_periods, n_stations = self._read_header(\n [line for line in dlines if \">\" in line or \"#\" in line]\n )\n\n # create a list of dictionaries to make into a pandas dataframe\n entries = []\n for dline in dlines:\n if \"#\" in dline or \">\" in dline:\n continue\n\n elif len(dline.split()) == len(self._df_keys):\n line_dict = self._read_line(dline)\n entries.append(line_dict)\n\n full_df = pd.DataFrame(entries)\n\n # group by period and station so that there is 1 row per period per station\n combined_df = full_df.groupby(\n [\"station\", \"period\"], as_index=False\n ).first()\n\n # combine real and imaginary\n cols = [c.split(\"_\")[0] for c in combined_df.columns if \"real\" in c]\n for col in cols:\n combined_df[col] = (\n combined_df[f\"{col}_real\"] + 1j * combined_df[f\"{col}_imag\"]\n )\n combined_df.drop(\n [f\"{col}_real\", f\"{col}_imag\"], axis=1, inplace=True\n )\n\n return MTDataFrame(combined_df)\n\n def fix_data_file(self, fn=None, n=3):\n \"\"\"\n A newer compiled version of Modem outputs slightly different headers\n This aims to convert that into the older format\n\n :param fn: DESCRIPTION, defaults to None\n :type fn: TYPE, optional\n 
:param n: DESCRIPTION, defaults to 3\n :type n: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n if fn:\n self.data_filename = Path(fn)\n with self.data_filename.open() as fid:\n lines = fid.readlines()\n\n def fix_line(line_list):\n return (\n \" \".join(\"\".join(line_list).replace(\"\\n\", \"\").split()) + \"\\n\"\n )\n\n h1 = fix_line(lines[0:n])\n h2 = fix_line(lines[n : 2 * n])\n\n find = None\n for index, line in enumerate(lines[2 * n + 1 :], start=2 * n + 1):\n if line.find(\"#\") >= 0:\n find = index\n break\n\n if find is not None:\n h3 = fix_line(lines[find : find + n])\n h4 = fix_line(lines[find + n : find + 2 * n])\n\n new_lines = (\n [h1, h2]\n + lines[2 * n : find]\n + [h3, h4]\n + lines[find + 2 * n :]\n )\n else:\n new_lines = [h1, h2] + lines[2 * n :]\n\n with self.data_filename.open(\"w\") as fid:\n fid.writelines(new_lines)\n\n return self.data_filename" }, { "identifier": "PlotRMS", "path": "mtpy/modeling/plots/plot_modem_rms.py", "snippet": "class PlotRMS(PlotBaseMaps):\n def __init__(self, dataframe, **kwargs):\n super().__init__(**kwargs)\n\n self.dataframe = dataframe\n self.dx = 0.035\n self.rms_min = 0\n self.rms_max = 5\n self.rms_step = 0.5\n self.plot_station = True\n self.station_id = None\n self.stack_bottom = False\n\n self.comp_list = [\n \"rms_zxx\",\n \"rms_zxy\",\n \"rms_zyx\",\n \"rms_zyy\",\n \"rms_tzx\",\n \"rms_tzy\",\n ]\n self.distance_multiplier = [\n (-0.5, 1),\n (0.5, 1),\n (-0.5, 0),\n (0.5, 0),\n (-0.5, -1),\n (0.5, -1),\n ]\n\n self.color_dict = {\n \"rms_z\": (0, 162 / 255, 255 / 255),\n \"rms_t\": (255 / 255, 162 / 255, 0),\n \"rms_zxx\": (136 / 255, 235 / 255, 193 / 255),\n \"rms_zxy\": (84 / 255, 189 / 255, 215 / 255),\n \"rms_zyx\": (136 / 255, 84 / 255, 215 / 255),\n \"rms_zyy\": (206 / 255, 84 / 255, 215 / 255),\n \"rms_tzx\": (215 / 255, 210 / 255, 84 / 255),\n \"rms_tzy\": (215 / 255, 154 / 255, 84 / 255),\n }\n\n self.label_dict = {\n \"rms_z\": \"Z\",\n \"rms_t\": \"Tipper\",\n \"rms_zxx\": \"$Z_{xx}$\",\n \"rms_zxy\": \"$Z_{xy}$\",\n \"rms_zyx\": \"$Z_{yx}$\",\n \"rms_zyy\": \"$Z_{yy}$\",\n \"rms_tzx\": \"$T_{zx}$\",\n \"rms_tzy\": \"$T_{zy}$\",\n }\n\n self.rms_cmap = \"jet\"\n\n self.subplot_left = 0.05\n self.subplot_right = 0.99\n self.subplot_bottom = 0.09\n self.subplot_top = 0.99\n\n self.box_size = 30\n\n self.cx_source = None\n self.cx_zoom = None\n if has_cx:\n self.cx_source = cx.providers.USGS.USTopo\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n @property\n def dataframe(self):\n return self._mt_dataframe.dataframe\n\n @dataframe.setter\n def dataframe(self, df):\n \"\"\"\n Set dataframe to an MTDataframe\n :param df: DESCRIPTION\n :type df: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if df is None:\n self._mt_dataframe = MTDataFrame()\n\n elif isinstance(df, (pd.DataFrame, MTDataFrame, np.ndarray)):\n self._mt_dataframe = MTDataFrame(df)\n\n else:\n raise TypeError(\n f\"Input must be a dataframe or MTDataFrame object not {type(df)}\"\n )\n\n @property\n def rms_cmap(self):\n return self._rms_cmap\n\n @rms_cmap.setter\n def rms_cmap(self, value):\n if isinstance(value, str):\n self._rms_cmap = cm.get_cmap(value)\n\n elif isinstance(value, colors.LinearSegmentedColormap):\n self._rms_cmap = value\n\n else:\n self._rms_cmap = cm.get_cmap(\"jet\")\n\n def _plot_rms_map(self):\n \"\"\"\n plot rms map\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n cb_norm = colors.BoundaryNorm(\n np.arange(\n self.rms_min, self.rms_max + self.rms_step, self.rms_step\n ),\n 
self.rms_cmap.N,\n )\n\n for dm, comp in zip(self.distance_multiplier, self.comp_list):\n for station in self.dataframe.station.unique():\n\n sdf = self._mt_dataframe.get_station_df(station)\n rms = sdf[comp].mean()\n self.ax1.scatter(\n sdf.longitude.iloc[0] + (self.dx / 2) * dm[0],\n sdf.latitude.iloc[0] + (self.dx / 2) * dm[1],\n c=rms,\n marker=\"s\",\n s=self.box_size,\n edgecolors=(0, 0, 0),\n cmap=self.rms_cmap,\n norm=cb_norm,\n )\n if self.plot_station:\n self.ax1.text(\n sdf.longitude.iloc[0],\n sdf.latitude.iloc[0] + self.dx,\n station,\n ha=\"center\",\n va=\"baseline\",\n clip_on=True,\n )\n\n if has_cx:\n if has_cx:\n try:\n cx_kwargs = {\"source\": self.cx_source, \"crs\": \"EPSG:4326\"}\n if self.cx_zoom is not None:\n cx_kwargs[\"zoom\"] = self.cx_zoom\n cx.add_basemap(\n self.ax1,\n **cx_kwargs,\n )\n except Exception as error:\n self.logger.warning(\n f\"Could not add base map because {error}\"\n )\n\n cb_ax, _ = mcb.make_axes(self.ax1, shrink=0.5)\n cb = mcb.ColorbarBase(cb_ax, cmap=self.rms_cmap, norm=cb_norm)\n\n @property\n def rms_per_period_all(self):\n \"\"\"\n RMS per period\n \"\"\"\n\n if self.dataframe is not None:\n rms_list = []\n for period in self.dataframe.period.unique():\n comp_df = self.dataframe.loc[\n self.dataframe.period == period,\n [\n \"rms_zxx\",\n \"rms_zxy\",\n \"rms_zyx\",\n \"rms_zyy\",\n \"rms_tzx\",\n \"rms_tzy\",\n ],\n ]\n\n mean_dict = {\"period\": period}\n for comp in comp_df.columns:\n mean_dict[comp] = comp_df.loc[:, comp].mean()\n\n rms_list.append(mean_dict)\n\n df = pd.DataFrame(rms_list)\n df = df.set_index(\"period\")\n df = df.sort_index()\n\n return df\n\n @property\n def rms_per_station(self):\n \"\"\"\n RMS per period\n \"\"\"\n\n if self.dataframe is not None:\n rms_list = []\n for station in self.dataframe.station.unique():\n z_df = self.dataframe.loc[\n self.dataframe.station == station,\n [\"rms_zxx\", \"rms_zxy\", \"rms_zyx\", \"rms_zyy\"],\n ]\n t_df = self.dataframe.loc[\n self.dataframe.station == station, [\"rms_tzx\", \"rms_tzy\"]\n ]\n\n rms_list.append(\n {\n \"station\": station,\n \"rms_z\": z_df.mean().mean(),\n \"rms_t\": t_df.mean().mean(),\n }\n )\n\n df = pd.DataFrame(rms_list)\n df = df.set_index(\"station\")\n df = df.sort_index()\n\n return df\n\n @property\n def rms_array(self):\n \"\"\"\n arrays for color maps\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n period_dict = dict(\n [\n (f\"{ff:.4g}\", ii)\n for ii, ff in enumerate(self.dataframe.period.unique())\n ]\n )\n\n station_dict = dict(\n [(ss, ii) for ii, ss in enumerate(self.dataframe.station.unique())]\n )\n\n rms_array = np.zeros(\n (\n self.dataframe.station.unique().size,\n self.dataframe.period.unique().size,\n 6,\n )\n )\n\n for row in self.dataframe.itertuples():\n p_index = period_dict[f\"{row.period:.4g}\"]\n s_index = station_dict[row.station]\n\n for ii, comp in enumerate(\n [\"zxx\", \"zxy\", \"zyx\", \"zyy\", \"tzx\", \"tzy\"]\n ):\n rms_array[s_index, p_index, ii] = getattr(row, f\"rms_{comp}\")\n\n return rms_array\n\n def _plot_colormesh(self):\n \"\"\"\n plot as color maps\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n x = self.dataframe.period.unique()\n y = np.arange(self.dataframe.station.unique().size)\n xg, yg = np.meshgrid(x, y)\n\n rms_array = self.rms_array.copy()\n\n fig = plt.figure()\n fig.subplotpars.hspace = 0.15\n fig.subplotpars.vspace = 0.15\n\n ax_list = []\n for ii in range(6):\n if ii == 0:\n ax = fig.add_subplot(3, 2, ii + 1)\n else:\n ax = fig.add_subplot(3, 2, ii + 1, 
sharex=ax_list[0])\n\n ax.pcolormesh(\n xg, yg, rms_array[:, :, ii], cmap=self.rms_cmap, vmin=0, vmax=5\n )\n ax.text(\n x[0],\n y[-3],\n self.label_dict[self.comp_list[ii]],\n ha=\"left\",\n va=\"bottom\",\n bbox={\"facecolor\": \"w\"},\n )\n ax.set_xscale(\"log\")\n\n ax_list.append(ax)\n\n for ax in ax_list[-2:]:\n ax.set_xlabel(\"Period (s)\")\n\n plt.show()\n return fig, ax_list\n\n def print_suspect_stations(self, rms_threshold=4):\n \"\"\"\n print stations that are suspect\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n red_begin = \"\\033[1;31;48m\"\n red_end = \"\\033[1;37;0m\"\n\n df = self.rms_per_station\n max_len = max([len(ii) for ii in df.index])\n\n for row in df.itertuples():\n if row.rms_z > rms_threshold or row.rms_t > rms_threshold:\n if row.rms_z > rms_threshold:\n z_value = f\"{red_begin}Z = {row.rms_z:<6.2f}{red_end}\"\n else:\n z_value = f\"Z = {row.rms_z:<6.2f}\"\n\n if row.rms_t > rms_threshold:\n t_value = f\"{red_begin}T = {row.rms_t:<6.2f}{red_end}\"\n else:\n t_value = f\"T = {row.rms_t:<6.2f}\"\n print(f\"{row.Index:<{max_len}} {z_value} {t_value}\")\n\n def _plot_by_period(self):\n \"\"\"\n plot by period\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n df = self.rms_per_period_all.copy()\n plot_list = []\n color_list = []\n for comp in df.columns:\n if not np.all(np.isnan(df[comp])):\n plot_list.append(comp)\n color_list.append(self.color_dict[comp])\n\n ax = df.plot.bar(\n y=plot_list,\n color=color_list,\n xlabel=\"Period (s)\",\n ylabel=\"normalized RMS\",\n grid=True,\n ax=self.ax2,\n )\n ax.set_axisbelow(True)\n\n ax.set_xticklabels(\n [f\"{float(x.get_text()):.4g}\" for x in ax.get_xticklabels()]\n )\n ax.tick_params(left=True)\n # ticks_loc = ax.get_yticks().tolist()\n # ax.yaxis.set_major_locator(ticker.FixedLocator(ticks_loc))\n # ax.set_yticklabels([f\"{x:.1f}\" for x in ticks_loc])\n\n return ax\n\n def _plot_by_station(self):\n \"\"\"\n plot by station\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n df = self.rms_per_station.copy()\n plot_list = []\n color_list = []\n for comp in df.columns:\n if not np.all(np.isnan(df[comp])):\n plot_list.append(comp)\n color_list.append(self.color_dict[comp])\n\n ax = df.plot.bar(\n y=plot_list,\n color=color_list,\n xlabel=\"Station\",\n ylabel=\"normalized RMS\",\n grid=True,\n ax=self.ax3,\n )\n\n ax.tick_params(left=True)\n # ticks_loc = ax.get_yticks().tolist()\n # ax.yaxis.set_major_locator(ticker.FixedLocator(ticks_loc))\n # ax.set_yticklabels([f\"{x:.1f}\" for x in ticks_loc])\n\n ax.set_axisbelow(True)\n\n return ax\n\n def _get_subplots(self, fig):\n\n if self.stack_bottom:\n gs1 = gridspec.GridSpec(2, 2, hspace=0.25, wspace=0.075)\n\n self.ax1 = fig.add_subplot(gs1[0, :], aspect=\"equal\")\n self.ax2 = fig.add_subplot(gs1[1, 0])\n self.ax3 = fig.add_subplot(gs1[1, 1])\n else:\n gs1 = gridspec.GridSpec(2, 2, hspace=0.35, wspace=0.075)\n\n self.ax1 = fig.add_subplot(gs1[:, 0], aspect=\"equal\")\n self.ax2 = fig.add_subplot(gs1[0, 1])\n self.ax3 = fig.add_subplot(gs1[1, 1])\n\n def plot(self, **kwargs):\n \"\"\"\n\n :param **kwargs: DESCRIPTION\n :type **kwargs: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n self._set_subplot_params()\n\n self.fig = plt.figure(\n self.fig_num, figsize=self.fig_size, dpi=self.fig_dpi\n )\n\n plt.clf()\n\n self._get_subplots(self.fig)\n\n self._plot_rms_map()\n self._plot_by_period()\n self._plot_by_station()" } ]
from pathlib import Path from .data import Data from mtpy.modeling.plots import PlotRMS import numpy as np import pandas as pd
14,083
""" ================== ModEM ================== residuals class to contain RMS information revised by JP 2017 revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # =============================================================================
""" ================== ModEM ================== residuals class to contain RMS information revised by JP 2017 revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # =============================================================================
class Residual(Data):
0
2023-10-11 22:24:50+00:00
16k
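Each record in this listing pairs a set of retrieved context snippets and a partial source file with the single held-out next line of that file; for the record ending here, that line is class Residual(Data):. As a rough, non-authoritative illustration of how such a record could be consumed, the Python sketch below assembles a completion prompt and scores a model by exact match on the first generated line. The dict key names, the per-snippet keys ("identifier", "path", "snippet"), and the generate_fn callable are assumptions made for the sketch, not guarantees about the stored format.

# Minimal sketch only; key names and generate_fn are assumed, not verified.
from typing import Callable, Dict, List


def build_prompt(record: Dict) -> str:
    # Label each retrieved snippet with its source path and identifier, then
    # append the import block and the cropped file body, which ends right
    # before the held-out next line.
    context_blocks: List[str] = [
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in record["context"]
    ]
    return "\n\n".join(
        context_blocks + [record["import_statement"], record["cropped_code"]]
    )


def next_line_exact_match(record: Dict, generate_fn: Callable[[str], str]) -> bool:
    # Compare only the first non-empty generated line against the gold next line.
    completion = generate_fn(build_prompt(record))
    lines = [ln for ln in completion.splitlines() if ln.strip()]
    predicted = lines[0].strip() if lines else ""
    return predicted == record["next_line"].strip()

Under these assumptions, an exact match for the record above requires the first generated line to be class Residual(Data):.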
Jacoo-ai/HIC-Yolov5
detect.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location=map_location) # load\n if fuse:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n else:\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse\n\n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]:\n m.inplace = inplace # pytorch 1.7.0 compatibility\n if type(m) is Detect:\n if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print(f'Ensemble created with {weights}\\n')\n for k in ['names']:\n setattr(model, k, getattr(model[-1], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n return model # return ensemble" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n if not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='')\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, 'Image Not Found ' + path\n print(f'image {self.count}/{self.nf} {path}: ', end='')\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources, 'r') as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n print(f'{i + 1}/{n}: {s}... ', end='')\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n print(f\" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n print('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n print('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] *= 0\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "apply_classifier", "path": "utils/general.py", "snippet": "def apply_classifier(x, model, img, im0):\n # Apply a second stage classifier to yolo outputs\n im0 = [im0] if isinstance(im0, np.ndarray) else im0\n for i, d in enumerate(x): # per image\n if d is not None and len(d):\n d = d.clone()\n\n # Reshape and pad cutouts\n b = xyxy2xywh(d[:, :4]) # boxes\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square\n b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad\n d[:, :4] = xywh2xyxy(b).long()\n\n # Rescale boxes from img_size to im0 size\n scale_coords(img.shape[2:], d[:, :4], im0[i].shape)\n\n # Classes\n pred_cls1 = d[:, 5].long()\n ims = []\n for j, a in enumerate(d): # per item\n cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]\n im = cv2.resize(cutout, (224, 224)) # BGR\n # cv2.imwrite('example%i.jpg' % j, cutout)\n\n im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32\n im /= 255.0 # 0 - 255 to 0.0 - 1.0\n ims.append(im)\n\n pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction\n x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections\n\n return x" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "check_suffix", "path": "utils/general.py", "snippet": "def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):\n # Check file(s) for acceptable suffixes\n if file and suffix:\n if isinstance(suffix, str):\n suffix = [suffix]\n for f in file if isinstance(file, (list, tuple)) else [file]:\n assert Path(f).suffix.lower() in suffix, f\"{msg}{f} acceptable suffix is {suffix}\"" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n suffix = path.suffix\n path = path.with_suffix('')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # update path\n dir = path if path.suffix == '' else path.parent # directory\n if not dir.exists() and mkdir:\n dir.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > 
conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "save_one_box", "path": "utils/general.py", "snippet": "def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):\n # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop\n xyxy = torch.tensor(xyxy).view(-1, 4)\n b = xyxy2xywh(xyxy) # boxes\n if square:\n b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square\n b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad\n xyxy = xywh2xyxy(b).long()\n clip_coords(xyxy, im.shape)\n crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]\n if save:\n cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop)\n return crop" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "set_logging", "path": "utils/general.py", "snippet": "def set_logging(rank=-1, verbose=True):\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output):\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16):\ndef plot_lr_scheduler(optimizer, scheduler, 
epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path('')):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):" }, { "identifier": "load_classifier", "path": "utils/torch_utils.py", "snippet": "def load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = torchvision.models.__dict__[name](pretrained=True)\n\n # ResNet model properties\n # input_size = [3, 224, 224]\n # input_space = 'RGB'\n # input_range = [0, 1]\n # mean = [0.485, 0.456, 0.406]\n # std = [0.229, 0.224, 0.225]\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
import argparse import os import sys import cv2 import numpy as np import torch import torch.backends.cudnn as cudnn import onnxruntime import tensorflow as tf from pathlib import Path from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync
11,144
if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5m.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model w = str(weights[0] if isinstance(weights, list) else weights) classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] check_suffix(w, suffixes) # check weights have acceptable suffix pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 if classify: # second-stage classifier modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() elif onnx: if dnn: # check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) else: check_requirements(('onnx', 'onnxruntime')) session = onnxruntime.InferenceSession(w, None) else: # TensorFlow models check_requirements(('tensorflow>=2.4.1',)) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), 
tf.nest.map_structure(x.graph.as_graph_element, outputs)) graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: model = tf.keras.models.load_model(w) elif tflite: interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: t1 = time_sync() if onnx: img = img.astype('float32') else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, 
line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
8
2023-10-12 08:52:01+00:00
16k
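The bare integer "8" recorded with the detect.py entry is consistent with an index into that record's context list: entry 8 (counting from 0) is the colorstr snippet, and the held-out line shown above indeed calls colorstr. Below is a minimal lookup sketch, assuming each record is a dict whose context entries are the {"identifier", "path", "snippet"} objects shown above and whose integer lives under a key such as "gold_snippet_index" (the key name is an assumption here).

# Minimal sketch only; the key name "gold_snippet_index" is assumed.
from typing import Dict, List, Optional


def gold_context(record: Dict) -> Optional[Dict]:
    # Return the context entry the held-out next line is expected to rely on,
    # or None when the index is absent or out of range.
    idx = record.get("gold_snippet_index")
    context: List[Dict] = record.get("context", [])
    if not isinstance(idx, int) or not 0 <= idx < len(context):
        return None
    return context[idx]


def gold_identifier(record: Dict) -> str:
    # Convenience accessor for reporting; under the assumptions above, the
    # detect.py record would yield "colorstr" (index 8 in its context list).
    entry = gold_context(record)
    return entry["identifier"] if entry is not None else "<missing>"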
OmicsML/scDiff
scdiff/ext/gears/gears.py
[ { "identifier": "GEARS_Model", "path": "scdiff/ext/gears/model.py", "snippet": "class GEARS_Model(torch.nn.Module):\n \"\"\"\n GEARS model\n\n \"\"\"\n\n def __init__(self, args):\n \"\"\"\n :param args: arguments dictionary\n \"\"\"\n\n super(GEARS_Model, self).__init__()\n self.args = args\n self.num_genes = args['num_genes']\n self.num_perts = args['num_perts']\n hidden_size = args['hidden_size']\n self.uncertainty = args['uncertainty']\n self.num_layers = args['num_go_gnn_layers']\n self.indv_out_hidden_size = args['decoder_hidden_size']\n self.num_layers_gene_pos = args['num_gene_gnn_layers']\n self.no_perturb = args['no_perturb']\n self.pert_emb_lambda = 0.2\n\n # perturbation positional embedding added only to the perturbed genes\n self.pert_w = nn.Linear(1, hidden_size)\n\n # gene/globel perturbation embedding dictionary lookup\n self.gene_emb = nn.Embedding(self.num_genes, hidden_size, max_norm=True)\n self.pert_emb = nn.Embedding(self.num_perts, hidden_size, max_norm=True)\n\n # transformation layer\n self.emb_trans = nn.ReLU()\n self.pert_base_trans = nn.ReLU()\n self.transform = nn.ReLU()\n self.emb_trans_v2 = MLP([hidden_size, hidden_size, hidden_size], last_layer_act='ReLU')\n self.pert_fuse = MLP([hidden_size, hidden_size, hidden_size], last_layer_act='ReLU')\n\n # gene co-expression GNN\n self.G_coexpress = args['G_coexpress'].to(args['device'])\n self.G_coexpress_weight = args['G_coexpress_weight'].to(args['device'])\n\n self.emb_pos = nn.Embedding(self.num_genes, hidden_size, max_norm=True)\n self.layers_emb_pos = torch.nn.ModuleList()\n for i in range(1, self.num_layers_gene_pos + 1):\n self.layers_emb_pos.append(SGConv(hidden_size, hidden_size, 1))\n\n\n self.sim_layers = torch.nn.ModuleList()\n for i in range(1, self.num_layers + 1):\n self.sim_layers.append(SGConv(hidden_size, hidden_size, 1))\n\n # decoder shared MLP\n self.recovery_w = MLP([hidden_size, hidden_size*2, hidden_size], last_layer_act='linear')\n\n # gene specific decoder\n self.indv_w1 = nn.Parameter(torch.rand(self.num_genes,\n hidden_size, 1))\n self.indv_b1 = nn.Parameter(torch.rand(self.num_genes, 1))\n self.act = nn.ReLU()\n nn.init.xavier_normal_(self.indv_w1)\n nn.init.xavier_normal_(self.indv_b1)\n\n # Cross gene MLP\n self.cross_gene_state = MLP([self.num_genes, hidden_size,\n hidden_size])\n # final gene specific decoder\n self.indv_w2 = nn.Parameter(torch.rand(1, self.num_genes,\n hidden_size+1))\n self.indv_b2 = nn.Parameter(torch.rand(1, self.num_genes))\n nn.init.xavier_normal_(self.indv_w2)\n nn.init.xavier_normal_(self.indv_b2)\n\n # batchnorms\n self.bn_emb = nn.BatchNorm1d(hidden_size)\n self.bn_pert_base = nn.BatchNorm1d(hidden_size)\n self.bn_pert_base_trans = nn.BatchNorm1d(hidden_size)\n\n # uncertainty mode\n if self.uncertainty:\n self.uncertainty_w = MLP([hidden_size, hidden_size*2, hidden_size, 1], last_layer_act='linear')\n\n def forward(self, data):\n \"\"\"\n Forward pass of the model\n \"\"\"\n x, pert_idx = data.x, data.pert_idx\n if self.no_perturb:\n out = x.reshape(-1, 1)\n out = torch.split(torch.flatten(out), self.num_genes)\n return torch.stack(out)\n else:\n num_graphs = len(data.batch.unique())\n\n # get base gene embeddings\n emb = self.gene_emb(torch.LongTensor(list(range(self.num_genes))\n ).repeat(num_graphs, ).to(self.args['device']))\n emb = self.bn_emb(emb)\n base_emb = self.emb_trans(emb)\n\n pos_emb = self.emb_pos(torch.LongTensor(list(range(self.num_genes))\n ).repeat(num_graphs, ).to(self.args['device']))\n for idx, layer in 
enumerate(self.layers_emb_pos):\n pos_emb = layer(pos_emb, self.G_coexpress, self.G_coexpress_weight)\n if idx < len(self.layers_emb_pos) - 1:\n pos_emb = pos_emb.relu()\n\n base_emb = base_emb + 0.2 * pos_emb\n base_emb = self.emb_trans_v2(base_emb)\n\n # get perturbation index and embeddings\n\n pert_index = []\n for idx, i in enumerate(pert_idx):\n for j in i:\n if j != -1:\n pert_index.append([idx, j])\n pert_index = torch.tensor(pert_index).T\n\n pert_global_emb = self.pert_emb(torch.LongTensor(list(range(self.num_perts))).to(self.args['device']))\n\n # augment global perturbation embedding with GNN\n for idx, layer in enumerate(self.sim_layers):\n pert_global_emb = layer(pert_global_emb, self.G_sim, self.G_sim_weight)\n if idx < self.num_layers - 1:\n pert_global_emb = pert_global_emb.relu()\n\n # add global perturbation embedding to each gene in each cell in the batch\n base_emb = base_emb.reshape(num_graphs, self.num_genes, -1)\n\n if pert_index.shape[0] != 0:\n # in case all samples in the batch are controls, then there is no indexing for pert_index.\n pert_track = {}\n for i, j in enumerate(pert_index[0]):\n if j.item() in pert_track:\n pert_track[j.item()] = pert_track[j.item()] + pert_global_emb[pert_index[1][i]]\n else:\n pert_track[j.item()] = pert_global_emb[pert_index[1][i]]\n\n if len(list(pert_track.values())) > 0:\n if len(list(pert_track.values())) == 1:\n # circumvent when batch size = 1 with single perturbation and cannot feed into MLP\n emb_total = self.pert_fuse(torch.stack(list(pert_track.values()) * 2))\n else:\n emb_total = self.pert_fuse(torch.stack(list(pert_track.values())))\n\n for idx, j in enumerate(pert_track.keys()):\n base_emb[j] = base_emb[j] + emb_total[idx]\n\n base_emb = base_emb.reshape(num_graphs * self.num_genes, -1)\n base_emb = self.bn_pert_base(base_emb)\n\n # apply the first MLP\n base_emb = self.transform(base_emb)\n out = self.recovery_w(base_emb)\n out = out.reshape(num_graphs, self.num_genes, -1)\n out = out.unsqueeze(-1) * self.indv_w1\n w = torch.sum(out, axis=2)\n out = w + self.indv_b1\n\n # Cross gene\n cross_gene_embed = self.cross_gene_state(out.reshape(num_graphs, self.num_genes, -1).squeeze(2))\n cross_gene_embed = cross_gene_embed.repeat(1, self.num_genes)\n\n cross_gene_embed = cross_gene_embed.reshape([num_graphs, self.num_genes, -1])\n cross_gene_out = torch.cat([out, cross_gene_embed], 2)\n\n cross_gene_out = cross_gene_out * self.indv_w2\n cross_gene_out = torch.sum(cross_gene_out, axis=2)\n out = cross_gene_out + self.indv_b2\n out = out.reshape(num_graphs * self.num_genes, -1) + x.reshape(-1, 1)\n out = torch.split(torch.flatten(out), self.num_genes)\n\n # uncertainty head\n if self.uncertainty:\n out_logvar = self.uncertainty_w(base_emb)\n out_logvar = torch.split(torch.flatten(out_logvar), self.num_genes)\n return torch.stack(out), torch.stack(out_logvar)\n\n return torch.stack(out)" }, { "identifier": "compute_metrics", "path": "scdiff/ext/gears/inference.py", "snippet": "def compute_metrics(results):\n \"\"\"\n Given results from a model run and the ground truth, compute metrics\n\n \"\"\"\n metrics = {}\n metrics_pert = {}\n\n metric2fct = {\n 'mse': mse,\n 'pearson': pearsonr\n }\n\n for m in metric2fct.keys():\n metrics[m] = []\n metrics[m + '_de'] = []\n\n for pert in np.unique(results['pert_cat']):\n\n metrics_pert[pert] = {}\n p_idx = np.where(results['pert_cat'] == pert)[0]\n\n for m, fct in metric2fct.items():\n if m == 'pearson':\n val = fct(results['pred'][p_idx].mean(0), results['truth'][p_idx].mean(0))[0]\n 
if np.isnan(val):\n val = 0\n else:\n val = fct(results['pred'][p_idx].mean(0), results['truth'][p_idx].mean(0))\n\n metrics_pert[pert][m] = val\n metrics[m].append(metrics_pert[pert][m])\n\n if pert != 'ctrl':\n\n for m, fct in metric2fct.items():\n if m == 'pearson':\n val = fct(results['pred_de'][p_idx].mean(0), results['truth_de'][p_idx].mean(0))[0]\n if np.isnan(val):\n val = 0\n else:\n val = fct(results['pred_de'][p_idx].mean(0), results['truth_de'][p_idx].mean(0))\n\n metrics_pert[pert][m + '_de'] = val\n metrics[m + '_de'].append(metrics_pert[pert][m + '_de'])\n\n else:\n for m, fct in metric2fct.items():\n metrics_pert[pert][m + '_de'] = 0\n\n for m in metric2fct.keys():\n\n metrics[m] = np.mean(metrics[m])\n metrics[m + '_de'] = np.mean(metrics[m + '_de'])\n\n return metrics, metrics_pert" }, { "identifier": "deeper_analysis", "path": "scdiff/ext/gears/inference.py", "snippet": "def deeper_analysis(adata, test_res, de_column_prefix='rank_genes_groups_cov', most_variable_genes=None):\n\n metric2fct = {\n 'pearson': pearsonr,\n 'mse': mse\n }\n\n pert_metric = {}\n\n # in silico modeling and upperbounding\n pert2pert_full_id = dict(adata.obs[['condition', 'condition_name']].values)\n geneid2name = dict(zip(adata.var.index.values, adata.var['gene_name']))\n geneid2idx = dict(zip(adata.var.index.values, range(len(adata.var.index.values))))\n\n # calculate mean expression for each condition\n unique_conditions = adata.obs.condition.unique()\n conditions2index = {}\n for i in unique_conditions:\n conditions2index[i] = np.where(adata.obs.condition == i)[0]\n\n condition2mean_expression = {}\n for i, j in conditions2index.items():\n condition2mean_expression[i] = np.mean(adata.X[j], axis=0)\n pert_list = np.array(list(condition2mean_expression.keys()))\n mean_expression = np.array(list(condition2mean_expression.values())).reshape(\n len(adata.obs.condition.unique()), adata.X.toarray().shape[1])\n ctrl = mean_expression[np.where(pert_list == 'ctrl')[0]]\n\n if most_variable_genes is None:\n most_variable_genes = np.argsort(np.std(mean_expression, axis=0))[-200:]\n\n # gene_list = adata.var['gene_name'].values\n\n for pert in np.unique(test_res['pert_cat']):\n pert_metric[pert] = {}\n de_idx = [geneid2idx[i] for i in adata.uns['rank_genes_groups_cov_all'][pert2pert_full_id[pert]][:20]]\n de_idx_200 = [geneid2idx[i] for i in adata.uns['rank_genes_groups_cov_all'][pert2pert_full_id[pert]][:200]]\n de_idx_100 = [geneid2idx[i] for i in adata.uns['rank_genes_groups_cov_all'][pert2pert_full_id[pert]][:100]]\n de_idx_50 = [geneid2idx[i] for i in adata.uns['rank_genes_groups_cov_all'][pert2pert_full_id[pert]][:50]]\n\n pert_idx = np.where(test_res['pert_cat'] == pert)[0]\n pred_mean = np.mean(test_res['pred_de'][pert_idx], axis=0).reshape(-1,)\n true_mean = np.mean(test_res['truth_de'][pert_idx], axis=0).reshape(-1,)\n\n direc_change = np.abs(np.sign(test_res['pred'][pert_idx].mean(0) - ctrl[0]) -\n np.sign(test_res['truth'][pert_idx].mean(0) - ctrl[0]))\n frac_correct_direction = len(np.where(direc_change == 0)[0])/len(geneid2name)\n pert_metric[pert]['frac_correct_direction_all'] = frac_correct_direction\n\n de_idx_map = {20: de_idx,\n 50: de_idx_50,\n 100: de_idx_100,\n 200: de_idx_200\n }\n\n for val in [20, 50, 100, 200]:\n\n direc_change = np.abs(\n np.sign(test_res['pred'][pert_idx].mean(0)[de_idx_map[val]] - ctrl[0][de_idx_map[val]])\n - np.sign(test_res['truth'][pert_idx].mean(0)[de_idx_map[val]] - ctrl[0][de_idx_map[val]])\n )\n frac_correct_direction = len(np.where(direc_change == 
0)[0])/val\n pert_metric[pert]['frac_correct_direction_' + str(val)] = frac_correct_direction\n\n mean = np.mean(test_res['truth_de'][pert_idx], axis=0)\n std = np.std(test_res['truth_de'][pert_idx], axis=0)\n min_ = np.min(test_res['truth_de'][pert_idx], axis=0)\n max_ = np.max(test_res['truth_de'][pert_idx], axis=0)\n q25 = np.quantile(test_res['truth_de'][pert_idx], 0.25, axis=0)\n q75 = np.quantile(test_res['truth_de'][pert_idx], 0.75, axis=0)\n q55 = np.quantile(test_res['truth_de'][pert_idx], 0.55, axis=0)\n q45 = np.quantile(test_res['truth_de'][pert_idx], 0.45, axis=0)\n q40 = np.quantile(test_res['truth_de'][pert_idx], 0.4, axis=0)\n q60 = np.quantile(test_res['truth_de'][pert_idx], 0.6, axis=0)\n\n zero_des = np.intersect1d(np.where(min_ == 0)[0], np.where(max_ == 0)[0])\n nonzero_des = np.setdiff1d(list(range(20)), zero_des)\n if len(nonzero_des) == 0:\n pass\n # pert that all de genes are 0...\n else:\n\n direc_change = np.abs(np.sign(pred_mean[nonzero_des] - ctrl[0][de_idx][nonzero_des]) -\n np.sign(true_mean[nonzero_des] - ctrl[0][de_idx][nonzero_des]))\n frac_correct_direction = len(np.where(direc_change == 0)[0])/len(nonzero_des)\n pert_metric[pert]['frac_correct_direction_20_nonzero'] = frac_correct_direction\n\n in_range = (pred_mean[nonzero_des] >= min_[nonzero_des]) & (pred_mean[nonzero_des] <= max_[nonzero_des])\n frac_in_range = sum(in_range)/len(nonzero_des)\n pert_metric[pert]['frac_in_range'] = frac_in_range\n\n in_range_5 = (pred_mean[nonzero_des] >= q45[nonzero_des]) & (pred_mean[nonzero_des] <= q55[nonzero_des])\n frac_in_range_45_55 = sum(in_range_5)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_45_55'] = frac_in_range_45_55\n\n in_range_10 = (pred_mean[nonzero_des] >= q40[nonzero_des]) & (pred_mean[nonzero_des] <= q60[nonzero_des])\n frac_in_range_40_60 = sum(in_range_10)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_40_60'] = frac_in_range_40_60\n\n in_range_25 = (pred_mean[nonzero_des] >= q25[nonzero_des]) & (pred_mean[nonzero_des] <= q75[nonzero_des])\n frac_in_range_25_75 = sum(in_range_25)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_25_75'] = frac_in_range_25_75\n\n zero_idx = np.where(std > 0)[0]\n sigma = (np.abs(pred_mean[zero_idx] - mean[zero_idx]))/(std[zero_idx])\n pert_metric[pert]['mean_sigma'] = np.mean(sigma)\n pert_metric[pert]['std_sigma'] = np.std(sigma)\n pert_metric[pert]['frac_sigma_below_1'] = 1 - len(np.where(sigma > 1)[0])/len(zero_idx)\n pert_metric[pert]['frac_sigma_below_2'] = 1 - len(np.where(sigma > 2)[0])/len(zero_idx)\n\n # correlation on delta\n p_idx = np.where(test_res['pert_cat'] == pert)[0]\n\n for m, fct in metric2fct.items():\n if m != 'mse':\n val = fct(test_res['pred'][p_idx].mean(0) - ctrl[0], test_res['truth'][p_idx].mean(0)-ctrl[0])[0]\n if np.isnan(val):\n val = 0\n\n pert_metric[pert][m + '_delta'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx] - ctrl[0][de_idx],\n test_res['truth'][p_idx].mean(0)[de_idx]-ctrl[0][de_idx])[0]\n if np.isnan(val):\n val = 0\n\n pert_metric[pert][m + '_delta_de'] = val\n\n # up fold changes > 10?\n pert_mean = np.mean(test_res['truth'][p_idx], axis=0).reshape(-1,)\n\n fold_change = pert_mean/ctrl\n fold_change[np.isnan(fold_change)] = 0\n fold_change[np.isinf(fold_change)] = 0\n # this is to remove the ones that are super low and the fold change becomes unmeaningful\n fold_change[0][np.where(pert_mean < 0.5)[0]] = 0\n\n o = np.where(fold_change[0] > 0)[0]\n\n pred_fc = test_res['pred'][p_idx].mean(0)[o]\n true_fc = test_res['truth'][p_idx].mean(0)[o]\n 
ctrl_fc = ctrl[0][o]\n\n if len(o) > 0:\n pert_metric[pert]['fold_change_gap_all'] = np.mean(np.abs(pred_fc/ctrl_fc - true_fc/ctrl_fc))\n\n o = np.intersect1d(np.where(fold_change[0] < 0.333)[0], np.where(fold_change[0] > 0)[0])\n\n pred_fc = test_res['pred'][p_idx].mean(0)[o]\n true_fc = test_res['truth'][p_idx].mean(0)[o]\n ctrl_fc = ctrl[0][o]\n\n if len(o) > 0:\n pert_metric[pert]['fold_change_gap_downreg_0.33'] = np.mean(np.abs(pred_fc/ctrl_fc - true_fc/ctrl_fc))\n\n o = np.intersect1d(np.where(fold_change[0] < 0.1)[0], np.where(fold_change[0] > 0)[0])\n\n pred_fc = test_res['pred'][p_idx].mean(0)[o]\n true_fc = test_res['truth'][p_idx].mean(0)[o]\n ctrl_fc = ctrl[0][o]\n\n if len(o) > 0:\n pert_metric[pert]['fold_change_gap_downreg_0.1'] = np.mean(np.abs(pred_fc/ctrl_fc - true_fc/ctrl_fc))\n\n o = np.where(fold_change[0] > 3)[0]\n\n pred_fc = test_res['pred'][p_idx].mean(0)[o]\n true_fc = test_res['truth'][p_idx].mean(0)[o]\n ctrl_fc = ctrl[0][o]\n\n if len(o) > 0:\n pert_metric[pert]['fold_change_gap_upreg_3'] = np.mean(np.abs(pred_fc/ctrl_fc - true_fc/ctrl_fc))\n\n o = np.where(fold_change[0] > 10)[0]\n\n pred_fc = test_res['pred'][p_idx].mean(0)[o]\n true_fc = test_res['truth'][p_idx].mean(0)[o]\n ctrl_fc = ctrl[0][o]\n\n if len(o) > 0:\n pert_metric[pert]['fold_change_gap_upreg_10'] = np.mean(np.abs(pred_fc/ctrl_fc - true_fc/ctrl_fc))\n\n # most variable genes\n for m, fct in metric2fct.items():\n if m != 'mse':\n val = fct(test_res['pred'][p_idx].mean(0)[most_variable_genes] - ctrl[0][most_variable_genes],\n test_res['truth'][p_idx].mean(0)[most_variable_genes]-ctrl[0][most_variable_genes])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_delta_top200_hvg'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[most_variable_genes],\n test_res['truth'][p_idx].mean(0)[most_variable_genes])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_top200_hvg'] = val\n else:\n val = fct(test_res['pred'][p_idx].mean(0)[most_variable_genes],\n test_res['truth'][p_idx].mean(0)[most_variable_genes])\n pert_metric[pert][m + '_top200_hvg'] = val\n\n # top 20/50/100/200 DEs\n for m, fct in metric2fct.items():\n if m != 'mse':\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx] - ctrl[0][de_idx],\n test_res['truth'][p_idx].mean(0)[de_idx]-ctrl[0][de_idx])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_delta_top20_de'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx], test_res['truth'][p_idx].mean(0)[de_idx])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_top20_de'] = val\n else:\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx] - ctrl[0][de_idx],\n test_res['truth'][p_idx].mean(0)[de_idx]-ctrl[0][de_idx])\n pert_metric[pert][m + '_top20_de'] = val\n\n for m, fct in metric2fct.items():\n if m != 'mse':\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_200] - ctrl[0][de_idx_200],\n test_res['truth'][p_idx].mean(0)[de_idx_200]-ctrl[0][de_idx_200])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_delta_top200_de'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_200], test_res['truth'][p_idx].mean(0)[de_idx_200])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_top200_de'] = val\n else:\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_200] - ctrl[0][de_idx_200],\n test_res['truth'][p_idx].mean(0)[de_idx_200]-ctrl[0][de_idx_200])\n pert_metric[pert][m + '_top200_de'] = val\n\n for m, fct in metric2fct.items():\n if m != 'mse':\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_100] - ctrl[0][de_idx_100],\n 
test_res['truth'][p_idx].mean(0)[de_idx_100]-ctrl[0][de_idx_100])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_delta_top100_de'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_100], test_res['truth'][p_idx].mean(0)[de_idx_100])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_top100_de'] = val\n else:\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_100] - ctrl[0][de_idx_100],\n test_res['truth'][p_idx].mean(0)[de_idx_100]-ctrl[0][de_idx_100])\n pert_metric[pert][m + '_top100_de'] = val\n\n for m, fct in metric2fct.items():\n if m != 'mse':\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_50] - ctrl[0][de_idx_50],\n test_res['truth'][p_idx].mean(0)[de_idx_50]-ctrl[0][de_idx_50])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_delta_top50_de'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_50], test_res['truth'][p_idx].mean(0)[de_idx_50])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_top50_de'] = val\n else:\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx_50] - ctrl[0][de_idx_50],\n test_res['truth'][p_idx].mean(0)[de_idx_50]-ctrl[0][de_idx_50])\n pert_metric[pert][m + '_top50_de'] = val\n\n return pert_metric" }, { "identifier": "evaluate", "path": "scdiff/ext/gears/inference.py", "snippet": "def evaluate(loader, model, uncertainty, device):\n \"\"\"\n Run model in inference mode using a given data loader\n \"\"\"\n\n model.eval()\n model.to(device)\n pert_cat = []\n pred = []\n truth = []\n pred_de = []\n truth_de = []\n results = {}\n logvar = []\n\n for itr, batch in enumerate(loader):\n\n batch.to(device)\n pert_cat.extend(batch.pert)\n\n with torch.no_grad():\n if uncertainty:\n p, unc = model(batch)\n logvar.extend(unc.cpu())\n else:\n p = model(batch)\n t = batch.y\n pred.extend(p.cpu())\n truth.extend(t.cpu())\n\n # Differentially expressed genes\n for itr, de_idx in enumerate(batch.de_idx):\n pred_de.append(p[itr, de_idx])\n truth_de.append(t[itr, de_idx])\n\n # all genes\n results['pert_cat'] = np.array(pert_cat)\n pred = torch.stack(pred)\n truth = torch.stack(truth)\n results['pred'] = pred.detach().cpu().numpy()\n results['truth'] = truth.detach().cpu().numpy()\n\n pred_de = torch.stack(pred_de)\n truth_de = torch.stack(truth_de)\n results['pred_de'] = pred_de.detach().cpu().numpy()\n results['truth_de'] = truth_de.detach().cpu().numpy()\n\n if uncertainty:\n results['logvar'] = torch.stack(logvar).detach().cpu().numpy()\n\n return results" }, { "identifier": "non_dropout_analysis", "path": "scdiff/ext/gears/inference.py", "snippet": "def non_dropout_analysis(adata, test_res):\n metric2fct = {\n 'pearson': pearsonr,\n 'mse': mse\n }\n\n pert_metric = {}\n\n # in silico modeling and upperbounding\n pert2pert_full_id = dict(adata.obs[['condition', 'condition_name']].values)\n # geneid2name = dict(zip(adata.var.index.values, adata.var['gene_name']))\n geneid2idx = dict(zip(adata.var.index.values, range(len(adata.var.index.values))))\n\n # calculate mean expression for each condition\n unique_conditions = adata.obs.condition.unique()\n conditions2index = {}\n for i in unique_conditions:\n conditions2index[i] = np.where(adata.obs.condition == i)[0]\n\n condition2mean_expression = {}\n for i, j in conditions2index.items():\n condition2mean_expression[i] = np.mean(adata.X[j], axis=0)\n pert_list = np.array(list(condition2mean_expression.keys()))\n mean_expression = np.array(list(condition2mean_expression.values())).reshape(\n len(adata.obs.condition.unique()), adata.X.toarray().shape[1])\n ctrl = 
mean_expression[np.where(pert_list == 'ctrl')[0]]\n\n # gene_list = adata.var['gene_name'].values\n\n for pert in np.unique(test_res['pert_cat']):\n pert_metric[pert] = {}\n\n pert_idx = np.where(test_res['pert_cat'] == pert)[0]\n de_idx = [geneid2idx[i] for i in adata.uns['top_non_dropout_de_20'][pert2pert_full_id[pert]]]\n non_zero_idx = adata.uns['non_zeros_gene_idx'][pert2pert_full_id[pert]]\n non_dropout_gene_idx = adata.uns['non_dropout_gene_idx'][pert2pert_full_id[pert]]\n\n direc_change = np.abs(np.sign(test_res['pred'][pert_idx].mean(\n 0)[de_idx] - ctrl[0][de_idx]) - np.sign(test_res['truth'][pert_idx].mean(0)[de_idx] - ctrl[0][de_idx]))\n frac_correct_direction = len(np.where(direc_change == 0)[0])/len(de_idx)\n pert_metric[pert]['frac_correct_direction_top20_non_dropout'] = frac_correct_direction\n\n frac_direction_opposite = len(np.where(direc_change == 2)[0])/len(de_idx)\n pert_metric[pert]['frac_opposite_direction_top20_non_dropout'] = frac_direction_opposite\n\n frac_direction_opposite = len(np.where(direc_change == 1)[0])/len(de_idx)\n pert_metric[pert]['frac_0/1_direction_top20_non_dropout'] = frac_direction_opposite\n\n direc_change = np.abs(\n np.sign(test_res['pred'][pert_idx].mean(0)[non_zero_idx] - ctrl[0][non_zero_idx])\n - np.sign(test_res['truth'][pert_idx].mean(0)[non_zero_idx] - ctrl[0][non_zero_idx])\n )\n frac_correct_direction = len(np.where(direc_change == 0)[0])/len(non_zero_idx)\n pert_metric[pert]['frac_correct_direction_non_zero'] = frac_correct_direction\n\n frac_direction_opposite = len(np.where(direc_change == 2)[0])/len(non_zero_idx)\n pert_metric[pert]['frac_opposite_direction_non_zero'] = frac_direction_opposite\n\n frac_direction_opposite = len(np.where(direc_change == 1)[0])/len(non_zero_idx)\n pert_metric[pert]['frac_0/1_direction_non_zero'] = frac_direction_opposite\n\n direc_change = np.abs(\n np.sign(test_res['pred'][pert_idx].mean(0)[non_dropout_gene_idx] - ctrl[0][non_dropout_gene_idx])\n - np.sign(test_res['truth'][pert_idx].mean(0)[non_dropout_gene_idx] - ctrl[0][non_dropout_gene_idx])\n )\n frac_correct_direction = len(np.where(direc_change == 0)[0])/len(non_dropout_gene_idx)\n pert_metric[pert]['frac_correct_direction_non_dropout'] = frac_correct_direction\n\n frac_direction_opposite = len(np.where(direc_change == 2)[0])/len(non_dropout_gene_idx)\n pert_metric[pert]['frac_opposite_direction_non_dropout'] = frac_direction_opposite\n\n frac_direction_opposite = len(np.where(direc_change == 1)[0])/len(non_dropout_gene_idx)\n pert_metric[pert]['frac_0/1_direction_non_dropout'] = frac_direction_opposite\n\n mean = np.mean(test_res['truth'][pert_idx][:, de_idx], axis=0)\n std = np.std(test_res['truth'][pert_idx][:, de_idx], axis=0)\n min_ = np.min(test_res['truth'][pert_idx][:, de_idx], axis=0)\n max_ = np.max(test_res['truth'][pert_idx][:, de_idx], axis=0)\n q25 = np.quantile(test_res['truth'][pert_idx][:, de_idx], 0.25, axis=0)\n q75 = np.quantile(test_res['truth'][pert_idx][:, de_idx], 0.75, axis=0)\n q55 = np.quantile(test_res['truth'][pert_idx][:, de_idx], 0.55, axis=0)\n q45 = np.quantile(test_res['truth'][pert_idx][:, de_idx], 0.45, axis=0)\n q40 = np.quantile(test_res['truth'][pert_idx][:, de_idx], 0.4, axis=0)\n q60 = np.quantile(test_res['truth'][pert_idx][:, de_idx], 0.6, axis=0)\n\n zero_des = np.intersect1d(np.where(min_ == 0)[0], np.where(max_ == 0)[0])\n nonzero_des = np.setdiff1d(list(range(20)), zero_des)\n\n if len(nonzero_des) == 0:\n pass\n # pert that all de genes are 0...\n else:\n pred_mean = 
np.mean(test_res['pred'][pert_idx][:, de_idx], axis=0).reshape(-1,)\n # true_mean = np.mean(test_res['truth'][pert_idx][:, de_idx], axis=0).reshape(-1,)\n\n in_range = (pred_mean[nonzero_des] >= min_[nonzero_des]) & (pred_mean[nonzero_des] <= max_[nonzero_des])\n frac_in_range = sum(in_range)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_non_dropout'] = frac_in_range\n\n in_range_5 = (pred_mean[nonzero_des] >= q45[nonzero_des]) & (pred_mean[nonzero_des] <= q55[nonzero_des])\n frac_in_range_45_55 = sum(in_range_5)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_45_55_non_dropout'] = frac_in_range_45_55\n\n in_range_10 = (pred_mean[nonzero_des] >= q40[nonzero_des]) & (pred_mean[nonzero_des] <= q60[nonzero_des])\n frac_in_range_40_60 = sum(in_range_10)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_40_60_non_dropout'] = frac_in_range_40_60\n\n in_range_25 = (pred_mean[nonzero_des] >= q25[nonzero_des]) & (pred_mean[nonzero_des] <= q75[nonzero_des])\n frac_in_range_25_75 = sum(in_range_25)/len(nonzero_des)\n pert_metric[pert]['frac_in_range_25_75_non_dropout'] = frac_in_range_25_75\n\n zero_idx = np.where(std > 0)[0]\n sigma = (np.abs(pred_mean[zero_idx] - mean[zero_idx]))/(std[zero_idx])\n pert_metric[pert]['mean_sigma_non_dropout'] = np.mean(sigma)\n pert_metric[pert]['std_sigma_non_dropout'] = np.std(sigma)\n pert_metric[pert]['frac_sigma_below_1_non_dropout'] = 1 - len(np.where(sigma > 1)[0])/len(zero_idx)\n pert_metric[pert]['frac_sigma_below_2_non_dropout'] = 1 - len(np.where(sigma > 2)[0])/len(zero_idx)\n\n p_idx = np.where(test_res['pert_cat'] == pert)[0]\n for m, fct in metric2fct.items():\n if m != 'mse':\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx] - ctrl[0][de_idx],\n test_res['truth'][p_idx].mean(0)[de_idx]-ctrl[0][de_idx])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_delta_top20_de_non_dropout'] = val\n\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx], test_res['truth'][p_idx].mean(0)[de_idx])[0]\n if np.isnan(val):\n val = 0\n pert_metric[pert][m + '_top20_de_non_dropout'] = val\n else:\n val = fct(test_res['pred'][p_idx].mean(0)[de_idx] - ctrl[0][de_idx],\n test_res['truth'][p_idx].mean(0)[de_idx]-ctrl[0][de_idx])\n pert_metric[pert][m + '_top20_de_non_dropout'] = val\n\n return pert_metric" }, { "identifier": "GeneSimNetwork", "path": "scdiff/ext/gears/utils.py", "snippet": "class GeneSimNetwork():\n \"\"\"\n GeneSimNetwork class\n\n Args:\n edge_list (pd.DataFrame): edge list of the network\n gene_list (list): list of gene names\n node_map (dict): dictionary mapping gene names to node indices\n\n Attributes:\n edge_index (torch.Tensor): edge index of the network\n edge_weight (torch.Tensor): edge weight of the network\n G (nx.DiGraph): networkx graph object\n \"\"\"\n\n def __init__(self, edge_list, gene_list, node_map):\n \"\"\"\n Initialize GeneSimNetwork class\n \"\"\"\n\n self.edge_list = edge_list\n self.G = nx.from_pandas_edgelist(self.edge_list, source='source',\n target='target', edge_attr=['importance'],\n create_using=nx.DiGraph())\n self.gene_list = gene_list\n for n in self.gene_list:\n if n not in self.G.nodes():\n self.G.add_node(n)\n\n edge_index_ = [(node_map[e[0]], node_map[e[1]]) for e in\n self.G.edges]\n self.edge_index = torch.tensor(edge_index_, dtype=torch.long).T\n #self.edge_weight = torch.Tensor(self.edge_list['importance'].values)\n\n edge_attr = nx.get_edge_attributes(self.G, 'importance')\n importance = np.array([edge_attr[e] for e in self.G.edges])\n self.edge_weight = torch.Tensor(importance)" }, { 
"identifier": "create_cell_graph_dataset_for_prediction", "path": "scdiff/ext/gears/utils.py", "snippet": "def create_cell_graph_dataset_for_prediction(pert_gene, ctrl_adata, gene_names,\n device, num_samples=300):\n \"\"\"\n Create a perturbation specific cell graph dataset for inference\n\n Args:\n pert_gene (list): list of perturbations\n ctrl_adata (anndata): control anndata\n gene_names (list): list of gene names\n device (torch.device): device to use\n num_samples (int): number of samples to use for inference (default: 300)\n\n \"\"\"\n\n # Get the indices (and signs) of applied perturbation\n pert_idx = [np.where(p == np.array(gene_names))[0][0] for p in pert_gene]\n\n Xs = ctrl_adata[np.random.randint(0, len(ctrl_adata), num_samples), :].X.toarray()\n # Create cell graphs\n cell_graphs = [create_cell_graph_for_prediction(X, pert_idx, pert_gene).to(device) for X in Xs]\n return cell_graphs" }, { "identifier": "get_GI_genes_idx", "path": "scdiff/ext/gears/utils.py", "snippet": "def get_GI_genes_idx(adata, GI_gene_file):\n \"\"\"\n Optional: Reads a file containing a list of GI genes (usually those\n with high mean expression)\n\n Args:\n adata (anndata): anndata object\n GI_gene_file (str): file containing GI genes (generally corresponds\n to genes with high mean expression)\n \"\"\"\n # Genes used for linear model fitting\n GI_genes = np.load(GI_gene_file, allow_pickle=True)\n GI_genes_idx = np.where([g in GI_genes for g in adata.var.gene_name.values])[0]\n\n return GI_genes_idx" }, { "identifier": "get_GI_params", "path": "scdiff/ext/gears/utils.py", "snippet": "def get_GI_params(preds, combo):\n \"\"\"\n Get GI parameters\n\n Args:\n preds (dict): dictionary of predictions\n combo (list): list of perturbations\n\n \"\"\"\n singles_expr = np.array([preds[combo[0]], preds[combo[1]]]).T\n first_expr = np.array(preds[combo[0]]).T\n second_expr = np.array(preds[combo[1]]).T\n double_expr = np.array(preds[combo[0]+'_'+combo[1]]).T\n\n return get_coeffs(singles_expr, first_expr, second_expr, double_expr)" }, { "identifier": "get_mean_control", "path": "scdiff/ext/gears/utils.py", "snippet": "def get_mean_control(adata):\n \"\"\"\n Get mean control expression\n \"\"\"\n mean_ctrl_exp = adata[adata.obs['condition'] == 'ctrl'].to_df().mean()\n return mean_ctrl_exp" }, { "identifier": "get_similarity_network", "path": "scdiff/ext/gears/utils.py", "snippet": "def get_similarity_network(network_type, adata, threshold, k,\n data_path, data_name, split, seed, train_gene_set_size,\n set2conditions, default_pert_graph=True, pert_list=None):\n\n if network_type == 'co-express':\n df_out = get_coexpression_network_from_train(adata, threshold, k,\n data_path, data_name, split,\n seed, train_gene_set_size,\n set2conditions)\n elif network_type == 'go':\n if default_pert_graph:\n server_path = 'https://dataverse.harvard.edu/api/access/datafile/6934319'\n tar_data_download_wrapper(server_path,\n os.path.join(data_path, 'go_essential_all'),\n data_path)\n df_jaccard = pd.read_csv(os.path.join(data_path,\n 'go_essential_all/go_essential_all.csv'))\n\n else:\n df_jaccard = make_GO(data_path, pert_list, data_name)\n\n df_out = df_jaccard.groupby('target').apply(lambda x: x.nlargest(k + 1,\n ['importance'])).reset_index(drop=True)\n\n return df_out" }, { "identifier": "loss_fct", "path": "scdiff/ext/gears/utils.py", "snippet": "def loss_fct(pred, y, perts, ctrl=None, direction_lambda=1e-3, dict_filter=None):\n \"\"\"\n Main MSE Loss function, includes direction loss\n\n Args:\n pred (torch.tensor): predicted 
values\n y (torch.tensor): true values\n perts (list): list of perturbations\n ctrl (str): control perturbation\n direction_lambda (float): direction loss weight hyperparameter\n dict_filter (dict): dictionary of perturbations to conditions\n\n \"\"\"\n gamma = 2\n mse_p = torch.nn.MSELoss()\n perts = np.array(perts)\n losses = torch.tensor(0.0, requires_grad=True).to(pred.device)\n\n for p in set(perts):\n pert_idx = np.where(perts == p)[0]\n\n # during training, we remove the all zero genes into calculation of loss.\n # this gives a cleaner direction loss. empirically, the performance stays the same.\n if p != 'ctrl':\n retain_idx = dict_filter[p]\n pred_p = pred[pert_idx][:, retain_idx]\n y_p = y[pert_idx][:, retain_idx]\n else:\n pred_p = pred[pert_idx]\n y_p = y[pert_idx]\n losses = losses + torch.sum((pred_p - y_p)**(2 + gamma))/pred_p.shape[0]/pred_p.shape[1]\n\n # direction loss\n if (p != 'ctrl'):\n losses = losses + torch.sum(direction_lambda *\n (torch.sign(y_p - ctrl[retain_idx]) -\n torch.sign(pred_p - ctrl[retain_idx]))**2) /\\\n pred_p.shape[0]/pred_p.shape[1]\n else:\n losses = losses + torch.sum(direction_lambda * (torch.sign(y_p - ctrl) -\n torch.sign(pred_p - ctrl))**2) /\\\n pred_p.shape[0]/pred_p.shape[1]\n return losses/(len(set(perts)))" }, { "identifier": "print_sys", "path": "scdiff/ext/gears/utils.py", "snippet": "def print_sys(s):\n \"\"\"system print\n\n Args:\n s (str): the string to print\n \"\"\"\n print(s, flush=True, file=sys.stderr)" }, { "identifier": "uncertainty_loss_fct", "path": "scdiff/ext/gears/utils.py", "snippet": "def uncertainty_loss_fct(pred, logvar, y, perts, reg=0.1, ctrl=None,\n direction_lambda=1e-3, dict_filter=None):\n \"\"\"\n Uncertainty loss function\n\n Args:\n pred (torch.tensor): predicted values\n logvar (torch.tensor): log variance\n y (torch.tensor): true values\n perts (list): list of perturbations\n reg (float): regularization parameter\n ctrl (str): control perturbation\n direction_lambda (float): direction loss weight hyperparameter\n dict_filter (dict): dictionary of perturbations to conditions\n\n \"\"\"\n gamma = 2\n perts = np.array(perts)\n losses = torch.tensor(0.0, requires_grad=True).to(pred.device)\n for p in set(perts):\n if p != 'ctrl':\n retain_idx = dict_filter[p]\n pred_p = pred[np.where(perts == p)[0]][:, retain_idx]\n y_p = y[np.where(perts == p)[0]][:, retain_idx]\n logvar_p = logvar[np.where(perts == p)[0]][:, retain_idx]\n else:\n pred_p = pred[np.where(perts == p)[0]]\n y_p = y[np.where(perts == p)[0]]\n logvar_p = logvar[np.where(perts == p)[0]]\n\n # uncertainty based loss\n losses += torch.sum((pred_p - y_p)**(2 + gamma) + reg * torch.exp(\n -logvar_p) * (pred_p - y_p)**(2 + gamma))/pred_p.shape[0]/pred_p.shape[1]\n\n # direction loss\n if p != 'ctrl':\n losses += torch.sum(direction_lambda *\n (torch.sign(y_p - ctrl[retain_idx]) -\n torch.sign(pred_p - ctrl[retain_idx]))**2) /\\\n pred_p.shape[0]/pred_p.shape[1]\n else:\n losses += torch.sum(direction_lambda *\n (torch.sign(y_p - ctrl) -\n torch.sign(pred_p - ctrl))**2) /\\\n pred_p.shape[0]/pred_p.shape[1]\n\n return losses/(len(set(perts)))" } ]
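The loss_fct and uncertainty_loss_fct snippets in the context list above combine an exponentiated MSE term (exponent 2 + gamma, with gamma = 2) with a direction penalty that compares the sign of the predicted change against the sign of the true change relative to control expression. The following is a minimal standalone sketch of that objective for a single perturbation group; the function name, tensor shapes, and random inputs are illustrative assumptions, not part of this dataset row.

import torch

# Sketch of the objective described in the loss_fct snippet above:
# an exponentiated MSE term plus a sign-agreement (direction) penalty.
# This covers a single perturbation group; the original averages over groups.
def direction_aware_loss(pred, y, ctrl, direction_lambda=1e-3, gamma=2):
    # pred, y: (cells, genes); ctrl: (genes,) mean control expression
    n = pred.shape[0] * pred.shape[1]
    mse_term = torch.sum((pred - y) ** (2 + gamma)) / n
    direction_term = torch.sum(
        direction_lambda * (torch.sign(y - ctrl) - torch.sign(pred - ctrl)) ** 2
    ) / n
    return mse_term + direction_term

# Illustrative call with random tensors (assumed shapes only).
pred, y, ctrl = torch.rand(4, 10), torch.rand(4, 10), torch.rand(10)
print(direction_aware_loss(pred, y, ctrl))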
from copy import deepcopy from torch.optim.lr_scheduler import StepLR from .model import GEARS_Model from .inference import ( compute_metrics, deeper_analysis, evaluate, non_dropout_analysis, ) from .utils import ( GeneSimNetwork, create_cell_graph_dataset_for_prediction, get_GI_genes_idx, get_GI_params, get_mean_control, get_similarity_network, loss_fct, print_sys, uncertainty_loss_fct, ) from collections import OrderedDict from torch_geometric.data import DataLoader import os import pickle import warnings import numpy as np import torch import torch.nn as nn import torch.optim as optim import wandb import seaborn as sns import matplotlib.pyplot as plt
14147
path: str path to save the model Returns ------- None """ if not os.path.exists(path): os.mkdir(path) if self.config is None: raise ValueError('No model is initialized...') with open(os.path.join(path, 'config.pkl'), 'wb') as f: pickle.dump(self.config, f) torch.save(self.best_model.state_dict(), os.path.join(path, 'model.pt')) def predict(self, pert_list): """ Predict the transcriptome given a list of genes/gene combinations being perturbed Parameters ---------- pert_list: list list of genes/gene combiantions to be perturbed Returns ------- results_pred: dict dictionary of predicted transcriptome results_logvar: dict dictionary of uncertainty score """ # given a list of single/combo genes, return the transcriptome # if uncertainty mode is on, also return uncertainty score. self.ctrl_adata = self.adata[self.adata.obs['condition'] == 'ctrl'] for pert in pert_list: for i in pert: if i not in self.pert_list: raise ValueError(i + " is not in the perturbation graph. " "Please select from GEARS.pert_list!") if self.config['uncertainty']: results_logvar = {} self.best_model = self.best_model.to(self.device) self.best_model.eval() results_pred = {} results_logvar_sum = {} for pert in pert_list: try: # If prediction is already saved, then skip inference results_pred['_'.join(pert)] = self.saved_pred['_'.join(pert)] if self.config['uncertainty']: results_logvar_sum['_'.join(pert)] = self.saved_logvar_sum['_'.join(pert)] continue except: pass cg = create_cell_graph_dataset_for_prediction(pert, self.ctrl_adata, self.pert_list, self.device) loader = DataLoader(cg, 300, shuffle=False) batch = next(iter(loader)) batch.to(self.device) with torch.no_grad(): if self.config['uncertainty']: p, unc = self.best_model(batch) results_logvar['_'.join(pert)] = np.mean(unc.detach().cpu().numpy(), axis=0) results_logvar_sum['_'.join(pert)] = np.exp(-np.mean(results_logvar['_'.join(pert)])) else: p = self.best_model(batch) results_pred['_'.join(pert)] = np.mean(p.detach().cpu().numpy(), axis=0) self.saved_pred.update(results_pred) if self.config['uncertainty']: self.saved_logvar_sum.update(results_logvar_sum) return results_pred, results_logvar_sum else: return results_pred def GI_predict(self, combo, GI_genes_file='./genes_with_hi_mean.npy'): """ Predict the GI scores following perturbation of a given gene combination Parameters ---------- combo: list list of genes to be perturbed GI_genes_file: str path to the file containing genes with high mean expression Returns ------- GI scores for the given combinatorial perturbation based on GEARS predictions """ # if uncertainty mode is on, also return uncertainty score. try: # If prediction is already saved, then skip inference pred = {} pred[combo[0]] = self.saved_pred[combo[0]] pred[combo[1]] = self.saved_pred[combo[1]] pred['_'.join(combo)] = self.saved_pred['_'.join(combo)] except: if self.config['uncertainty']: pred = self.predict([[combo[0]], [combo[1]], combo])[0] else: pred = self.predict([[combo[0]], [combo[1]], combo])
warnings.filterwarnings("ignore") class GEARS: """ GEARS base model class """ def __init__(self, pert_data, device='cuda', weight_bias_track=False, proj_name='GEARS', exp_name='GEARS'): """ Initialize GEARS model Parameters ---------- pert_data: PertData object dataloader for perturbation data device: str Device to run the model on. Default: 'cuda' weight_bias_track: bool Whether to track performance on wandb. Default: False proj_name: str Project name for wandb. Default: 'GEARS' exp_name: str Experiment name for wandb. Default: 'GEARS' Returns ------- None """ self.weight_bias_track = weight_bias_track if self.weight_bias_track: wandb.init(project=proj_name, name=exp_name) self.wandb = wandb else: self.wandb = None self.device = device self.config = None self.dataloader = pert_data.dataloader self.adata = pert_data.adata self.node_map = pert_data.node_map self.node_map_pert = pert_data.node_map_pert self.data_path = pert_data.data_path self.dataset_name = pert_data.dataset_name self.split = pert_data.split self.seed = pert_data.seed self.train_gene_set_size = pert_data.train_gene_set_size self.set2conditions = pert_data.set2conditions self.subgroup = pert_data.subgroup self.gene_list = pert_data.gene_names.values.tolist() self.pert_list = pert_data.pert_names.tolist() self.num_genes = len(self.gene_list) self.num_perts = len(self.pert_list) self.default_pert_graph = pert_data.default_pert_graph self.saved_pred = {} self.saved_logvar_sum = {} self.ctrl_expression = torch.tensor( np.mean(self.adata.X[self.adata.obs.condition == 'ctrl'], axis=0)).reshape(-1, ).to(self.device) pert_full_id2pert = dict(self.adata.obs[['condition_name', 'condition']].values) self.dict_filter = {pert_full_id2pert[i]: j for i, j in self.adata.uns['non_zeros_gene_idx'].items() if i in pert_full_id2pert} self.ctrl_adata = self.adata[self.adata.obs['condition'] == 'ctrl'] gene_dict = {g: i for i, g in enumerate(self.gene_list)} self.pert2gene = {p: gene_dict[pert] for p, pert in enumerate(self.pert_list) if pert in self.gene_list} def tunable_parameters(self): """ Return the tunable parameters of the model Returns ------- dict Tunable parameters of the model """ return {'hidden_size': 'hidden dimension, default 64', 'num_go_gnn_layers': 'number of GNN layers for GO graph, default 1', 'num_gene_gnn_layers': 'number of GNN layers for co-expression gene graph, default 1', 'decoder_hidden_size': 'hidden dimension for gene-specific decoder, default 16', 'num_similar_genes_go_graph': 'number of maximum similar K genes in the GO graph, default 20', 'num_similar_genes_co_express_graph': 'number of maximum similar K genes in the co expression graph, default 20', 'coexpress_threshold': 'pearson correlation threshold when constructing coexpression graph, default 0.4', 'uncertainty': 'whether or not to turn on uncertainty mode, default False', 'uncertainty_reg': 'regularization term to balance uncertainty loss and prediction loss, default 1', 'direction_lambda': 'regularization term to balance direction loss and prediction loss, default 1' } def model_initialize(self, hidden_size=64, num_go_gnn_layers=1, num_gene_gnn_layers=1, decoder_hidden_size=16, num_similar_genes_go_graph=20, num_similar_genes_co_express_graph=20, coexpress_threshold=0.4, uncertainty=False, uncertainty_reg=1, direction_lambda=1e-1, G_go=None, G_go_weight=None, G_coexpress=None, G_coexpress_weight=None, no_perturb=False, ): """ Initialize the model Parameters ---------- hidden_size: int hidden dimension, default 64 num_go_gnn_layers: int number of GNN 
layers for GO graph, default 1 num_gene_gnn_layers: int number of GNN layers for co-expression gene graph, default 1 decoder_hidden_size: int hidden dimension for gene-specific decoder, default 16 num_similar_genes_go_graph: int number of maximum similar K genes in the GO graph, default 20 num_similar_genes_co_express_graph: int number of maximum similar K genes in the co expression graph, default 20 coexpress_threshold: float pearson correlation threshold when constructing coexpression graph, default 0.4 uncertainty: bool whether or not to turn on uncertainty mode, default False uncertainty_reg: float regularization term to balance uncertainty loss and prediction loss, default 1 direction_lambda: float regularization term to balance direction loss and prediction loss, default 1 G_go: scipy.sparse.csr_matrix GO graph, default None G_go_weight: scipy.sparse.csr_matrix GO graph edge weights, default None G_coexpress: scipy.sparse.csr_matrix co-expression graph, default None G_coexpress_weight: scipy.sparse.csr_matrix co-expression graph edge weights, default None no_perturb: bool predict no perturbation condition, default False Returns ------- None """ self.config = {'hidden_size': hidden_size, 'num_go_gnn_layers': num_go_gnn_layers, 'num_gene_gnn_layers': num_gene_gnn_layers, 'decoder_hidden_size': decoder_hidden_size, 'num_similar_genes_go_graph': num_similar_genes_go_graph, 'num_similar_genes_co_express_graph': num_similar_genes_co_express_graph, 'coexpress_threshold': coexpress_threshold, 'uncertainty': uncertainty, 'uncertainty_reg': uncertainty_reg, 'direction_lambda': direction_lambda, 'G_go': G_go, 'G_go_weight': G_go_weight, 'G_coexpress': G_coexpress, 'G_coexpress_weight': G_coexpress_weight, 'device': self.device, 'num_genes': self.num_genes, 'num_perts': self.num_perts, 'no_perturb': no_perturb } if self.wandb: self.wandb.config.update(self.config) if self.config['G_coexpress'] is None: # calculating co expression similarity graph edge_list = get_similarity_network(network_type='co-express', adata=self.adata, threshold=coexpress_threshold, k=num_similar_genes_co_express_graph, data_path=self.data_path, data_name=self.dataset_name, split=self.split, seed=self.seed, train_gene_set_size=self.train_gene_set_size, set2conditions=self.set2conditions) sim_network = GeneSimNetwork(edge_list, self.gene_list, node_map=self.node_map) self.config['G_coexpress'] = sim_network.edge_index self.config['G_coexpress_weight'] = sim_network.edge_weight if self.config['G_go'] is None: # calculating gene ontology similarity graph edge_list = get_similarity_network(network_type='go', adata=self.adata, threshold=coexpress_threshold, k=num_similar_genes_go_graph, pert_list=self.pert_list, data_path=self.data_path, data_name=self.dataset_name, split=self.split, seed=self.seed, train_gene_set_size=self.train_gene_set_size, set2conditions=self.set2conditions, default_pert_graph=self.default_pert_graph) sim_network = GeneSimNetwork(edge_list, self.pert_list, node_map=self.node_map_pert) self.config['G_go'] = sim_network.edge_index self.config['G_go_weight'] = sim_network.edge_weight self.model = GEARS_Model(self.config).to(self.device) self.best_model = deepcopy(self.model) def load_pretrained(self, path): """ Load pretrained model Parameters ---------- path: str path to the pretrained model Returns ------- None """ with open(os.path.join(path, 'config.pkl'), 'rb') as f: config = pickle.load(f) del config['device'], config['num_genes'], config['num_perts'] self.model_initialize(**config) self.config = config 
state_dict = torch.load(os.path.join(path, 'model.pt'), map_location=torch.device('cpu')) if next(iter(state_dict))[:7] == 'module.': # the pretrained model is from data-parallel module new_state_dict = OrderedDict() for k, v in state_dict.items(): name = k[7:] # remove `module.` new_state_dict[name] = v state_dict = new_state_dict self.model.load_state_dict(state_dict) self.model = self.model.to(self.device) self.best_model = self.model def save_model(self, path): """ Save the model Parameters ---------- path: str path to save the model Returns ------- None """ if not os.path.exists(path): os.mkdir(path) if self.config is None: raise ValueError('No model is initialized...') with open(os.path.join(path, 'config.pkl'), 'wb') as f: pickle.dump(self.config, f) torch.save(self.best_model.state_dict(), os.path.join(path, 'model.pt')) def predict(self, pert_list): """ Predict the transcriptome given a list of genes/gene combinations being perturbed Parameters ---------- pert_list: list list of genes/gene combiantions to be perturbed Returns ------- results_pred: dict dictionary of predicted transcriptome results_logvar: dict dictionary of uncertainty score """ # given a list of single/combo genes, return the transcriptome # if uncertainty mode is on, also return uncertainty score. self.ctrl_adata = self.adata[self.adata.obs['condition'] == 'ctrl'] for pert in pert_list: for i in pert: if i not in self.pert_list: raise ValueError(i + " is not in the perturbation graph. " "Please select from GEARS.pert_list!") if self.config['uncertainty']: results_logvar = {} self.best_model = self.best_model.to(self.device) self.best_model.eval() results_pred = {} results_logvar_sum = {} for pert in pert_list: try: # If prediction is already saved, then skip inference results_pred['_'.join(pert)] = self.saved_pred['_'.join(pert)] if self.config['uncertainty']: results_logvar_sum['_'.join(pert)] = self.saved_logvar_sum['_'.join(pert)] continue except: pass cg = create_cell_graph_dataset_for_prediction(pert, self.ctrl_adata, self.pert_list, self.device) loader = DataLoader(cg, 300, shuffle=False) batch = next(iter(loader)) batch.to(self.device) with torch.no_grad(): if self.config['uncertainty']: p, unc = self.best_model(batch) results_logvar['_'.join(pert)] = np.mean(unc.detach().cpu().numpy(), axis=0) results_logvar_sum['_'.join(pert)] = np.exp(-np.mean(results_logvar['_'.join(pert)])) else: p = self.best_model(batch) results_pred['_'.join(pert)] = np.mean(p.detach().cpu().numpy(), axis=0) self.saved_pred.update(results_pred) if self.config['uncertainty']: self.saved_logvar_sum.update(results_logvar_sum) return results_pred, results_logvar_sum else: return results_pred def GI_predict(self, combo, GI_genes_file='./genes_with_hi_mean.npy'): """ Predict the GI scores following perturbation of a given gene combination Parameters ---------- combo: list list of genes to be perturbed GI_genes_file: str path to the file containing genes with high mean expression Returns ------- GI scores for the given combinatorial perturbation based on GEARS predictions """ # if uncertainty mode is on, also return uncertainty score. try: # If prediction is already saved, then skip inference pred = {} pred[combo[0]] = self.saved_pred[combo[0]] pred[combo[1]] = self.saved_pred[combo[1]] pred['_'.join(combo)] = self.saved_pred['_'.join(combo)] except: if self.config['uncertainty']: pred = self.predict([[combo[0]], [combo[1]], combo])[0] else: pred = self.predict([[combo[0]], [combo[1]], combo])
mean_control = get_mean_control(self.adata).values
9
2023-10-13 14:20:34+00:00
16k
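The completion-target line recorded a few fields above, mean_control = get_mean_control(self.adata).values, calls the get_mean_control helper included in this example's context snippets, which averages expression over the cells labelled 'ctrl'. Below is a small self-contained sketch of that pattern on a toy AnnData object; the toy matrix and condition labels are invented for illustration only.

import anndata as ad
import numpy as np
import pandas as pd

# Toy reproduction of the get_mean_control pattern shown in the snippets:
# mean expression over cells whose obs['condition'] == 'ctrl'.
X = np.array([[1.0, 2.0], [3.0, 4.0], [10.0, 20.0]])
obs = pd.DataFrame({"condition": ["ctrl", "ctrl", "pertA"]}, index=["c0", "c1", "c2"])
adata = ad.AnnData(X=X, obs=obs)

mean_control = adata[adata.obs["condition"] == "ctrl"].to_df().mean().values
print(mean_control)  # expected: [2. 3.]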
weavel-ai/promptmodel-python
promptmodel/chat_model.py
[ { "identifier": "DevClient", "path": "promptmodel/dev_app.py", "snippet": "class DevClient:\n \"\"\"DevClient main class\"\"\"\n\n def __init__(self):\n self.function_models: List[FunctionModelInterface] = []\n self.chat_models: List[ChatModelInterface] = []\n\n def register(self, func):\n instructions = list(dis.get_instructions(func))\n for idx in range(\n len(instructions) - 1\n ): # We check up to len-1 because we access idx+1 inside loop\n instruction = instructions[idx]\n # print(instruction)\n if instruction.opname in [\"LOAD_ATTR\", \"LOAD_METHOD\", \"LOAD_GLOBAL\"] and (\n instruction.argval == \"FunctionModel\"\n or instruction.argval == \"ChatModel\"\n ):\n next_instruction = instructions[idx + 1]\n\n # Check if the next instruction is LOAD_CONST with string value\n if next_instruction.opname == \"LOAD_CONST\" and isinstance(\n next_instruction.argval, str\n ):\n if instruction.argval == \"FunctionModel\":\n self.function_models.append(\n FunctionModelInterface(name=next_instruction.argval)\n )\n elif instruction.argval == \"ChatModel\":\n self.chat_models.append(\n ChatModelInterface(name=next_instruction.argval)\n )\n\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n def register_function_model(self, name):\n for function_model in self.function_models:\n if function_model.name == name:\n return\n\n self.function_models.append(FunctionModelInterface(name=name))\n\n def register_chat_model(self, name):\n for chat_model in self.chat_models:\n if chat_model.name == name:\n return\n\n self.chat_models.append(ChatModelInterface(name=name))\n\n def _get_function_model_name_list(self) -> List[str]:\n return [function_model.name for function_model in self.function_models]" }, { "identifier": "LLMProxy", "path": "promptmodel/llms/llm_proxy.py", "snippet": "class LLMProxy(LLM):\n def __init__(\n self,\n name: str,\n version: Optional[Union[str, int]] = \"deploy\",\n unit_config: Optional[UnitConfig] = None\n ):\n super().__init__()\n self._name = name\n self.version = version\n self.unit_config = unit_config\n\n def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = run_async_in_sync(\n LLMProxy.fetch_prompts(self._name, self.version)\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n log_uuid = str(uuid4())\n\n # Call the generator with the arguments\n stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)\n\n api_response = None\n dict_cache = {} # to store aggregated dictionary values\n string_cache = \"\" # to store aggregated string values\n error_occurs = False\n error_log = None\n for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n if item.parsed_outputs:\n dict_cache = update_dict(dict_cache, item.parsed_outputs)\n if item.raw_output:\n string_cache += item.raw_output\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": 
error_occurs,\n \"error_log\": error_log,\n }\n\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=api_response,\n parsed_outputs=dict_cache,\n metadata=metadata,\n )\n )\n\n return wrapper\n\n def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:\n async def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = await LLMProxy.fetch_prompts(\n self._name, self.version\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call async_gen with the arguments\n stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(\n **call_args\n )\n\n log_uuid = str(uuid4())\n\n api_response = None\n dict_cache = {} # to store aggregated dictionary values\n string_cache = \"\" # to store aggregated string values\n error_occurs = False\n error_log = None\n api_response: Optional[ModelResponse] = None\n async for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n if item.parsed_outputs:\n dict_cache = update_dict(dict_cache, item.parsed_outputs)\n if item.raw_output:\n string_cache += item.raw_output\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n # # add string_cache in model_response\n # if api_response:\n # if \"message\" not in api_response.choices[0]:\n # api_response.choices[0].message = {}\n # if \"content\" not in api_response.choices[0].message:\n # api_response.choices[0].message[\"content\"] = string_cache\n # api_response.choices[0].message[\"role\"] = \"assistant\"\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=api_response,\n parsed_outputs=dict_cache,\n metadata=metadata,\n )\n\n # raise Exception(\"error_log\")\n\n return wrapper\n\n def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = run_async_in_sync(\n LLMProxy.fetch_prompts(self._name, self.version)\n )\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n log_uuid = str(uuid4())\n if llm_response.parsed_outputs:\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs=llm_response.parsed_outputs,\n metadata=metadata,\n )\n )\n else:\n run_async_in_sync(\n self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs={},\n metadata=metadata,\n )\n )\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n 
llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return wrapper\n\n def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]:\n async def async_wrapper(inputs: Dict[str, Any], **kwargs):\n prompts, version_details = await LLMProxy.fetch_prompts(\n self._name, self.version\n ) # messages, model, uuid = self._fetch_prompts()\n call_args = self._prepare_call_args(\n prompts, version_details, inputs, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = await method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n log_uuid = str(uuid4())\n if llm_response.parsed_outputs:\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs=llm_response.parsed_outputs,\n metadata=metadata,\n )\n else:\n await self._async_log_to_cloud(\n log_uuid=log_uuid,\n version_uuid=version_details[\"uuid\"],\n inputs=inputs,\n api_response=llm_response.api_response,\n parsed_outputs={},\n metadata=metadata,\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return async_wrapper\n\n def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(session_uuid: str, **kwargs):\n instruction, version_details, message_logs = run_async_in_sync(\n LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n )\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n api_response = None\n if llm_response.api_response:\n api_response = llm_response.api_response\n\n log_uuid = str(uuid4())\n\n run_async_in_sync(\n self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n message=llm_response.api_response.choices[\n 0\n ].message.model_dump(),\n uuid=log_uuid,\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return wrapper\n\n def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:\n async def async_wrapper(session_uuid: str, **kwargs):\n (\n instruction,\n version_details,\n message_logs,\n ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, 
self.version)\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n\n # Call the method with the arguments\n llm_response: LLMResponse = await method(**call_args)\n error_occurs = llm_response.error\n error_log = llm_response.error_log\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n api_response = None\n if llm_response.api_response:\n api_response = llm_response.api_response\n\n log_uuid = str(uuid4())\n await self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=llm_response.api_response.choices[\n 0\n ].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n\n if error_occurs:\n # delete all promptmodel data in llm_response\n llm_response.raw_output = None\n llm_response.parsed_outputs = None\n llm_response.function_call = None\n\n llm_response.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n return llm_response\n\n return async_wrapper\n\n def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:\n def wrapper(session_uuid: str, **kwargs):\n instruction, version_details, message_logs = run_async_in_sync(\n LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n )\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n # Call the generator with the arguments\n stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)\n\n api_response = None\n error_occurs = False\n error_log = None\n log_uuid = str(uuid4())\n for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the last api_response, not delta response\n api_response = item.api_response\n\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n run_async_in_sync(\n self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=api_response.choices[0].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n )\n\n return wrapper\n\n def _wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:\n async def wrapper(session_uuid: str, **kwargs):\n (\n instruction,\n version_details,\n message_logs,\n ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)\n\n call_args = self._prepare_call_args_for_chat(\n message_logs, version_details, kwargs\n )\n # Call the generator with the arguments\n stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(\n **call_args\n )\n\n api_response = None\n error_occurs = False\n error_log = None\n log_uuid = str(uuid4())\n async for item in stream_response:\n if (\n item.api_response and \"delta\" not in item.api_response.choices[0]\n ): # only get the 
last api_response, not delta response\n api_response = item.api_response\n\n if item.error and not error_occurs:\n error_occurs = True\n error_log = item.error_log\n\n if error_occurs:\n # delete all promptmodel data in item\n item.raw_output = None\n item.parsed_outputs = None\n item.function_call = None\n\n item.pm_detail = PMDetail(\n model=version_details[\"model\"],\n name=self._name,\n version_uuid=str(version_details[\"uuid\"]),\n version=version_details[\"version\"],\n log_uuid=log_uuid,\n )\n yield item\n\n metadata = {\n \"error\": error_occurs,\n \"error_log\": error_log,\n }\n await self._async_chat_log_to_cloud(\n session_uuid=session_uuid,\n version_uuid=version_details[\"uuid\"],\n chat_log_request_list=[\n ChatLogRequest(\n uuid=log_uuid,\n message=api_response.choices[0].message.model_dump(),\n metadata=metadata,\n api_response=api_response,\n )\n ],\n )\n\n return wrapper\n\n def _prepare_call_args(\n self,\n prompts: List[Dict[str, str]],\n version_detail: Dict[str, Any],\n inputs: Dict[str, Any],\n kwargs,\n ):\n stringified_inputs = {key: str(value) for key, value in inputs.items()}\n messages = [\n {\n \"content\": prompt[\"content\"].format(**stringified_inputs),\n \"role\": prompt[\"role\"],\n }\n for prompt in prompts\n ]\n call_args = {\n \"messages\": messages,\n \"model\": version_detail[\"model\"] if version_detail else None,\n \"parsing_type\": version_detail[\"parsing_type\"] if version_detail else None,\n \"output_keys\": version_detail[\"output_keys\"] if version_detail else None,\n }\n if call_args[\"parsing_type\"] is None:\n del call_args[\"parsing_type\"]\n del call_args[\"output_keys\"]\n\n if \"functions\" in kwargs:\n call_args[\"functions\"] = kwargs[\"functions\"]\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n\n if \"api_key\" in kwargs:\n call_args[\"api_key\"] = kwargs[\"api_key\"]\n return call_args\n\n def _prepare_call_args_for_chat(\n self,\n messages: List[Dict[str, Any]],\n version_detail: Dict[str, Any],\n kwargs,\n ):\n call_args = {}\n token_per_tools = 0\n if \"functions\" in kwargs:\n call_args[\"functions\"] = kwargs[\"functions\"]\n token_per_tools = num_tokens_from_functions_input(\n functions=kwargs[\"functions\"],\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\",\n )\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n token_per_tools = num_tokens_from_functions_input(\n functions=kwargs[\"tools\"],\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\",\n )\n\n # truncate messages to make length <= model's max length\n model_max_tokens = get_max_tokens(\n model=version_detail[\"model\"] if version_detail else \"gpt-3.5-turbo\"\n )\n token_per_messages = num_tokens_for_messages_for_each(\n messages, version_detail[\"model\"]\n )\n token_limit_exceeded = (\n sum(token_per_messages) + token_per_tools\n ) - model_max_tokens\n if token_limit_exceeded > 0:\n while token_limit_exceeded > 0:\n # erase the second oldest message (first one is system prompt, so it should not be erased)\n if len(messages) == 1:\n # if there is only one message, Error cannot be solved. 
Just call LLM and get error response\n break\n token_limit_exceeded -= token_per_messages[1]\n del messages[1]\n del token_per_messages[1]\n\n call_args[\"messages\"] = messages\n call_args[\"model\"] = version_detail[\"model\"] if version_detail else None\n\n if \"api_key\" in kwargs:\n call_args[\"api_key\"] = kwargs[\"api_key\"]\n\n if \"tools\" in kwargs:\n call_args[\"tools\"] = kwargs[\"tools\"]\n\n return call_args\n\n async def _async_log_to_cloud(\n self,\n version_uuid: str,\n log_uuid: str,\n inputs: Optional[Dict] = None,\n api_response: Optional[ModelResponse] = None,\n parsed_outputs: Optional[Dict] = None,\n metadata: Optional[Dict] = None,\n ):\n config = read_config()\n if (\n \"project\" in config\n and \"mask_inputs\" in config[\"project\"]\n and config[\"project\"][\"mask_inputs\"] == True\n ):\n inputs = {key: \"PRIVATE LOGGING\" for key, value in inputs.items()}\n\n # Perform the logging asynchronously\n if api_response:\n api_response_dict = api_response.model_dump()\n api_response_dict[\"response_ms\"] = api_response._response_ms\n api_response_dict[\"_response_ms\"] = api_response._response_ms\n else:\n api_response_dict = None\n run_log_request_body = {\n \"uuid\": log_uuid,\n \"api_response\": api_response_dict,\n \"inputs\": inputs,\n \"parsed_outputs\": parsed_outputs,\n \"metadata\": metadata,\n }\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/run_log\",\n params={\n \"version_uuid\": version_uuid,\n },\n json=run_log_request_body,\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to log to cloud: {res.json()}[/red]\");\n \n if self.unit_config:\n res_connect = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/unit/connect\",\n json={\n \"unit_log_uuid\": self.unit_config.log_uuid,\n \"run_log_uuid\": log_uuid, \n },\n use_cli_key=False,\n )\n if res_connect.status_code != 200:\n print(f\"[red]Failed to connect prompt component to run log: {res_connect.json()}[/red]\")\n\n return res\n\n async def _async_chat_log_to_cloud(\n self,\n session_uuid: str,\n version_uuid: Optional[str] = None,\n chat_log_request_list: List[ChatLogRequest] = [],\n ):\n # Perform the logging asynchronously\n\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/chat_log\",\n params={\n \"session_uuid\": session_uuid,\n \"version_uuid\": version_uuid,\n },\n json=[r.model_dump() for r in chat_log_request_list],\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to log to cloud: {res.json()}[/red]\")\n return res\n\n async def _async_make_session_cloud(\n self,\n session_uuid: str,\n version_uuid: Optional[str] = None,\n ):\n # Perform the logging asynchronously\n res = await AsyncAPIClient.execute(\n method=\"POST\",\n path=\"/make_session\",\n params={\n \"session_uuid\": session_uuid,\n \"version_uuid\": version_uuid,\n },\n use_cli_key=False,\n )\n if res.status_code != 200:\n print(f\"[red]Failed to make ChatSession in cloud: {res.json()}[/red]\")\n return res\n\n def make_kwargs(self, **kwargs):\n res = {}\n for key, value in kwargs.items():\n if value is not None:\n res[key] = value\n return res\n\n def run(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_method(super().run)(inputs, **kwargs)\n\n def arun(\n self,\n inputs: Dict[str, Any] = {},\n functions: 
Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_method(super().arun)(inputs, **kwargs)\n\n def stream(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_gen(super().stream)(inputs, **kwargs)\n\n def astream(\n self,\n inputs: Optional[Dict[str, Any]] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_gen(super().astream)(inputs, **kwargs)\n\n def run_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_method(super().run_and_parse)(inputs, **kwargs)\n\n def arun_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_method(super().arun_and_parse)(inputs, **kwargs)\n\n def stream_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_gen(super().stream_and_parse)(inputs, **kwargs)\n\n def astream_and_parse(\n self,\n inputs: Dict[str, Any] = {},\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_gen(super().astream_and_parse)(inputs, **kwargs)\n\n def chat_run(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_chat(super().run)(session_uuid, **kwargs)\n\n def chat_arun(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_chat(super().arun)(session_uuid, **kwargs)\n\n def chat_stream(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_chat_gen(super().stream)(session_uuid, **kwargs)\n\n def chat_astream(\n self,\n session_uuid: str,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n api_key: Optional[str] = 
None,\n ) -> LLMResponse:\n kwargs = self.make_kwargs(functions=functions, api_key=api_key, tools=tools)\n return self._wrap_async_chat_gen(super().astream)(session_uuid, **kwargs)\n\n @staticmethod\n async def fetch_prompts(\n name,\n version: Optional[Union[str, int]] = \"deploy\",\n ) -> Tuple[List[Dict[str, str]], Dict[str, Any]]:\n \"\"\"fetch prompts.\n\n Args:\n name (str): name of FunctionModel\n\n Returns:\n Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail)\n \"\"\"\n # Check connection activate\n config = read_config()\n if (\n \"connection\" in config\n and \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"] == True\n ):\n return [], {}\n elif (\n \"connection\" in config\n and \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"] == True\n ):\n return [], {}\n else:\n if (\n \"project\" in config\n and \"use_cache\" in config[\"project\"]\n and config[\"project\"][\"use_cache\"] == True\n and version == \"deploy\"\n ):\n cache_manager = CacheManager()\n # call update_local API in background task\n cache_update_thread = Thread(\n target=cache_manager.cache_update_background_task, args=(config,)\n )\n cache_update_thread.daemon = True\n cache_update_thread.start()\n\n # get prompt from local DB by ratio\n prompt_rows, version_detail = get_deployed_prompts(name)\n if prompt_rows is None:\n return [], {}\n\n return [\n {\"role\": prompt.role, \"content\": prompt.content}\n for prompt in prompt_rows\n ], version_detail\n\n else:\n try:\n config_list = await AsyncAPIClient.execute(\n method=\"GET\",\n path=\"/function_model_versions\",\n params={\"function_model_name\": name, \"version\": version},\n use_cli_key=False,\n )\n config_list = config_list.json()\n except Exception as e:\n raise e\n\n function_model_versions = [\n x[\"function_model_version\"] for x in config_list\n ]\n\n if version == \"deploy\":\n for version in function_model_versions:\n if version[\"is_published\"] is True:\n version[\"ratio\"] = 1.0\n selected_version = select_version_by_ratio(function_model_versions)\n else:\n selected_version = function_model_versions[0]\n\n # config.prompts where config.function_model_version.uuid = selected_version.uuid\n prompt_rows = [\n config[\"prompts\"]\n for config in config_list\n if config[\"function_model_version\"][\"uuid\"]\n == selected_version[\"uuid\"]\n ][0]\n\n # sort prompt_rows by step\n prompt_rows = sorted(prompt_rows, key=lambda prompt: prompt[\"step\"])\n\n version_detail = {\n \"model\": selected_version[\"model\"],\n \"version\": selected_version[\"version\"],\n \"uuid\": selected_version[\"uuid\"],\n \"parsing_type\": selected_version[\"parsing_type\"],\n \"output_keys\": selected_version[\"output_keys\"],\n }\n\n if prompt_rows is None:\n return [], {}\n\n return [\n {\"role\": prompt[\"role\"], \"content\": prompt[\"content\"]}\n for prompt in prompt_rows\n ], version_detail\n\n @staticmethod\n async def fetch_chat_model(\n name: str,\n session_uuid: Optional[str] = None,\n version: Optional[Union[str, int]] = \"deploy\",\n ) -> Tuple[str, Dict[str, Any], List[Dict]]:\n \"\"\"fetch instruction and version detail\n\n Args:\n name (str): name of ChatModel\n\n Returns:\n Tuple[List[Dict[str, str]], Optional[Dict[str, Any]]]: (prompts, version_detail)\n \"\"\"\n # Check connection activate\n config = read_config()\n if (\n \"connection\" in config\n and \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"] == True\n ):\n 
return \"\", {}, []\n elif (\n \"connection\" in config\n and \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"] == True\n ):\n return \"\", {}, []\n else:\n try:\n res_data = await AsyncAPIClient.execute(\n method=\"GET\",\n path=\"/chat_model_versions_with_logs\",\n params={\n \"chat_model_name\": name,\n \"session_uuid\": session_uuid,\n \"version\": version,\n },\n use_cli_key=False,\n )\n res_data = res_data.json()\n except Exception as e:\n raise e\n chat_model_versions = res_data[\"chat_model_versions\"]\n\n if (\n session_uuid is None\n ): # if this is the initial call for deployed chat model\n if version == \"deploy\":\n for version in chat_model_versions:\n if version[\"is_published\"] is True:\n version[\"ratio\"] = 1.0\n selected_version = select_version_by_ratio(chat_model_versions)\n else:\n selected_version = chat_model_versions[0]\n else:\n selected_version = chat_model_versions[0]\n\n instruction: str = selected_version[\"system_prompt\"]\n\n version_detail = {\n \"model\": selected_version[\"model\"],\n \"uuid\": selected_version[\"uuid\"],\n \"version\": selected_version[\"version\"],\n }\n if session_uuid:\n chat_logs: List[Dict] = res_data[\"chat_logs\"]\n chat_logs = [{\"role\": \"system\", \"content\": instruction}] + chat_logs\n else:\n chat_logs = []\n\n # delete columns which value is None in each chat log\n for chat_log in chat_logs:\n for key in list(chat_log.keys()):\n if chat_log[key] is None:\n del chat_log[key]\n\n return instruction, version_detail, chat_logs\n\n # @staticmethod\n # async def fetch_chat_log(\n # session_uuid: str,\n # version: Optional[Union[str, int]] = \"deploy\",\n # ) -> List[Dict[str, Any]]:\n # \"\"\"fetch conversation log for session_uuid and version detail\n\n # Args:\n # session_uuid (str): session_uuid\n\n # Returns:\n # List[Dict[str, Any]] : list of conversation log\n # \"\"\"\n # config = read_config()\n # if \"connection\" in config and config[\"connection\"][\"initializing\"] == True:\n # return []\n # elif \"connection\" in config and config[\"connection\"][\"reloading\"] == True:\n # return []\n # else:\n # try:\n # res_data = await AsyncAPIClient.execute(\n # method=\"GET\",\n # path=\"/fetch_chat_logs\",\n # params={\"session_uuid\": session_uuid},\n # use_cli_key=False,\n # )\n # res_data = res_data.json()\n # except Exception as e:\n # raise e\n\n # # filter out unnecessary data\n # res_data = [\n # {\n # \"role\": message[\"role\"],\n # \"content\": message[\"content\"],\n # \"function_call\": message[\"function_call\"],\n # }\n # for message in res_data[\"chat_logs\"]\n # ]\n # return res_data" }, { "identifier": "logger", "path": "promptmodel/utils/logger.py", "snippet": "def debug(msg: Any, *args):\ndef success(msg: Any, *args):\ndef info(msg: Any, *args):\ndef warning(msg: Any, *args):\ndef error(msg: Any, *args):" }, { "identifier": "read_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def read_config():\n \"\"\"\n Reads the configuration from the given filename.\n\n :return: A dictionary containing the configuration.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n\n with open(CONFIG_FILE, \"r\") as file:\n config = yaml.safe_load(file) or {}\n return config" }, { "identifier": "upsert_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def upsert_config(new_config: Dict[str, Any], section: str = None):\n \"\"\"\n Upserts the given configuration file with the given configuration.\n\n :param new_config: A dictionary containing the new 
configuration.\n :param section: The section of the configuration to update.\n \"\"\"\n config = read_config()\n if section:\n config_section = config.get(section, {})\n new_config = {section: merge_dict(config_section, new_config)}\n config = merge_dict(config, new_config)\n # If . directory does not exist, create it\n if not os.path.exists(\"./.promptmodel\"):\n os.mkdir(\"./.promptmodel\")\n\n with open(CONFIG_FILE, \"w\") as file:\n yaml.safe_dump(config, file, default_flow_style=False)" }, { "identifier": "check_connection_status_decorator", "path": "promptmodel/utils/config_utils.py", "snippet": "def check_connection_status_decorator(method):\n if asyncio.iscoroutinefunction(method):\n\n @wraps(method)\n async def async_wrapper(self, *args, **kwargs):\n config = read_config()\n if \"connection\" in config and (\n (\n \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"]\n )\n or (\n \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"]\n )\n ):\n return\n else:\n if \"config\" not in kwargs:\n kwargs[\"config\"] = config\n return await method(self, *args, **kwargs)\n\n # async_wrapper.__name__ = method.__name__\n # async_wrapper.__doc__ = method.__doc__\n return async_wrapper\n else:\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n config = read_config()\n if \"connection\" in config and (\n (\n \"initializing\" in config[\"connection\"]\n and config[\"connection\"][\"initializing\"]\n )\n or (\n \"reloading\" in config[\"connection\"]\n and config[\"connection\"][\"reloading\"]\n )\n ):\n return\n else:\n return method(self, *args, **kwargs)\n\n # wrapper.__name__ = method.__name__\n # wrapper.__doc__ = method.__doc__\n return wrapper" }, { "identifier": "run_async_in_sync", "path": "promptmodel/utils/async_utils.py", "snippet": "def run_async_in_sync(coro: Coroutine):\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError: # No running loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n result = loop.run_until_complete(coro)\n # loop.close()\n return result\n\n return loop.run_until_complete(coro)" }, { "identifier": "LLMStreamResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMStreamResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[ChoiceDeltaFunctionCall] = None\n tool_calls: Optional[List[ChoiceDeltaToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "LLMResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[FunctionCall] = None\n tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "ChatModelConfig", "path": "promptmodel/types/response.py", "snippet": "class ChatModelConfig(BaseModel):\n system_prompt: str\n model: str\n name: str\n version_uuid: str\n version: int\n message_logs: Optional[List[Dict]] = []" }, { "identifier": "InstanceType", "path": "promptmodel/types/enums.py", "snippet": "class InstanceType(str, Enum):\n ChatLog = \"ChatLog\"\n RunLog = \"RunLog\"\n ChatLogSession = \"ChatLogSession\"" 
}, { "identifier": "ChatLogRequest", "path": "promptmodel/types/request.py", "snippet": "class ChatLogRequest(BaseModel):\n uuid: Optional[str] = None\n message: Dict[str, Any]\n metadata: Optional[Dict] = None\n api_response: Optional[ModelResponse] = None\n\n def __post_init__(\n self,\n ):\n if self.api_response is not None and self.message is None:\n self.message = self.api_response.choices[0].message.model_dump()" }, { "identifier": "AsyncAPIClient", "path": "promptmodel/apis/base.py", "snippet": "class AsyncAPIClient:\n \"\"\"\n A class to represent an Async API request client.\n Used in Deployment stage.\n\n ...\n\n Methods\n -------\n get_headers():\n Generates headers for the API request.\n execute(method=\"GET\", params=None, data=None, json=None, **kwargs):\n Executes the API request.\n \"\"\"\n\n @classmethod\n async def _get_headers(cls, use_cli_key: bool = True) -> Dict:\n \"\"\"\n Reads, decrypts the api_key, and returns headers for API request.\n\n Returns\n -------\n dict\n a dictionary containing the Authorization header\n \"\"\"\n config = read_config()\n if use_cli_key:\n if \"connection\" not in config:\n print(\n \"User not logged in. Please run [violet]prompt login[/violet] first.\"\n )\n exit()\n\n encrypted_key = config[\"connection\"][\"encrypted_api_key\"]\n if encrypted_key is None:\n raise Exception(\"No API key found. Please run 'prompt login' first.\")\n decrypted_key = decrypt_message(encrypted_key)\n else:\n decrypted_key = os.environ.get(\"PROMPTMODEL_API_KEY\")\n if decrypted_key is None:\n raise Exception(\n \"PROMPTMODEL_API_KEY was not found in the current environment.\"\n )\n headers = {\"Authorization\": f\"Bearer {decrypted_key}\"}\n return headers\n\n @classmethod\n async def execute(\n cls,\n path: str,\n method=\"GET\",\n params: Dict = None,\n data: Dict = None,\n json: Dict = None,\n ignore_auth_error: bool = False,\n use_cli_key: bool = True,\n **kwargs,\n ) -> requests.Response:\n \"\"\"\n Executes the API request with the decrypted API key in the headers.\n\n Parameters\n ----------\n method : str, optional\n The HTTP method of the request (default is \"GET\")\n params : dict, optional\n The URL parameters to be sent with the request\n data : dict, optional\n The request body to be sent with the request\n json : dict, optional\n The JSON-encoded request body to be sent with the request\n ignore_auth_error: bool, optional\n Whether to ignore authentication errors (default is False)\n **kwargs : dict\n Additional arguments to pass to the requests.request function\n\n Returns\n -------\n requests.Response\n The response object returned by the requests library\n \"\"\"\n url = f\"{ENDPOINT_URL}{path}\"\n headers = await cls._get_headers(use_cli_key)\n try:\n async with httpx.AsyncClient(http2=True) as _client:\n response = await _client.request(\n method,\n url,\n headers=headers,\n params=params,\n data=data,\n json=json,\n **kwargs,\n )\n if not response:\n print(f\"[red]Error: {response}[/red]\")\n if response.status_code == 200:\n return response\n elif response.status_code == 403:\n if not ignore_auth_error:\n print(\"[red]Authentication failed.[/red]\")\n else:\n print(f\"[red]Error: {response}[/red]\")\n\n return response\n except requests.exceptions.ConnectionError:\n print(\"[red]Could not connect to the Promptmodel API.[/red]\")\n except requests.exceptions.Timeout:\n print(\"[red]The request timed out.[/red]\")\n except Exception as exception:\n print(f\"[red]Error: {exception}[/red]\")" } ]
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Coroutine, Union
from uuid import uuid4
from litellm import ModelResponse
from promptmodel import DevClient
from promptmodel.llms.llm_proxy import LLMProxy
from promptmodel.utils import logger
from promptmodel.utils.config_utils import (
    read_config,
    upsert_config,
    check_connection_status_decorator,
)
from promptmodel.utils.async_utils import run_async_in_sync
from promptmodel.types.response import LLMStreamResponse, LLMResponse, ChatModelConfig
from promptmodel.types.enums import InstanceType
from promptmodel.types.request import ChatLogRequest
from promptmodel.apis.base import AsyncAPIClient
import sys
12,995
message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. """ # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: def gen(): cache: Optional[LLMStreamResponse] = None for item in self.llm_proxy.chat_stream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return gen() else: res = self.llm_proxy.chat_run(self.session_uuid, functions, tools) self.recent_log_uuid = res.pm_detail.log_uuid return res # return self.llm_proxy.chat_run(self.session_uuid, functions, self.api_key) @check_connection_status_decorator async def arun( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Async run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: async def async_gen(): cache: Optional[LLMStreamResponse] = None async for item in self.llm_proxy.chat_astream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return async_gen() else: res: LLMResponse = await self.llm_proxy.chat_arun( self.session_uuid, functions ) self.recent_log_uuid = res.pm_detail.log_uuid return res # return await self.llm_proxy.chat_arun( # self.session_uuid, functions, self.api_key # ) @check_connection_status_decorator async def log_score_to_session( self, score: Optional[Dict[str, Any]] = {}, *args, **kwargs ): try:
from __future__ import annotations class RegisteringMeta(type): def __call__(cls, *args, **kwargs): instance: ChatModel = super().__call__(*args, **kwargs) # Find the global client instance in the current context client = cls.find_client_instance() if client is not None: client.register_chat_model(instance.name) return instance @staticmethod def find_client_instance(): # Get the current frame frame = sys._getframe(2) # Get global variables in the current frame global_vars = frame.f_globals # Find an instance of Client among global variables for var_name, var_val in global_vars.items(): if isinstance(var_val, DevClient): return var_val return None class ChatModel(metaclass=RegisteringMeta): """ Args: name (_type_): _description_ version (Optional[ Union[str, int] ], optional): Choose which FunctionModel version to use. Defaults to "deploy". It can be "deploy", "latest", or version number. api_key (Optional[str], optional): API key for the LLM. Defaults to None. If None, use api_key in .env file. """ def __init__( self, name, session_uuid: str = None, version: Optional[Union[str, int]] = "deploy", api_key: Optional[str] = None, ): self.name = name self.api_key = api_key self.llm_proxy = LLMProxy(name, version) self.version = version self.recent_log_uuid = None if session_uuid is None: self.session_uuid = str(uuid4()) instruction, version_details, chat_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, None, version) ) config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return else: run_async_in_sync( self.llm_proxy._async_make_session_cloud( self.session_uuid, version_details["uuid"], ) ) else: self.session_uuid = session_uuid @check_connection_status_decorator def get_config( self, *args, **kwargs, ) -> ChatModelConfig: """Get config for the ChatModel. It will fetch the published prompt and version config from the Cloud. (It will be saved in cache DB, so there is no extra latency for API call.) - If you made A/B testing in Web Dashboard, it will fetch the prompt randomly by the A/B testing ratio. If dev mode is initializing, it will return None Returns: ChatModelConfig: config for the ChatModel, which contains prompts and version_detail, message_logs """ prompt, version_detail, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, self.session_uuid, self.version) ) return ChatModelConfig( system_prompt=prompt, model=version_detail["model"], name=self.name, version_uuid=str(version_detail["uuid"]), version=version_detail["version"], message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. 
""" # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: def gen(): cache: Optional[LLMStreamResponse] = None for item in self.llm_proxy.chat_stream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return gen() else: res = self.llm_proxy.chat_run(self.session_uuid, functions, tools) self.recent_log_uuid = res.pm_detail.log_uuid return res # return self.llm_proxy.chat_run(self.session_uuid, functions, self.api_key) @check_connection_status_decorator async def arun( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Async run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: async def async_gen(): cache: Optional[LLMStreamResponse] = None async for item in self.llm_proxy.chat_astream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return async_gen() else: res: LLMResponse = await self.llm_proxy.chat_arun( self.session_uuid, functions ) self.recent_log_uuid = res.pm_detail.log_uuid return res # return await self.llm_proxy.chat_arun( # self.session_uuid, functions, self.api_key # ) @check_connection_status_decorator async def log_score_to_session( self, score: Optional[Dict[str, Any]] = {}, *args, **kwargs ): try:
res = await AsyncAPIClient.execute(
12
2023-10-09 03:35:44+00:00
16k
cambridgeltl/ClaPS
algs/genetics.py
[ { "identifier": "BaseTrainer", "path": "algs/base_trainer.py", "snippet": "class BaseTrainer(abc.ABC):\n \"\"\"\n The base trainer class.\n\n Attributes:\n obj_func: the callable function handle for model interfacing.\n logger: an optional logger object.\n bn_calibrator: a batch norm calibration object. Only used in\n testing (not training or validation).\n \"\"\"\n\n def __init__(\n self,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Optional[Any] = None,\n use_bn_calibrator: bool = False,\n n_samples_bn_calibrator: int = 128,\n ):\n self.obj_func = obj_func\n self.logger = logger\n self.prompt_dataset = prompt_dataset\n\n self.bn_calibrator = BatchNormCalibrate() if use_bn_calibrator else None\n self.n_samples_bn_calibrator = n_samples_bn_calibrator\n\n @abc.abstractmethod\n def train(self, train_data: Iterable[Any]):\n raise NotImplementedError()\n\n def validate(self, val_dataset: Iterable[Any], best_str_list: List[str]) -> str:\n t_dataset = val_dataset\n if self.logger is not None:\n self.logger.info(\"total val dataset length: %s\", len(t_dataset))\n val_acc_list = []\n\n for prompt in best_str_list:\n n_correct = 0\n\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size,\n (batch_idx + 1) * self.eval_batch_size,\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n _, _, batch_acc = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n prompt,\n True,\n \"infer\",\n verbose=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", prompt)\n self.logger.info(\"final val acc: %s\", (n_correct / len(t_dataset)))\n val_acc_list.append(float(n_correct / len(t_dataset)))\n # best_prompt = best_str_list[np.argmax(val_acc_list)]\n max_acc = np.max(val_acc_list)\n indices = np.argwhere(val_acc_list == max_acc)\n last_index = indices[-1][0]\n best_prompt = best_str_list[last_index]\n if self.logger is not None:\n self.logger.info(\"val acc list: %s\", val_acc_list)\n self.logger.info(\"best prompt: %s\", best_prompt)\n self.logger.info(\"best prompt acc: %s\", np.max(val_acc_list))\n\n return best_prompt\n\n def test(\n self,\n test_dataset,\n best_prompt,\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n best_prompt,\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through 
the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n _,\n _,\n _,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n best_prompt,\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", best_prompt)\n self.logger.info(n_correct)\n self.logger.info(\"final test acc: %s\", (n_correct / len(t_dataset)))\n if return_logits:\n return n_correct / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return n_correct / len(t_dataset), None\n\n def manual(\n self,\n test_dataset: Iterable[Any],\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n for i in range(self.n_classes):\n test_I = [x for x in t_dataset if x[\"label\"] == i]\n if self.logger is not None:\n self.logger.info(\n \"total test dataset length: %s for class %s\", len(test_I), i\n )\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n sum_ece = 0\n sum_entropy = 0\n class_correct = collections.Counter((i, 0) for i in range(self.n_classes))\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n \"\",\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = 
self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n count_class,\n batch_ece,\n batch_entropy,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n \"\",\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n sum_ece += batch_ece * len(idx)\n sum_entropy += batch_entropy * len(idx)\n class_correct += count_class[0]\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n # print(count_class)\n torch.cuda.empty_cache()\n # print(class_correct)\n if self.logger is not None:\n self.logger.info(\n \"manual prompt test acc: %s\", (float(n_correct) / len(t_dataset))\n )\n self.logger.info(\"count class: %s\", class_correct)\n self.logger.info(\n \"manual prompt test ece percent: %s\",\n (float(sum_ece) / len(t_dataset) * 100),\n )\n self.logger.info(\n \"manual prompt test entropy: %s\", (float(sum_entropy) / len(t_dataset))\n )\n if return_logits:\n return float(n_correct) / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return float(n_correct) / len(t_dataset), None" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in 
val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = 
['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. Whether the LM is masked, or left-to-right.\n compute_zscore: bool. 
Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. 
In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. 
n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" } ]
import random
import numpy as np
from typing import Any
from .base_trainer import BaseTrainer
from utils.fsc_datasets import PromptedClassificationDataset
from rewards.text_classification_reward import PromptedClassificationReward
11,416
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop class GeneticAlgorithmTrainer(BaseTrainer): def __init__( self, pop_size: int, mutate_size: int, crossover_size: int, epochs: int, mutate_frac: float, str_len: int, stages: int, n_classes: int, eval_batch_size: int, genetics: Genetics,
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop class GeneticAlgorithmTrainer(BaseTrainer): def __init__( self, pop_size: int, mutate_size: int, crossover_size: int, epochs: int, mutate_frac: float, str_len: int, stages: int, n_classes: int, eval_batch_size: int, genetics: Genetics,
obj_func: PromptedClassificationReward,
2
2023-10-08 12:39:44+00:00
16k
clessig/atmorep
atmorep/core/atmorep_model.py
[ { "identifier": "identity", "path": "atmorep/utils/utils.py", "snippet": "def identity( func, *args) :\n return func( *args)" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "identifier": "get_model_filename", "path": "atmorep/utils/utils.py", "snippet": "def get_model_filename( model = None, model_id = '', epoch=-2, with_model_path = True) :\n\n if isinstance( model, str) :\n name = model \n elif model :\n name = model.__class__.__name__\n else : # backward compatibility\n name = 'mod'\n\n mpath = 'id{}'.format(model_id) if with_model_path else ''\n\n if epoch > -2 :\n # model_file = Path( config.path_results, 'models/id{}/{}_id{}_epoch{}.mod'.format(\n # model_id, name, model_id, epoch))\n model_file = Path( config.path_models, mpath, '{}_id{}_epoch{}.mod'.format(\n name, model_id, epoch))\n else :\n model_file = Path( config.path_models, mpath, '{}_id{}.mod'.format( name, model_id))\n \n return model_file" }, { "identifier": "prepare_token", "path": "atmorep/transformer/transformer_base.py", "snippet": "def prepare_token( xin, embed, embed_token_info, with_cls = True) :\n\n (token_seq, token_info) = xin\n num_tokens = token_seq.shape[-6:-3]\n num_levels = token_seq.shape[1]\n\n # embedding, flatten along token dimension and spatial dimensions\n token_seq_embed = embed( torch.flatten( torch.flatten( token_seq, -3, -1), -3, -2) )\n \n # add auxiliary, global token information\n token_info = embed_token_info( token_info).to( token_seq_embed.device, non_blocking=True )\n # token_info = prepare_token_info( cf, token_info)\n token_info = token_info.reshape([-1] + list(token_seq_embed.shape[1:-1])+[token_info.shape[-1]])\n token_seq_embed = torch.cat( [token_seq_embed, token_info], -1)\n\n # class token\n if with_cls :\n # initialize to zero (mean of data)\n tts = token_seq_embed.shape\n cls_token = torch.zeros( (tts[0], 1, tts[2]), device=token_seq_embed.device)\n \n # add positional encoding\n token_seq_embed = positional_encoding_harmonic( token_seq_embed, num_levels, num_tokens)\n\n # add class token after positional encoding\n if with_cls :\n token_seq_embed = torch.cat( [ cls_token, token_seq_embed ], 1)\n\n return token_seq_embed" }, { "identifier": "checkpoint_wrapper", "path": "atmorep/transformer/transformer_base.py", "snippet": "def checkpoint_wrapper( cmodule, *kwargs) :\n if cmodule.training :\n return torch.utils.checkpoint.checkpoint( cmodule, *kwargs, use_reentrant=False)\n else :\n return cmodule(*kwargs)" }, { "identifier": "MultifieldDataSampler", "path": "atmorep/datasets/multifield_data_sampler.py", "snippet": "class MultifieldDataSampler( torch.utils.data.IterableDataset):\n \n ###################################################\n def __init__( self, file_path, years_data, fields, batch_size, \n num_t_samples, num_patches_per_t, num_load, pre_batch, \n rng_seed = None, file_shape = (-1, 721, 1440),\n level_type = 'ml', time_sampling = 1, \n smoothing = 0, file_format = 'grib', month = None, lat_sampling_weighted = True,\n geo_range = [[-90.,90.], [0.,360.]], \n fields_targets = [], pre_batch_targets = None\n ) :\n '''\n Data set for single dynamic field at an arbitrary number of vertical levels\n '''\n super( MultifieldDataSampler).__init__()\n\n self.fields = fields\n self.batch_size = batch_size\n\n self.pre_batch = pre_batch\n\n self.years_data = years_data\n self.time_sampling = time_sampling\n self.month = month\n self.range_lat = 90. 
- np.array( geo_range[0])\n self.range_lon = np.array( geo_range[1])\n self.geo_range = geo_range\n\n # order North to South\n self.range_lat = np.flip(self.range_lat) if self.range_lat[1] < self.range_lat[0] \\\n else self.range_lat\n\n # prepare range_lat and range_lon for sampling\n self.is_global = 0 == self.range_lat[0] and self.range_lon[0] == 0. \\\n and 180. == self.range_lat[1] and 360. == self.range_lon[1]\n \n # TODO: this assumes file_shape is set correctly and not just per field and it defines a \n # reference grid, likely has to be the coarsest\n self.res = 360. / file_shape[2]\n \n # avoid wrap around at poles\n pole_offset = np.ceil(fields[0][3][1] * fields[0][4][1] / 2) * self.res\n self.range_lat[0] = pole_offset if self.range_lat[0] < pole_offset else self.range_lat[0]\n self.range_lat[1] =180.-pole_offset if 180.-self.range_lat[1]<pole_offset else self.range_lat[1]\n\n self.lat_sampling_weighted = lat_sampling_weighted\n\n self.level_type = level_type\n self.smoothing = smoothing\n\n self.file_path = config.path_data\n self.file_shape = file_shape\n self.file_format = file_format\n self.num_load = num_load\n self.num_patches_per_t = int(num_patches_per_t)\n self.num_t_samples = int(num_t_samples)\n\n self.fields_targets = fields_targets\n self.pre_batch_targets = pre_batch_targets\n\n # convert to mathematical latitude and ensure North -> South ordering\n # shrink so that cookie cutting based on sampling does not exceed domain if it is not global\n if not self.is_global :\n # TODO: check that field data is consistent and covers the same spatial domain \n # TODO: code below assumes that fields[0] is global\n # TODO: code below does not handle anisotropic grids\n finfo = self.fields[0]\n # ensure that delta is a multiple of the coarse grid resolution\n ngrid1 = finfo[3][1] * finfo[4][1]\n ngrid2 = finfo[3][2] * finfo[4][2]\n delta1 = 0.5 * self.res * (ngrid1-1 if ngrid1 % 2==0 else ngrid1+1)\n delta2 = 0.5 * self.res * (ngrid2-1 if ngrid2 % 2==0 else ngrid2+1)\n self.range_lat += np.array([delta1, -delta1])\n self.range_lon += np.array([delta2, -delta2])\n\n # ensure all data loaders use same rng_seed and hence generate consistent data\n if not rng_seed :\n rng_seed = np.random.randint( 0, 100000, 1)[0]\n self.rng = np.random.default_rng( rng_seed)\n\n # create (source) fields\n self.datasets = self.create_loaders( fields)\n\n # create (target) fields \n self.datasets_targets = self.create_loaders( fields_targets)\n\n ###################################################\n def create_loaders( self, fields ) :\n\n datasets = []\n for field_idx, field_info in enumerate(fields) :\n\n datasets.append( [])\n\n # extract field info\n (vls, num_tokens, token_size) = field_info[2:5]\n\n if len(field_info) > 6 :\n corr_type = field_info[6]\n else:\n corr_type = 'global'\n\n smoothing = self.smoothing\n log_transform_data = False\n if len(field_info) > 7 :\n (data_type, file_shape, file_geo_range, file_format) = field_info[7][:4]\n if len( field_info[7]) > 6 :\n smoothing = field_info[7][6]\n print( '{} : smoothing = {}'.format( field_info[0], smoothing) )\n if len( field_info[7]) > 7 :\n log_transform_data = field_info[7][7]\n print( '{} : log_transform_data = {}'.format( field_info[0], log_transform_data) )\n else :\n data_type = 'era5'\n file_format = self.file_format\n file_shape = self.file_shape\n file_geo_range = [[90.,-90.], [0.,360.]]\n\n # static fields\n if 0 == field_info[1][0] :\n datasets[-1].append( StaticField( self.file_path, field_info, self.batch_size, data_type,\n 
file_shape, file_geo_range,\n num_tokens, token_size, smoothing, file_format, corr_type) )\n \n # dynamic fields\n elif 1 == field_info[1][0] :\n for vlevel in vls :\n datasets[-1].append( DynamicFieldLevel( self.file_path, self.years_data, field_info,\n self.batch_size, data_type,\n file_shape, file_geo_range,\n num_tokens, token_size,\n self.level_type, vlevel, self.time_sampling, \n smoothing, file_format, corr_type, \n log_transform_data ) )\n \n else :\n assert False\n\n return datasets \n\n ###################################################\n def shuffle( self) :\n\n # ensure that different parallel loaders create independent random shuffles\n delta = torch.randint( 0, 100000, (1,)).item()\n self.rng.bit_generator.advance( delta)\n\n self.idxs_perm = np.zeros( (0, 4), dtype=np.int64)\n\n # latitude, first map to mathematical lat coords in [0,180.], then to [0,pi] then\n # to z-value in [-1,1]\n if self.lat_sampling_weighted :\n lat_r = np.cos( self.range_lat/180. * np.pi)\n else :\n lat_r = self.range_lat\n\n # 1.00001 is a fudge factor since np.round(*.5) leads to flooring instead of proper up-rounding\n res_inv = 1.0 / self.res * 1.00001\n\n # loop over individual data year-month items \n for i_ym in range( len(self.years_months)) :\n \n ym = self.years_months[i_ym]\n \n # ensure a constant size of work load of data loader independent of the month length \n # factor of 128 is a fudge parameter to ensure that mod-ing leads to sufficiently \n # random wrap-around (with 1 instead of 128 there is clustering on the first days)\n hours_in_day = int( 24 / self.time_sampling)\n time_slices = 128 * 31 * hours_in_day\n time_slices_i_ym = hours_in_day * days_in_month( ym[0], ym[1])\n idxs_perm_temp = np.mod(self.rng.permutation(time_slices), time_slices_i_ym)\n # fixed number of time samples independent of length of month\n idxs_perm_temp = idxs_perm_temp[:self.num_t_samples]\n idxs_perm = np.zeros( (self.num_patches_per_t *idxs_perm_temp.shape[0],4) )\n\n # split up into file index and local index\n idx = 0\n for it in idxs_perm_temp :\n \n idx_patches = self.rng.random( (self.num_patches_per_t, 2) )\n # for jj in idx_patches :\n for jj in idx_patches :\n # area consistent sampling on the sphere (with less patches close to the pole)\n # see https://graphics.stanford.edu/courses/cs448-97-fall/notes.html , Lecture 7\n # for area preserving sampling of the sphere\n # py \\in [0,180], px \\in [0,360] (possibly with negative values for lon)\n if self.lat_sampling_weighted :\n py = ((np.arccos(lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0]) / np.pi) * 180.)\n else :\n py = (lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0])\n px = jj[1] * (self.range_lon[1] - self.range_lon[0]) + self.range_lon[0]\n\n # align with grid\n py = self.res * np.round( py * res_inv)\n px = self.res * np.round( px * res_inv)\n\n idxs_perm[idx] = np.array( [i_ym, it, py, px])\n idx = idx + 1\n\n self.idxs_perm = np.concatenate( (self.idxs_perm, idxs_perm[:idx]))\n\n # shuffle again to avoid clustering of patches by loop over idx_patches above\n self.idxs_perm = self.idxs_perm[self.rng.permutation(self.idxs_perm.shape[0])]\n self.idxs_perm = self.idxs_perm[self.rng.permutation(self.idxs_perm.shape[0])]\n # restrict to multiples of batch size\n lenbatch = int(math.floor(self.idxs_perm.shape[0] / self.batch_size)) * self.batch_size\n self.idxs_perm = self.idxs_perm[:lenbatch]\n # # DEBUG\n # print( 'self.idxs_perm.shape = {}'.format(self.idxs_perm.shape ))\n # rank = torch.distributed.get_rank()\n # fname = 'idxs_perm_rank{}_{}.dat'.format( 
rank, shape_to_str( self.idxs_perm.shape))\n # self.idxs_perm.tofile( fname)\n\n ###################################################\n def set_full_time_range( self) :\n\n self.idxs_perm = np.zeros( (0, 4), dtype=np.int64)\n\n # latitude, first map to mathematical lat coords in [0,180.], then to [0,pi] then\n # to z-value in [-1,1]\n if self.lat_sampling_weighted :\n lat_r = np.cos( self.range_lat/180. * np.pi)\n else :\n lat_r = self.range_lat\n\n # 1.00001 is a fudge factor since np.round(*.5) leads to flooring instead of proper up-rounding\n res_inv = 1.0 / self.res * 1.00001\n\n # loop over individual data year-month items \n for i_ym in range( len(self.years_months)) :\n\n ym = self.years_months[i_ym]\n\n hours_in_day = int( 24 / self.time_sampling)\n idxs_perm_temp = np.arange( hours_in_day * days_in_month( ym[0], ym[1]))\n idxs_perm = np.zeros( (self.num_patches_per_t *idxs_perm_temp.shape[0],4) )\n\n # split up into file index and local index\n idx = 0\n for it in idxs_perm_temp :\n\n idx_patches = self.rng.random( (self.num_patches_per_t, 2) )\n for jj in idx_patches :\n # area consistent sampling on the sphere (with less patches close to the pole)\n # see https://graphics.stanford.edu/courses/cs448-97-fall/notes.html , Lecture 7\n # for area preserving sampling of the sphere\n # py \\in [0,180], px \\in [0,360] (possibly with negative values for lon)\n if self.lat_sampling_weighted :\n py = ((np.arccos(lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0]) / np.pi) * 180.)\n else :\n py = (lat_r[0] + (lat_r[1]-lat_r[0]) * jj[0])\n px = jj[1] * (self.range_lon[1] - self.range_lon[0]) + self.range_lon[0]\n\n # align with grid\n py = self.res * np.round( py * res_inv)\n px = self.res * np.round( px * res_inv)\n\n idxs_perm[idx] = np.array( [i_ym, it, py, px])\n idx = idx + 1\n\n self.idxs_perm = np.concatenate( (self.idxs_perm, idxs_perm[:idx]))\n\n # shuffle again to avoid clustering of patches by loop over idx_patches above\n self.idxs_perm = self.idxs_perm[self.rng.permutation(self.idxs_perm.shape[0])]\n # restrict to multiples of batch size\n lenbatch = int(math.floor(self.idxs_perm.shape[0] / self.batch_size)) * self.batch_size\n self.idxs_perm = self.idxs_perm[:lenbatch]\n\n # # DEBUG\n # print( 'self.idxs_perm.shape = {}'.format(self.idxs_perm.shape ))\n # fname = 'idxs_perm_{}_{}.dat'.format( self.epoch_counter, shape_to_str( self.idxs_perm.shape))\n # self.idxs_perm.tofile( fname)\n\n ###################################################\n def load_data( self, batch_size = None) :\n\n years_data = self.years_data\n \n # ensure proper separation of different random samplers\n delta = torch.randint( 0, 1000, (1,)).item()\n self.rng.bit_generator.advance( delta)\n\n # select num_load random months and years \n perms = np.concatenate( [self.rng.permutation( np.arange(len(years_data))) for i in range(64)])\n perms = perms[:self.num_load]\n if self.month : \n self.years_months = [ (years_data[iyear], self.month) for iyear in perms]\n else : \n # stratified sampling of month to ensure proper distribution, needs to be adapted for \n # number of parallel workers not being divisible by 4\n # rank, ms = torch.distributed.get_rank() % 4, 3\n # perms_m = np.concatenate( [self.rng.permutation( np.arange( rank*ms+1, (rank+1)*ms+1))\n # for i in range(16)])\n perms_m = np.concatenate( [self.rng.permutation( np.arange( 1, 12+1)) for i in range(16)])\n self.years_months = [ ( years_data[iyear], perms_m[i]) for i,iyear in enumerate(perms)]\n\n # generate random permutations passed to the loaders for 
individual files \n # to ensure consistent processing\n self.shuffle()\n\n # perform actual loading of data\n \n for ds_field in self.datasets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n for ds_field in self.datasets_targets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n ###################################################\n def set_data( self, times_pos, batch_size = None) :\n '''\n times_pos = np.array( [ [year, month, day, hour, lat, lon], ...] )\n - lat \\in [90,-90] = [90N, 90S]\n - lon \\in [0,360]\n - (year,month) pairs should be a limited number since all data for these is loaded\n '''\n\n # extract required years and months\n years_months_all = np.array( [ [it[0], it[1]] for it in times_pos ], dtype=np.int64)\n self.years_months = list( zip( np.unique(years_months_all[:,0]),\n np.unique( years_months_all[:,1] )))\n\n # generate all the data\n self.idxs_perm = np.zeros( (len(times_pos), 4))\n for idx, item in enumerate( times_pos) :\n\n assert item[2] >= 1 and item[2] <= 31\n assert item[3] >= 0 and item[3] < int(24 / self.time_sampling)\n assert item[4] >= -90. and item[4] <= 90.\n\n # find year\n for i_ym, ym in enumerate( self.years_months) :\n if ym[0] == item[0] and ym[1] == item[1] :\n break\n\n # last term: correct for window from last file that is loaded\n it = (item[2] - 1) * (24./self.time_sampling) + item[3]\n # it = item[2] * (24./self.time_sampling) + item[3]\n idx_lat = item[4]\n idx_lon = item[5]\n\n # work with mathematical lat coordinates from here on\n self.idxs_perm[idx] = np.array( [i_ym, it, 90. - idx_lat, idx_lon])\n\n for ds_field in self.datasets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n for ds_field in self.datasets_targets :\n for ds in ds_field :\n ds.load_data( self.years_months, self.idxs_perm, batch_size)\n\n ###################################################\n def set_global( self, times, batch_size = None, token_overlap = [0, 0]) :\n ''' generate patch/token positions for global grid '''\n\n token_overlap = torch.tensor( token_overlap).to(torch.int64)\n\n # assumed that sanity checking that field data is consistent has been done \n ifield = 0\n field = self.fields[ifield]\n\n res = self.res\n side_len = torch.tensor( [field[3][1] * field[4][1] * res, field[3][2] * field[4][2] * res] )\n overlap = torch.tensor( [token_overlap[0]*field[4][1]*res, token_overlap[1]*field[4][2]*res] )\n side_len_2 = side_len / 2.\n assert all( overlap <= side_len_2), 'token_overlap too large for #tokens, reduce if possible'\n\n # generate tiles\n times_pos = []\n for ctime in times :\n\n lat = side_len_2[0].item()\n num_tiles_lat = 0\n while (lat + side_len_2[0].item()) < 180. :\n num_tiles_lat += 1\n lon = side_len_2[1].item() - overlap[1].item()/2.\n num_tiles_lon = 0\n while (lon - side_len_2[1]) < 360. :\n times_pos += [[*ctime, -lat + 90., np.mod(lon,360.) ]]\n lon += side_len[1].item() - overlap[1].item()\n num_tiles_lon += 1\n lat += side_len[0].item() - overlap[0].item()\n\n # add one additional row if no perfect tiling (sphere is toric in longitude so no special\n # handling necessary but not in latitude)\n # the added row is such that it goes exaclty down to the South pole and the offset North-wards\n # is computed based on this\n lat -= side_len[0] - overlap[0]\n if lat - side_len_2[0] < 180. :\n num_tiles_lat += 1\n lat = 180. 
- side_len_2[0].item() + res\n lon = side_len_2[1].item() - overlap[1].item()/2.\n while (lon - side_len_2[1]) < 360. :\n times_pos += [[*ctime, -lat + 90., np.mod(lon,360.) ]]\n lon += side_len[1].item() - overlap[1].item()\n\n # adjust batch size if necessary so that the evaluations split up across batches of equal size\n batch_size = num_tiles_lon\n\n print( 'Number of batches per global forecast: {}'.format( num_tiles_lat) )\n\n self.set_data( times_pos, batch_size)\n\n ###################################################\n def set_location( self, pos, years, months, num_t_samples_per_month, batch_size = None) :\n ''' random time sampling for fixed location '''\n\n times_pos = []\n for i_ym, ym in enumerate(itertools.product( years, months )) :\n\n # ensure a constant size of work load of data loader independent of the month length \n # factor of 128 is a fudge parameter to ensure that mod-ing leads to sufficiently \n # random wrap-around (with 1 instead of 128 there is clustering on the first days)\n hours_in_day = int( 24 / self.time_sampling)\n d_i_m = days_in_month( ym[0], ym[1]) \n perms = self.rng.permutation( num_t_samples_per_month * d_i_m)\n # ensure that days start at 1\n perms = np.mod( perms[ : num_t_samples_per_month], (d_i_m-1) ) + 1\n rhs = self.rng.integers(low=0, high=hours_in_day, size=num_t_samples_per_month )\n\n for rh, perm in zip( rhs, perms) :\n times_pos += [[ ym[0], ym[1], perm, rh, pos[0], pos[1]] ]\n\n # adjust batch size if necessary so that the evaluations split up across batches of equal size\n while 0 != (len(times_pos) % batch_size) :\n batch_size -= 1\n assert batch_size >= 1\n\n self.set_data( times_pos, batch_size)\n\n ###################################################\n def __iter__(self):\n\n iter_start, iter_end = self.worker_workset()\n\n for bidx in range( iter_start, iter_end) :\n\n sources = []\n for ds_field in self.datasets : \n sources.append( [ds_level[bidx] for ds_level in ds_field])\n # perform batch pre-processing, e.g. BERT-type masking\n if self.pre_batch :\n sources = self.pre_batch( sources)\n\n targets = []\n for ds_field in self.datasets_targets :\n targets.append( [ds_level[bidx] for ds_level in ds_field])\n # perform batch pre-processing, e.g. 
BERT-type masking\n if self.pre_batch_targets :\n targets = self.pre_batch_targets( targets)\n\n yield (sources,targets)\n\n ###################################################\n def __len__(self):\n return len(self.datasets[0][0])\n\n ###################################################\n def worker_workset( self) :\n\n worker_info = torch.utils.data.get_worker_info()\n\n if worker_info is None: \n iter_start = 0\n iter_end = len(self.datasets[0][0])\n\n else: \n # split workload\n temp = len(self.datasets[0][0])\n per_worker = int( np.floor( temp / float(worker_info.num_workers) ) )\n worker_id = worker_info.id\n iter_start = int(worker_id * per_worker)\n iter_end = int(iter_start + per_worker)\n if worker_info.id+1 == worker_info.num_workers :\n iter_end = int(temp)\n\n return iter_start, iter_end" }, { "identifier": "TransformerEncoder", "path": "atmorep/transformer/transformer_encoder.py", "snippet": "class TransformerEncoder(torch.nn.Module) :\n\n def __init__(self, cf, field_idx, with_embed = True):\n ''' '''\n \n super(TransformerEncoder, self).__init__()\n\n self.cf = cf\n self.field_idx = field_idx\n self.with_embed = with_embed\n\n ###################################\n def create( self) :\n\n cf = self.cf\n with_ln = cf.with_layernorm\n\n self.fields_index = {}\n for ifield, field_info in enumerate(cf.fields) :\n self.fields_index[ field_info[0] ] = ifield \n\n field_info = cf.fields[self.field_idx]\n \n # learnable linear embedding\n if self.with_embed :\n net_dim_input = np.prod(field_info[4]) \n self.embed = torch.nn.Linear( net_dim_input, field_info[1][1]- cf.size_token_info_net)\n\n # num_heads_coupling\n dor = cf.dropout_rate\n self.heads = torch.nn.ModuleList()\n self.mlps = torch.nn.ModuleList()\n for il in range( cf.encoder_num_layers) :\n\n nhc = cf.coupling_num_heads_per_field * len( field_info[1][2])\n # nhs = cf.encoder_num_heads - nhc\n nhs = cf.encoder_num_heads\n # number of tokens\n n_toks = torch.tensor( field_info[3], dtype=torch.int64)\n \n dims_embed = [ field_info[1][1] ]\n vl_num_tokens = [len(field_info[2])] + field_info[3]\n for field_coupled in field_info[1][2] : \n if 'axial' in cf.encoder_att_type :\n finfo_other = cf.fields[ self.fields_index[field_coupled] ]\n dims_embed.append( finfo_other[1][1] )\n vl_num_tokens.append( [len(finfo_other[2])] + finfo_other[3] )\n else : \n for _ in range(cf.coupling_num_heads_per_field) :\n finfo_other = cf.fields[ self.fields_index[field_coupled] ]\n dims_embed.append( finfo_other[1][1] )\n vl_num_tokens.append( [len(finfo_other[2])] + finfo_other[3] )\n\n # attention heads\n if 'dense' == cf.encoder_att_type :\n head = MultiInterAttentionHead( nhs, nhc, dims_embed, with_ln, dor, cf.with_qk_lnorm, \n cf.grad_checkpointing, with_attention=cf.attention )\n elif 'axial' in cf.encoder_att_type :\n par = True if 'parallel' in cf.encoder_att_type else False\n head = MultiFieldAxialAttention( [3,2,1], dims_embed, nhs, nhc, par, dor)\n else :\n assert False, 'Unsupported attention type: ' + cf.decoder_att_type\n self.heads.append( head)\n # feature space mapping sub-block\n self.mlps.append( MLP( dims_embed[0], cf.encoder_num_mlp_layers, with_ln, dropout_rate=dor,\n grad_checkpointing = cf.grad_checkpointing))\n\n return self\n\n ###################################\n def forward(self, xin):\n ''' '''\n assert False" }, { "identifier": "TransformerDecoder", "path": "atmorep/transformer/transformer_decoder.py", "snippet": "class TransformerDecoder(torch.nn.Module) :\n\n ###################################\n def 
__init__(self, cf, field_info ):\n '''\n Vaswani transformer corresponds to self_att = True and cross_att_ratio = 1. *and* encoder_out \n passed to forward is the output of the encoder (duplicated to match the number of layers)\n '''\n super( TransformerDecoder, self).__init__()\n \n self.cf = cf\n self.num_layers = cf.decoder_num_layers\n self.dim_embed = field_info[1][1]\n\n # TODO: split up create() for consistency\n\n num_heads = cf.decoder_num_heads\n num_mlp_layers = cf.decoder_num_mlp_layers \n self_att = cf.decoder_self_att\n cross_att_ratio = cf.decoder_cross_att_ratio \n\n num_heads_other = int(num_heads * cross_att_ratio)\n num_heads_self = num_heads - num_heads_other\n\n dim_embed = self.dim_embed\n\n # first layers, potentially with U-Net type coupling\n self.blocks = torch.nn.ModuleList()\n for il in range( min( cf.encoder_num_layers, cf.decoder_num_layers) ) :\n\n # self attention sub-block (as in original Vaswani)\n if self_att :\n self.blocks.append( MultiSelfAttentionHead( dim_embed, num_heads, cf.dropout_rate, \n cf.decoder_att_type, cf.with_qk_lnorm,\n cf.grad_checkpointing) )\n # cross attention between encoder and decoder\n if 'dense' == cf.decoder_att_type :\n self.blocks.append( MultiCrossAttentionHead( dim_embed, num_heads_self, num_heads_other, \n cf.dropout_rate, cf.with_qk_lnorm,\n cf.grad_checkpointing, cf.attention ) )\n elif 'axial' in cf.decoder_att_type :\n par = True if 'parallel' in cf.encoder_att_type else False\n self.blocks.append( MultiFieldAxialAttention( [3,2,1], [dim_embed,dim_embed], \n num_heads_self, num_heads_other, par, cf.dropout_rate) )\n else :\n assert False, 'Unsupported attention type: ' + cf.decoder_att_type \n # feature space mapping sub-block\n self.blocks.append( MLP( dim_embed, num_mlp_layers, cf.with_layernorm, \n dropout_rate = cf.dropout_rate, \n grad_checkpointing = cf.grad_checkpointing) )\n\n # remaining strictly non-coupled layers (if decoder is deeper than the encoder)\n dim_embed = self.dim_embed\n for il in range( cf.encoder_num_layers, cf.decoder_num_layers) :\n self.blocks.append( MultiSelfAttentionHead( dim_embed, num_heads, cf.dropout_rate, \n cf.decoder_att_type, cf.with_qk_lnorm))\n self.blocks.append( MLP( dim_embed, num_mlp_layers, cf.with_layernorm,\n grad_checkpointing = cf.grad_checkpointing ))\n\n self.checkpoint = identity\n if cf.grad_checkpointing :\n self.checkpoint = checkpoint_wrapper\n\n ###################################\n def device( self):\n return next(self.parameters()).device\n\n ###################################\n def forward(self, x):\n '''Evaluate decoder'''\n\n dev = self.device()\n\n (decoder_in, encoder_out) = x\n encoder_out.reverse()\n \n token_seq_embed = decoder_in.to( dev, non_blocking=True)\n\n atts = []\n car = self.cf.decoder_cross_att_rate\n for il in range(self.num_layers) : \n token_seq_embed, att = self.checkpoint( self.blocks[2*il], token_seq_embed, \n encoder_out[int(car*il)].to(dev, non_blocking=True) )\n token_seq_embed = self.checkpoint( self.blocks[2*il+1], token_seq_embed, \n encoder_out[int(car*il)].to(dev, non_blocking=True) )\n atts += [ att ]\n\n return token_seq_embed, atts\n\n ###################################\n def get_attention( self, xin, iblock) :\n ''' \n Get attention and projected values from specific layer and her head\n '''\n\n # assert False\n print(\"inside get_attention in transformer_decoder.py\")\n # embedding\n token_seq_embed = decoder_in.to( dev, non_blocking=True)\n car = self.cf.decoder_cross_att_rate\n for il in range(self.num_layers) :\n 
token_seq_embed = self.checkpoint( self.blocks[2*il], token_seq_embed, \n encoder_out[int(car*il)].to(dev, non_blocking=True) )\n \n atts = self.blocks[2*il].get_attention( token_seq_embed )\n\n return atts #(atts, vsh)" }, { "identifier": "TailEnsemble", "path": "atmorep/transformer/tail_ensemble.py", "snippet": "class TailEnsemble( torch.nn.Module) :\n\n def __init__( self, cf, dim_embed, dim_net_input, net_tail_num_nets = -1 ) :\n \n super( TailEnsemble, self).__init__()\n\n self.cf = cf\n self.dim_embed = dim_embed\n self.dim_input = dim_net_input\n self.net_tail_num_nets = net_tail_num_nets if net_tail_num_nets > 0 else cf.net_tail_num_nets\n\n ###################################################\n def create( self) :\n\n dim = self.dim_embed\n\n # tail networks: use class token to make prediction \n nonlin = torch.nn.GELU()\n self.tail_nets = torch.nn.ModuleList()\n for inet in range( self.net_tail_num_nets) :\n self.tail_nets.append( torch.nn.ModuleList())\n self.tail_nets[-1].append( torch.nn.LayerNorm( dim, elementwise_affine=True))\n for _ in range( self.cf.net_tail_num_layers) :\n self.tail_nets[-1].append( torch.nn.Linear( dim, dim, bias=True))\n self.tail_nets[-1].append( nonlin)\n # un-embedding layer\n self.tail_nets[-1].append( torch.nn.Linear( dim, self.dim_input, bias=True)) \n\n return self \n\n ###################################\n def device( self):\n return next(self.parameters()).device\n\n ###################################################\n def forward( self, rep ) :\n\n rep.to( self.device())\n\n # evaluate ensemble of tail networks\n preds = []\n for tail_net in self.tail_nets : \n cpred = rep\n for block in tail_net :\n cpred = block(cpred)\n preds.append( cpred.unsqueeze(1))\n preds = torch.cat( preds, 1)\n\n # # mean and variance of ensemble\n if 1 == len(self.tail_nets) : # avoid that std_dev is NaN with 1 \"ensemble\" member\n dev = preds.device\n pred = ( torch.mean(preds,1), torch.zeros( torch.std(preds,1).shape, device=dev ), preds )\n else :\n pred = ( torch.mean(preds,1), torch.std(preds,1), preds )\n\n return pred" } ]
import torch
import numpy as np
import code
import atmorep.utils.utils as utils
from atmorep.utils.utils import identity
from atmorep.utils.utils import NetMode
from atmorep.utils.utils import get_model_filename
from atmorep.transformer.transformer_base import prepare_token
from atmorep.transformer.transformer_base import checkpoint_wrapper
from atmorep.datasets.multifield_data_sampler import MultifieldDataSampler
from atmorep.transformer.transformer_encoder import TransformerEncoder
from atmorep.transformer.transformer_decoder import TransformerDecoder
from atmorep.transformer.tail_ensemble import TailEnsemble
11,801
cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max devices' device = self.devices[ field_info[1][3] ] # set device if 
self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0]) self.checkpoint = identity if cf.grad_checkpointing :
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class AtmoRepData( torch.nn.Module) : def __init__( self, net) : '''Wrapper class for AtmoRep that handles data loading''' super( AtmoRepData, self).__init__() self.data_loader_test = None self.data_loader_train = None self.data_loader_iter = None self.net = net # ensure that all data loaders have the same seed and hence load the same data self.rng_seed = net.cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) ################################################### def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) : '''Load data''' cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_max if num_loader_workers < 0 : num_loader_workers = cf.num_loader_workers if mode == NetMode.train : self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers) elif mode == NetMode.test : batch_size = cf.batch_size_test self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers) else : assert False ################################################### def _load_data( self, dataset, batch_size, num_loader_workers) : '''Private implementation for load''' dataset.load_data( batch_size) loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': num_loader_workers, 'pin_memory': True} data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) return data_loader ################################################### def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_data( times_pos, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_global( times, batch_size, cf.token_overlap) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def _set_data( self, dataset, mode : NetMode, 
batch_size = -1, loader_workers = -1) : '''Private implementation for set_data, set_global''' cf = self.net.cf if loader_workers < 0 : loader_workers = cf.num_loader_workers loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': loader_workers, 'pin_memory': True} if mode == NetMode.train : self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) elif mode == NetMode.test : self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) else : assert False ################################################### def normalizer( self, field, vl_idx) : if isinstance( field, str) : for fidx, field_info in enumerate(self.cf.fields) : if field == field_info[0] : break assert fidx < len(self.cf.fields), 'invalid field' normalizer = self.dataset_train.datasets[fidx].normalizer elif isinstance( field, int) : normalizer = self.dataset_train.datasets[field][vl_idx].normalizer else : assert False, 'invalid argument type (has to be index to cf.fields or field name)' return normalizer ################################################### def mode( self, mode : NetMode) : if mode == NetMode.train : self.data_loader_iter = iter(self.data_loader_train) self.net.train() elif mode == NetMode.test : self.data_loader_iter = iter(self.data_loader_test) self.net.eval() else : assert False self.cur_mode = mode ################################################### def len( self, mode : NetMode) : if mode == NetMode.train : return len(self.data_loader_train) elif mode == NetMode.test : return len(self.data_loader_test) else : assert False ################################################### def next( self) : return next(self.data_loader_iter) ################################################### def forward( self, xin) : pred = self.net.forward( xin) return pred ################################################### def get_attention( self, xin): #, field_idx) : attn = self.net.get_attention( xin) #, field_idx) return attn ################################################### def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None, load_pretrained=True) : if create_net : self.net.create( devices, load_pretrained) self.pre_batch = pre_batch self.pre_batch_targets = pre_batch_targets cf = self.net.cf self.dataset_train = MultifieldDataSampler( cf.data_dir, cf.years_train, cf.fields, batch_size = cf.batch_size_start, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_train, num_load = cf.num_files_train, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields, batch_size = cf.batch_size_test, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_test, num_load = cf.num_files_test, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, lat_sampling_weighted = cf.lat_sampling_weighted, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) return self 
#################################################################################################### class AtmoRep( torch.nn.Module) : def __init__(self, cf) : '''Constructor''' super( AtmoRep, self).__init__() self.cf = cf ################################################### def create( self, devices, load_pretrained=True) : '''Create network''' cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in 
enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max devices' device = self.devices[ field_info[1][3] ] # set device if self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0]) self.checkpoint = identity if cf.grad_checkpointing :
self.checkpoint = checkpoint_wrapper
4
2023-10-09 19:42:46+00:00
16k
NKI-AI/ahcore
ahcore/callbacks/wsi_metric_callback.py
[ { "identifier": "WriteH5Callback", "path": "ahcore/callbacks/h5_callback.py", "snippet": "class WriteH5Callback(Callback):\n def __init__(\n self,\n max_queue_size: int,\n max_concurrent_writers: int,\n dump_dir: Path,\n normalization_type: str = str(NormalizationType.LOGITS),\n precision: str = str(InferencePrecision.FP32),\n ):\n \"\"\"\n Callback to write predictions to H5 files. This callback is used to write whole-slide predictions to single H5\n files in a separate thread.\n\n TODO:\n - Add support for distributed data parallel\n\n Parameters\n ----------\n max_queue_size : int\n The maximum number of items to store in the queue (i.e. tiles).\n max_concurrent_writers : int\n The maximum number of concurrent writers.\n dump_dir : pathlib.Path\n The directory to dump the H5 files to.\n normalization_type : str\n The normalization type to use for the predictions. One of \"sigmoid\", \"softmax\" or \"logits\".\n precision : str\n The precision to use for the predictions. One of \"float16\", \"float32\" or \"uint8\".\n \"\"\"\n super().__init__()\n self._writers: dict[str, _WriterMessage] = {}\n self._current_filename = None\n self._dump_dir = Path(dump_dir)\n self._max_queue_size = max_queue_size\n self._semaphore = Semaphore(max_concurrent_writers)\n self._dataset_index = 0\n self._normalization_type: NormalizationType = NormalizationType(normalization_type)\n self._precision: InferencePrecision = InferencePrecision(precision)\n\n self._logger = get_logger(type(self).__name__)\n\n @property\n def dump_dir(self) -> Path:\n return self._dump_dir\n\n def __process_management(self) -> None:\n \"\"\"\n Handle the graceful termination of multiple processes at the end of h5 writing.\n This block ensures proper release of resources allocated during multiprocessing.\n\n Returns\n -------\n None\n \"\"\"\n assert self._current_filename, \"_current_filename shouldn't be None here\"\n\n self._writers[self._current_filename][\"queue\"].put(None)\n self._writers[self._current_filename][\"process\"].join()\n self._writers[self._current_filename][\"process\"].close()\n self._writers[self._current_filename][\"queue\"].close()\n\n @property\n def writers(self) -> dict[str, _WriterMessage]:\n return self._writers\n\n def _batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n stage: str,\n dataloader_idx: int = 0,\n ) -> None:\n filename = batch[\"path\"][0] # Filenames are constant across the batch.\n if any([filename != path for path in batch[\"path\"]]):\n raise ValueError(\n \"All paths in a batch must be the same. 
\"\n \"Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler.\"\n )\n\n if filename != self._current_filename:\n output_filename = _get_h5_output_filename(\n self.dump_dir,\n filename,\n model_name=str(pl_module.name),\n step=pl_module.global_step,\n )\n output_filename.parent.mkdir(parents=True, exist_ok=True)\n link_fn = (\n self.dump_dir / \"outputs\" / f\"{pl_module.name}\" / f\"step_{pl_module.global_step}\" / \"image_h5_link.txt\"\n )\n with open(link_fn, \"a\" if link_fn.is_file() else \"w\") as file:\n file.write(f\"{filename},{output_filename}\\n\")\n\n self._logger.debug(\"%s -> %s\", filename, output_filename)\n if self._current_filename is not None:\n self.__process_management()\n self._semaphore.release()\n\n self._semaphore.acquire()\n\n if stage == \"validate\":\n total_dataset: ConcatDataset = trainer.datamodule.validate_dataset # type: ignore\n elif stage == \"predict\":\n total_dataset: ConcatDataset = trainer.predict_dataloaders.dataset # type: ignore\n else:\n raise NotImplementedError(f\"Stage {stage} is not supported for {self.__class__.__name__}.\")\n\n current_dataset: TiledWsiDataset\n current_dataset, _ = total_dataset.index_to_dataset(self._dataset_index) # type: ignore\n slide_image = current_dataset.slide_image\n\n data_description: DataDescription = pl_module.data_description # type: ignore\n inference_grid: GridDescription = data_description.inference_grid\n\n mpp = inference_grid.mpp\n if mpp is None:\n mpp = slide_image.mpp\n\n _, size = slide_image.get_scaled_slide_bounds(slide_image.get_scaling(mpp))\n num_samples = len(current_dataset)\n\n # Let's get the data_description, so we can figure out the tile size and things like that\n tile_size = inference_grid.tile_size\n tile_overlap = inference_grid.tile_overlap\n\n # TODO: We are really putting strange things in the Queue if we may believe mypy\n new_queue: Queue[Any] = Queue() # pylint: disable=unsubscriptable-object\n parent_conn, child_conn = Pipe()\n new_writer = H5FileImageWriter(\n output_filename,\n size=size,\n mpp=mpp,\n tile_size=tile_size,\n tile_overlap=tile_overlap,\n num_samples=num_samples,\n color_profile=None,\n is_compressed_image=False,\n progress=None,\n precision=InferencePrecision(self._precision),\n )\n new_process = Process(target=new_writer.consume, args=(self.generator(new_queue), child_conn))\n new_process.start()\n self._writers[filename] = {\n \"queue\": new_queue,\n \"writer\": new_writer,\n \"process\": new_process,\n \"connection\": parent_conn,\n }\n self._current_filename = filename\n\n prediction = outputs[\"prediction\"]\n prediction = NormalizationType.normalize(self._normalization_type)(prediction).detach().cpu().numpy()\n coordinates_x, coordinates_y = batch[\"coordinates\"]\n coordinates = torch.stack([coordinates_x, coordinates_y]).T.detach().cpu().numpy()\n self._writers[filename][\"queue\"].put((coordinates, prediction))\n self._dataset_index += prediction.shape[0]\n\n def _epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n if self._current_filename is not None:\n self.__process_management()\n self._semaphore.release()\n self._dataset_index = 0\n # Reset current filename to None for correct execution of subsequent validation loop\n self._current_filename = None\n # Clear all the writers from the current epoch\n self._writers = {}\n\n def on_validation_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int = 0,\n ) -> None:\n 
self._batch_end(trainer, pl_module, outputs, batch, batch_idx, \"validate\", dataloader_idx)\n\n def on_predict_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Any,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int = 0,\n ) -> None:\n self._batch_end(trainer, pl_module, outputs, batch, batch_idx, \"predict\", dataloader_idx)\n\n def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n self._epoch_end(trainer, pl_module)\n\n def on_predict_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:\n self._epoch_end(trainer, pl_module)\n\n @staticmethod\n def generator(\n queue: Queue[Optional[GenericArray]], # pylint: disable=unsubscriptable-object\n ) -> Generator[GenericArray, None, None]:\n while True:\n batch = queue.get()\n if batch is None:\n break\n yield batch" }, { "identifier": "AhCoreLightningModule", "path": "ahcore/lit_module.py", "snippet": "class AhCoreLightningModule(pl.LightningModule):\n RELEVANT_KEYS = [\n \"coordinates\",\n \"mpp\",\n \"path\",\n \"region_index\",\n \"grid_local_coordinates\",\n \"grid_index\",\n ]\n\n def __init__(\n self,\n model: nn.Module,\n optimizer: torch.optim.Optimizer, # noqa\n data_description: DataDescription,\n loss: nn.Module | None = None,\n augmentations: dict[str, nn.Module] | None = None,\n metrics: dict[str, MetricFactory | WSIMetricFactory] | None = None,\n scheduler: torch.optim.lr_scheduler.LRScheduler | None = None, # noqa\n ):\n super().__init__()\n\n self.save_hyperparameters(\n logger=False,\n ignore=[\n \"model\",\n \"augmentations\",\n \"metrics\",\n \"data_description\",\n \"loss\",\n ],\n ) # TODO: we should send the hyperparams to the logger elsewhere\n\n self._num_classes = data_description.num_classes\n self._model = model(out_channels=self._num_classes)\n self._augmentations = augmentations\n\n self._loss = loss\n if metrics is not None:\n tile_metric = metrics.get(\"tile_level\")\n wsi_metric = metrics.get(\"wsi_level\", None)\n if tile_metric is not None and not isinstance(tile_metric, MetricFactory):\n raise ConfigurationError(\"Tile metrics must be of type MetricFactory\")\n if wsi_metric is not None and not isinstance(wsi_metric, WSIMetricFactory):\n raise ConfigurationError(\"WSI metrics must be of type WSIMetricFactory\")\n\n self._tile_metric = tile_metric\n self._wsi_metrics = wsi_metric\n\n self._data_description = data_description\n\n @property\n def wsi_metrics(self) -> WSIMetricFactory | None:\n return self._wsi_metrics\n\n @property\n def name(self) -> str:\n return str(self._model.__class__.__name__)\n\n def forward(self, sample: torch.Tensor) -> Any:\n \"\"\"This function is only used during inference\"\"\"\n self._model.eval()\n return self._model.forward(sample)\n\n @property\n def data_description(self) -> DataDescription:\n return self._data_description\n\n def _compute_metrics(\n self,\n prediction: torch.Tensor,\n target: torch.Tensor,\n roi: torch.Tensor | None,\n stage: TrainerFn | str,\n ) -> dict[str, torch.Tensor]:\n if not self._tile_metric:\n return {}\n\n _stage = stage.value if isinstance(stage, TrainerFn) else stage\n metrics = {f\"{_stage}/{k}\": v for k, v in self._tile_metric(prediction, target, roi).items()}\n return metrics\n\n def do_step(self, batch: DlupDatasetSample, batch_idx: int, stage: TrainerFn | str) -> LitModuleSample:\n if self._augmentations and stage in self._augmentations:\n batch = self._augmentations[stage](batch)\n\n if self._loss is None:\n raise RuntimeError(\n 
f\"Loss is not defined for {self.__class__.__name__}. \"\n f\"This is required during training and validation\"\n )\n\n _target = batch[\"target\"]\n # Batch size is required for accurate loss calculation and logging\n batch_size = batch[\"image\"].shape[0]\n # ROIs can reduce the usable area of the inputs, the loss should be scaled appropriately\n roi = batch.get(\"roi\", None)\n\n if stage == \"fit\":\n _prediction = self._model(batch[\"image\"])\n batch[\"prediction\"] = _prediction\n else:\n batch = {**batch, **self._get_inference_prediction(batch[\"image\"])}\n _prediction = batch[\"prediction\"]\n\n loss = self._loss(_prediction, _target, roi)\n\n # The relevant_dict contains values to know where the tiles originate.\n _relevant_dict = {k: v for k, v in batch.items() if k in self.RELEVANT_KEYS}\n _metrics = self._compute_metrics(_prediction, _target, roi, stage=stage)\n _loss = loss.mean()\n # TODO: This can be a TypedDict\n output = {\n \"loss\": _loss,\n \"loss_per_sample\": loss.clone().detach(),\n \"metrics\": _metrics,\n **_relevant_dict,\n }\n if stage != \"fit\":\n output[\"prediction\"] = _prediction\n\n _stage = stage.value if isinstance(stage, TrainerFn) else stage\n\n self.log(\n f\"{_stage}/loss\",\n _loss,\n batch_size=batch_size,\n sync_dist=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n # Log the metrics\n self.log_dict(\n _metrics,\n batch_size=batch_size,\n sync_dist=True,\n prog_bar=False,\n on_epoch=True,\n on_step=False,\n )\n\n return output\n\n def _get_inference_prediction(self, _input: torch.Tensor) -> dict[str, torch.Tensor]:\n output = {}\n output[\"prediction\"] = self._model(_input)\n return output\n\n def training_step(self, batch: dict[str, Any], batch_idx: int) -> dict[str, Any]:\n output = self.do_step(batch, batch_idx, stage=\"fit\")\n return output\n\n def validation_step(self, batch: dict[str, Any], batch_idx: int) -> dict[str, Any]:\n output = self.do_step(batch, batch_idx, stage=\"validate\")\n\n # This is a sanity check. We expect the filenames to be constant across the batch.\n filename = batch[\"path\"][0]\n if any([filename != f for f in batch[\"path\"]]):\n raise ValueError(\"Filenames are not constant across the batch.\")\n return output\n\n def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:\n if self._augmentations and \"predict\" in self._augmentations:\n batch = self._augmentations[\"predict\"](batch)\n\n _relevant_dict = {k: v for k, v in batch.items() if k in self.RELEVANT_KEYS}\n batch = {**batch, **self._get_inference_prediction(batch[\"image\"])}\n _prediction = batch[\"prediction\"]\n output = {\"prediction\": _prediction, **_relevant_dict}\n\n # This is a sanity check. 
We expect the filenames to be constant across the batch.\n filename = batch[\"path\"][0]\n if any([filename != f for f in batch[\"path\"]]):\n raise ValueError(\"Filenames are not constant across the batch.\")\n return output\n\n def configure_optimizers(self) -> Any:\n optimizer = self.hparams.optimizer(params=self.parameters()) # type: ignore\n if self.hparams.scheduler is not None: # type: ignore\n scheduler = self.hparams.scheduler(optimizer=optimizer) # type: ignore\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": scheduler,\n \"monitor\": \"validate/loss\",\n \"interval\": \"epoch\",\n \"frequency\": self.trainer.check_val_every_n_epoch,\n },\n }\n return {\"optimizer\": optimizer}" }, { "identifier": "WSIMetricFactory", "path": "ahcore/metrics/metrics.py", "snippet": "class WSIMetricFactory:\n # TODO: this should be rewritten to actually be a factory\n def __init__(self, metrics: list[WSIMetric]) -> None:\n super().__init__()\n names = [metric.name for metric in metrics]\n if len(set(names)) != len(names):\n raise RuntimeError(\"Each individual metric must have a different name.\")\n\n self._metrics = metrics\n\n @classmethod\n def for_segmentation(cls, *args: Any, **kwargs: Any) -> WSIMetricFactory:\n dices = WSIDiceMetric(*args, **kwargs)\n return cls([dices])\n\n @classmethod\n def for_wsi_classification(cls, *args: Any, **kwargs: Any) -> WSIMetricFactory:\n raise NotImplementedError\n\n @classmethod\n def for_tile_classification(cls, roi_name: str, label: str, threshold: float) -> WSIMetricFactory:\n raise NotImplementedError\n\n def process_batch(\n self,\n predictions: torch.Tensor,\n target: torch.Tensor,\n wsi_name: str,\n roi: torch.Tensor | None,\n ) -> None:\n for metric in self._metrics:\n metric.process_batch(predictions, target, wsi_name=wsi_name, roi=roi)\n\n def get_average_score(\n self, precomputed_output: list[list[dict[str, dict[str, float]]]] | None = None\n ) -> dict[str, float]:\n output = {}\n for metric in self._metrics:\n output.update(metric.get_average_score(precomputed_output))\n return output\n\n def reset(self) -> None:\n for metric in self._metrics:\n metric.reset()\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(metrics={self._metrics})\"" }, { "identifier": "H5FileImageReader", "path": "ahcore/readers.py", "snippet": "class H5FileImageReader:\n def __init__(self, filename: Path, stitching_mode: StitchingMode) -> None:\n self._filename = filename\n self._stitching_mode = stitching_mode\n\n self.__empty_tile: GenericArray | None = None\n\n self._h5file: Optional[h5py.File] = None\n self._metadata = None\n self._mpp = None\n self._tile_size = None\n self._tile_overlap = None\n self._size = None\n self._num_channels = None\n self._dtype = None\n self._stride = None\n\n @classmethod\n def from_file_path(cls, filename: Path, stitching_mode: StitchingMode = StitchingMode.CROP) -> \"H5FileImageReader\":\n return cls(filename=filename, stitching_mode=stitching_mode)\n\n @property\n def size(self) -> tuple[int, int]:\n if not self._size:\n self._open_file()\n assert self._size\n return self._size\n\n @property\n def mpp(self) -> float:\n if not self._mpp:\n self._open_file()\n assert self._mpp\n return self._mpp\n\n def get_mpp(self, scaling: Optional[float]) -> float:\n if not self._mpp:\n self._open_file()\n assert self._mpp\n if scaling is None:\n return self.mpp\n\n return self._mpp / scaling\n\n def get_scaling(self, mpp: Optional[float]) -> float:\n \"\"\"Inverse of get_mpp().\"\"\"\n if not self._mpp:\n 
self._open_file()\n assert self._mpp\n if not mpp:\n return 1.0\n return self._mpp / mpp\n\n def _open_file(self) -> None:\n if not self._filename.is_file():\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(self._filename))\n\n try:\n self._h5file = h5py.File(self._filename, \"r\")\n except OSError as e:\n logger.error(f\"Could not open file {self._filename}: {e}\")\n raise e\n\n try:\n self._metadata = json.loads(self._h5file.attrs[\"metadata\"])\n except KeyError as e:\n logger.error(f\"Could not read metadata from file {self._filename}: {e}\")\n raise e\n\n if not self._metadata:\n raise ValueError(\"Metadata of h5 file is empty.\")\n\n self._mpp = self._metadata[\"mpp\"]\n self._tile_size = self._metadata[\"tile_size\"]\n self._tile_overlap = self._metadata[\"tile_overlap\"]\n self._size = self._metadata[\"size\"]\n self._num_channels = self._metadata[\"num_channels\"]\n self._dtype = self._metadata[\"dtype\"]\n self._precision = self._metadata[\"precision\"]\n self._multiplier = self._metadata[\"multiplier\"]\n self._stride = (\n self._tile_size[0] - self._tile_overlap[0],\n self._tile_size[1] - self._tile_overlap[1],\n )\n\n if self._metadata[\"has_color_profile\"]:\n _color_profile = self._h5file[\"color_profile\"][()].tobytes()\n raise NotImplementedError(f\"Color profiles are not yet implemented, and are present in {self._filename}.\")\n\n def __enter__(self) -> \"H5FileImageReader\":\n if self._h5file is None:\n self._open_file()\n return self\n\n def _empty_tile(self) -> GenericArray:\n if self.__empty_tile is not None:\n return self.__empty_tile\n\n # When this happens we would already be in the read_region, and self._num_channels would be populated.\n assert self._num_channels\n\n self.__empty_tile = np.zeros((self._num_channels, *self._tile_size), dtype=self._dtype)\n return self.__empty_tile\n\n def read_region(\n self,\n location: tuple[int, int],\n scaling: float,\n size: tuple[int, int],\n ) -> GenericArray:\n \"\"\"\n\n Parameters\n ----------\n location : tuple[int, int]\n Location from the top left (x, y) in pixel coordinates given at the requested scaling.\n scaling : float\n size : tuple[int, int]\n Size of the output region\n\n Returns\n -------\n np.ndarray\n The requested region.\n \"\"\"\n if scaling == 1.0:\n return self.read_region_raw(location, size)\n\n order = 1\n # Calculate original location and size considering the scaling\n\n # unpack for mypy\n l1, l2 = location\n s1, s2 = size\n\n original_location = (\n int(math.floor(l1 / scaling)) - order,\n int(math.floor(l2 / scaling)) - order,\n )\n original_size = (\n int(math.ceil(s1 / scaling)) + order,\n int(math.ceil(s2 / scaling)) + order,\n )\n\n raw_region = self.read_region_raw(original_location, original_size)\n\n # Determine the fractional start and end coordinates for mapping\n fractional_start = tuple(map(lambda _, ol: (_ / scaling) - ol + order, location, original_location))\n fractional_end = tuple(fs + size[i] / scaling for i, fs in enumerate(fractional_start))\n\n # Create an array of coordinates for map_coordinates\n # mypy doesn't properly understand yet that the complex type is valid\n coordinates = np.mgrid[\n fractional_start[0] : fractional_end[0] : complex(size[0]), # type: ignore\n fractional_start[1] : fractional_end[1] : complex(size[1]), # type: ignore\n ]\n coordinates = np.moveaxis(coordinates, 0, -1)\n\n # Interpolate using map_coordinates for all channels\n grid = np.mgrid[: raw_region.shape[0]]\n coordinates = np.concatenate([grid[:, None, None], 
coordinates], axis=0)\n # scipy doesn't have proper typing yet\n rescaled_region = cast(GenericArray, map_coordinates(raw_region, coordinates, order=order))\n\n return rescaled_region\n\n def read_region_raw(self, location: tuple[int, int], size: tuple[int, int]) -> GenericArray:\n \"\"\"\n Reads a region in the stored h5 file. This function stitches the regions as saved in the h5 file. Doing this\n it takes into account:\n 1) The region overlap, several region merging strategies are implemented: cropping, averaging across borders\n and taking the maximum across borders.\n 2) If tiles are saved or not. In case the tiles are skipped due to a background mask, an empty tile is returned.\n\n Parameters\n ----------\n location : tuple[int, int]\n Coordinates (x, y) of the upper left corner of the region.\n size : tuple[int, int]\n The (h, w) size of the extracted region.\n\n Returns\n -------\n np.ndarray\n Extracted region\n \"\"\"\n if self._h5file is None:\n self._open_file()\n assert self._h5file, \"File is not open. Should not happen\"\n assert self._tile_size\n assert self._tile_overlap\n\n image_dataset = self._h5file[\"data\"]\n num_tiles = self._metadata[\"num_tiles\"]\n tile_indices = self._h5file[\"tile_indices\"]\n\n total_rows = math.ceil((self._size[1] - self._tile_overlap[1]) / self._stride[1])\n total_cols = math.ceil((self._size[0] - self._tile_overlap[0]) / self._stride[0])\n\n assert total_rows * total_cols == num_tiles\n\n x, y = location\n w, h = size\n if x < 0 or y < 0 or x + w > self._size[0] or y + h > self._size[1]:\n logger.error(f\"Requested region is out of bounds: {location}, {self._size}\")\n raise ValueError(\"Requested region is out of bounds\")\n\n start_row = y // self._stride[1]\n end_row = min((y + h - 1) // self._stride[1] + 1, total_rows)\n start_col = x // self._stride[0]\n end_col = min((x + w - 1) // self._stride[0] + 1, total_cols)\n\n if self._stitching_mode == StitchingMode.AVERAGE:\n divisor_array = np.zeros((h, w), dtype=np.uint8)\n stitched_image = np.zeros((self._num_channels, h, w), dtype=self._dtype)\n for i in range(start_row, end_row):\n for j in range(start_col, end_col):\n tile_idx = (i * total_cols) + j\n # Map through tile indices\n tile_index_in_image_dataset = tile_indices[tile_idx]\n tile = (\n self._empty_tile()\n if tile_index_in_image_dataset == -1\n else image_dataset[tile_index_in_image_dataset]\n )\n start_y = i * self._stride[1] - y\n end_y = start_y + self._tile_size[1]\n start_x = j * self._stride[0] - x\n end_x = start_x + self._tile_size[0]\n\n img_start_y = max(0, start_y)\n img_end_y = min(h, end_y)\n img_start_x = max(0, start_x)\n img_end_x = min(w, end_x)\n\n if self._stitching_mode == StitchingMode.CROP:\n crop_start_y = img_start_y - start_y\n crop_end_y = img_end_y - start_y\n crop_start_x = img_start_x - start_x\n crop_end_x = img_end_x - start_x\n\n bbox = (crop_start_x, crop_start_y), (\n crop_end_x - crop_start_x,\n crop_end_y - crop_start_y,\n )\n cropped_tile = crop_to_bbox(tile, bbox)\n stitched_image[:, img_start_y:img_end_y, img_start_x:img_end_x] = cropped_tile\n\n elif self._stitching_mode == StitchingMode.AVERAGE:\n raise NotImplementedError\n tile_start_y = max(0, -start_y)\n tile_end_y = img_end_y - img_start_y\n tile_start_x = max(0, -start_x)\n tile_end_x = img_end_x - img_start_x\n\n # TODO: Replace this with crop_to_bbox\n cropped_tile = tile[tile_start_y:tile_end_y, tile_start_x:tile_end_x]\n stitched_image[img_start_y:img_end_y, img_start_x:img_end_x] += cropped_tile\n 
divisor_array[img_start_y:img_end_y, img_start_x:img_end_x] += 1\n else:\n raise ValueError(\"Unsupported stitching mode\")\n\n if self._stitching_mode == StitchingMode.AVERAGE:\n stitched_image = (stitched_image / divisor_array[..., np.newaxis]).astype(float)\n\n if self._precision != str(InferencePrecision.FP32):\n # Always convert to float32.\n stitched_image = stitched_image / self._multiplier\n stitched_image = stitched_image.astype(np.float32)\n\n return stitched_image\n\n def close(self) -> None:\n if self._h5file is not None:\n self._h5file.close() # Close the file in close\n del self._h5file # Reset the h5file attribute\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n self.close()\n return False" }, { "identifier": "StitchingMode", "path": "ahcore/readers.py", "snippet": "class StitchingMode(str, Enum):\n CROP = \"crop\"\n AVERAGE = \"average\"\n MAXIMUM = \"maximum\"" }, { "identifier": "_get_h5_output_filename", "path": "ahcore/utils/callbacks.py", "snippet": "def _get_h5_output_filename(dump_dir: Path, input_path: Path, model_name: str, step: None | int | str = None) -> Path:\n hex_dig = _get_uuid_for_filename(input_path=input_path)\n\n # Return the hashed filename with the new extension\n if step is not None:\n return dump_dir / \"outputs\" / model_name / f\"step_{step}\" / f\"{hex_dig}.h5\"\n return dump_dir / \"outputs\" / model_name / f\"{hex_dig}.h5\"" }, { "identifier": "_ValidationDataset", "path": "ahcore/utils/callbacks.py", "snippet": "class _ValidationDataset(Dataset[DlupDatasetSample]):\n \"\"\"Helper dataset to compute the validation metrics.\"\"\"\n\n def __init__(\n self,\n data_description: Optional[DataDescription],\n native_mpp: float,\n reader: H5FileImageReader,\n annotations: Optional[WsiAnnotations] = None,\n mask: Optional[WsiAnnotations] = None,\n region_size: tuple[int, int] = (1024, 1024),\n ):\n \"\"\"\n Parameters\n ----------\n data_description : DataDescription\n native_mpp : float\n The actual mpp of the underlying image.\n reader : H5FileImageReader\n annotations : WsiAnnotations\n mask : WsiAnnotations\n region_size : Tuple[int, int]\n The region size to use to split up the image into regions.\n \"\"\"\n super().__init__()\n self._data_description = data_description\n self._native_mpp = native_mpp\n self._scaling = self._native_mpp / reader.mpp\n self._reader = reader\n self._region_size = region_size\n self._logger = get_logger(type(self).__name__)\n\n self._annotations = self._validate_annotations(annotations)\n self._mask = self._validate_annotations(mask)\n\n self._grid = Grid.from_tiling(\n (0, 0),\n reader.size,\n tile_size=self._region_size,\n tile_overlap=(0, 0),\n mode=TilingMode.overflow,\n order=GridOrder.C,\n )\n\n self._regions = self._generate_regions()\n self._logger.debug(f\"Number of validation regions: {len(self._regions)}\")\n\n def _validate_annotations(self, annotations: Optional[WsiAnnotations]) -> Optional[WsiAnnotations]:\n if annotations is None:\n return None\n\n if isinstance(annotations, WsiAnnotations):\n if self._data_description is None:\n raise ValueError(\n \"Annotations as a `WsiAnnotations` class are provided but no data description is given.\"\n \"This is required to map the labels to indices.\"\n )\n elif isinstance(annotations, SlideImage):\n pass # We do not need a specific test for this\n else:\n raise NotImplementedError(f\"Annotations of type {type(annotations)} are not supported.\")\n\n 
return annotations\n\n def _generate_regions(self) -> list[tuple[int, int]]:\n \"\"\"Generate the regions to use. These regions are filtered grid cells where there is a mask.\n\n Returns\n -------\n List[Tuple[int, int]]\n The list of regions.\n \"\"\"\n regions = []\n for coordinates in self._grid:\n _coordinates = (coordinates[0], coordinates[1])\n if self._mask is None or self._is_masked(_coordinates):\n regions.append(_coordinates)\n return regions\n\n def _is_masked(self, coordinates: tuple[int, int]) -> bool:\n \"\"\"Check if the region is masked. This works with any masking function that supports a `read_region` method or\n returns a list of annotations with an `area` attribute. In case there are elements of the form `Point` in the\n annotation list, these are also added.\n\n Parameters\n ----------\n coordinates : Tuple[int, int]\n The coordinates of the region to check.\n\n Returns\n -------\n bool\n True if the region is masked, False otherwise. Will also return True when there is no mask.\n \"\"\"\n if self._mask is None:\n return True\n\n region_mask = self._mask.read_region(coordinates, self._scaling, self._region_size)\n\n if isinstance(region_mask, np.ndarray):\n return region_mask.sum() > 0\n\n # We check if the region is not a Point, otherwise this annotation is always included\n # Else, we compute if there is a positive area in the region.\n return bool(sum(_.area if _ is not isinstance(_, (Point, MultiPoint)) else 1.0 for _ in region_mask) > 0)\n\n def __getitem__(self, idx: int) -> dict[str, Any]:\n sample = {}\n coordinates = self._regions[idx]\n\n sample[\"prediction\"] = self._get_h5_region(coordinates)\n\n if self._annotations is not None:\n target, roi = self._get_annotation_data(coordinates)\n if roi is not None:\n sample[\"roi\"] = roi.astype(np.uint8)\n else:\n sample[\"roi\"] = None # type: ignore\n sample[\"target\"] = target\n\n return sample\n\n def _get_h5_region(self, coordinates: tuple[int, int]) -> npt.NDArray[np.uint8 | np.uint16 | np.float32 | np.bool_]:\n x, y = coordinates\n width, height = self._region_size\n\n if x + width > self._reader.size[0] or y + height > self._reader.size[1]:\n region = self._read_and_pad_region(coordinates)\n else:\n region = self._reader.read_region_raw(coordinates, self._region_size)\n return region\n\n def _read_and_pad_region(self, coordinates: tuple[int, int]) -> npt.NDArray[Any]:\n x, y = coordinates\n width, height = self._region_size\n new_width = min(width, self._reader.size[0] - x)\n new_height = min(height, self._reader.size[1] - y)\n clipped_region = self._reader.read_region_raw((x, y), (new_width, new_height))\n\n prediction = np.zeros((clipped_region.shape[0], *self._region_size), dtype=clipped_region.dtype)\n prediction[:, :new_height, :new_width] = clipped_region\n return prediction\n\n def _get_annotation_data(\n self, coordinates: tuple[int, int]\n ) -> tuple[npt.NDArray[np.float32], npt.NDArray[np.int_] | None]:\n if not self._annotations:\n raise ValueError(\"No annotations are provided.\")\n\n if not self._data_description:\n raise ValueError(\"No data description is provided.\")\n\n if not self._data_description.index_map:\n raise ValueError(\"Index map is not provided.\")\n\n _annotations = self._annotations.read_region(coordinates, self._scaling, self._region_size)\n\n if self._data_description.remap_labels:\n _annotations = rename_labels(_annotations, remap_labels=self._data_description.remap_labels)\n\n points, boxes, region, roi = convert_annotations(\n _annotations,\n self._region_size,\n 
index_map=self._data_description.index_map,\n roi_name=self._data_description.roi_name,\n )\n encoded_region = one_hot_encoding(index_map=self._data_description.index_map, mask=region)\n if roi is not None:\n return encoded_region, roi[np.newaxis, ...]\n return encoded_region, None\n\n def __iter__(self) -> Iterator[dict[str, Any]]:\n for idx in range(len(self)):\n yield self[idx]\n\n def __len__(self) -> int:\n return len(self._regions)" }, { "identifier": "DataDescription", "path": "ahcore/utils/data.py", "snippet": "class DataDescription(BaseModel):\n mask_label: Optional[str] = None\n mask_threshold: Optional[float] = None # This is only used for training\n roi_name: Optional[str] = None\n num_classes: PositiveInt\n data_dir: Path\n manifest_database_uri: str\n manifest_name: str\n split_version: str\n annotations_dir: Path\n training_grid: GridDescription\n inference_grid: GridDescription\n index_map: Optional[Dict[str, int]]\n remap_labels: Optional[Dict[str, str]] = None\n use_class_weights: Optional[bool] = False\n convert_mask_to_rois: bool = True\n use_roi: bool = True\n apply_color_profile: bool = True" }, { "identifier": "get_logger", "path": "ahcore/utils/io.py", "snippet": "def get_logger(name: str = __name__) -> logging.Logger:\n \"\"\"Initializes multi-GPU-friendly python command line logger.\"\"\"\n\n logger = logging.getLogger(name)\n\n # this ensures all logging levels get marked with the rank zero decorator\n # otherwise logs would get multiplied for each GPU process in multi-GPU setup\n for level in (\n \"debug\",\n \"info\",\n \"warning\",\n \"error\",\n \"exception\",\n \"fatal\",\n \"critical\",\n ):\n setattr(logger, level, rank_zero_only(getattr(logger, level)))\n\n return logger" }, { "identifier": "DataManager", "path": "ahcore/utils/manifest.py", "snippet": "class DataManager:\n def __init__(self, database_uri: str) -> None:\n self._database_uri = database_uri\n self.__session: Optional[Session] = None\n self._logger = get_logger(type(self).__name__)\n\n @property\n def _session(self) -> Session:\n if self.__session is None:\n self.__session = open_db(self._database_uri)\n return self.__session\n\n @staticmethod\n def _ensure_record(record: Any, description: str) -> None:\n \"\"\"Raises an error if the record is None.\"\"\"\n if not record:\n raise RecordNotFoundError(f\"{description} not found.\")\n\n def get_records_by_split(\n self,\n manifest_name: str,\n split_version: str,\n split_category: Optional[str] = None,\n ) -> Generator[Patient, None, None]:\n manifest = self._session.query(Manifest).filter_by(name=manifest_name).first()\n self._ensure_record(manifest, f\"Manifest with name {manifest_name}\")\n\n split_definition = self._session.query(SplitDefinitions).filter_by(version=split_version).first()\n self._ensure_record(split_definition, f\"Split definition with version {split_version}\")\n\n # This is because mypy is complaining otherwise,\n # but _ensure_record effectively ensures that the record is not None\n assert manifest is not None\n assert split_definition is not None\n query = (\n self._session.query(Patient)\n .join(Split)\n .filter(\n Patient.manifest_id == manifest.id,\n Split.split_definition_id == split_definition.id,\n )\n )\n\n if split_category is not None:\n split_category_key = get_enum_key_from_value(split_category, CategoryEnum)\n query = query.filter(Split.category == split_category_key)\n\n patients = query.all()\n\n self._logger.info(\n f\"Found {len(patients)} patients for split {split_category if split_category else 'all 
categories'}\"\n )\n for patient in patients:\n yield patient\n\n def get_image_metadata_by_split(\n self,\n manifest_name: str,\n split_version: str,\n split_category: Optional[str] = None,\n ) -> Generator[ImageMetadata, None, None]:\n \"\"\"\n Yields the metadata of images for a given manifest name, split version, and optional split category.\n\n Parameters\n ----------\n manifest_name : str\n The name of the manifest.\n split_version : str\n The version of the split.\n split_category : Optional[str], default=None\n The category of the split (e.g., \"fit\", \"validate\", \"test\").\n\n Yields\n -------\n ImageMetadata\n The metadata of the image.\n \"\"\"\n for patient in self.get_records_by_split(manifest_name, split_version, split_category):\n for image in patient.images:\n yield fetch_image_metadata(image)\n\n def get_image_metadata_by_patient(self, patient_code: str) -> list[ImageMetadata]:\n \"\"\"\n Fetch the metadata for the images associated with a specific patient.\n\n Parameters\n ----------\n patient_code : str\n The unique code of the patient.\n\n Returns\n -------\n list[ImageData]\n A list of metadata for all images associated with the patient.\n \"\"\"\n patient = self._session.query(Patient).filter_by(patient_code=patient_code).first()\n self._ensure_record(patient, f\"Patient with code {patient_code} not found\")\n assert patient is not None # for mypy\n return [fetch_image_metadata(image) for image in patient.images]\n\n def get_image_by_filename(self, filename: str) -> Image:\n \"\"\"\n Fetch the metadata for an image based on its filename.\n\n Parameters\n ----------\n filename : str\n The filename of the image.\n\n Returns\n -------\n Image\n The image from the database.\n \"\"\"\n image = self._session.query(Image).filter_by(filename=filename).first()\n self._ensure_record(image, f\"Image with filename {filename} not found\")\n assert image\n return image\n\n def get_image_metadata_by_id(self, image_id: int) -> ImageMetadata:\n \"\"\"\n Fetch the metadata for an image based on its ID.\n\n Parameters\n ----------\n image_id : int\n The ID of the image.\n\n Returns\n -------\n ImageMetadata\n Metadata of the image.\n \"\"\"\n image = self._session.query(Image).filter_by(id=image_id).first()\n self._ensure_record(image, f\"No image found with ID {image_id}\")\n assert image is not None # mypy\n return fetch_image_metadata(image)\n\n def __enter__(self) -> \"DataManager\":\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> Literal[False]:\n if self._session is not None:\n self.close()\n return False\n\n def close(self) -> None:\n if self.__session is not None:\n self.__session.close()\n self.__session = None" }, { "identifier": "ImageMetadata", "path": "ahcore/utils/manifest.py", "snippet": "class ImageMetadata(BaseModel):\n \"\"\"Model to hold image metadata\"\"\"\n\n class Config:\n frozen = True\n\n filename: Path\n height: PositiveInt\n width: PositiveInt\n mpp: PositiveFloat" }, { "identifier": "fetch_image_metadata", "path": "ahcore/utils/manifest.py", "snippet": "def fetch_image_metadata(image: Image) -> ImageMetadata:\n \"\"\"Extract metadata from an Image object.\"\"\"\n return ImageMetadata(\n filename=Path(image.filename),\n height=int(image.height),\n width=int(image.width),\n mpp=float(image.mpp),\n )" }, { "identifier": "get_mask_and_annotations_from_record", "path": "ahcore/utils/manifest.py", "snippet": "def 
get_mask_and_annotations_from_record(\n annotations_root: Path, record: Image\n) -> tuple[_AnnotationReturnTypes | None, _AnnotationReturnTypes | None]:\n \"\"\"\n Get the mask and annotations from a record of type Image.\n\n Parameters\n ----------\n annotations_root : Path\n The root directory of the annotations.\n record : Type[Image]\n The record containing the mask and annotations.\n\n Returns\n -------\n tuple[WsiAnnotations, WsiAnnotations]\n The mask and annotations.\n \"\"\"\n _masks = parse_annotations_from_record(annotations_root, record.masks)\n _annotations = parse_annotations_from_record(annotations_root, record.annotations)\n return _masks, _annotations" } ]
import itertools
import json
import multiprocessing
import time
import pytorch_lightning as pl
import torch
from collections import namedtuple
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Any, Generator, Optional, cast
from pytorch_lightning import Callback
from ahcore.callbacks import WriteH5Callback
from ahcore.lit_module import AhCoreLightningModule
from ahcore.metrics import WSIMetricFactory
from ahcore.readers import H5FileImageReader, StitchingMode
from ahcore.utils.callbacks import _get_h5_output_filename, _ValidationDataset
from ahcore.utils.data import DataDescription
from ahcore.utils.io import get_logger
from ahcore.utils.manifest import DataManager, ImageMetadata, fetch_image_metadata, get_mask_and_annotations_from_record
12999
assert self._dump_dir assert self._data_description assert self._validate_metadata assert self._data_manager metrics = [] with multiprocessing.Pool(processes=self._max_processes) as pool: results_to_filename: dict[list[dict[str, Any]], str] = {} completed_tasks = 0 # Fill up the initial task pool for image_metadata in itertools.islice(self._validate_metadata, self._max_processes): logger.info("Metadata: %s", image_metadata) # Assemble the task data # filename", "h5_filename", "metadata", "mask", "annotations" task_data = prepare_task_data( image_metadata.filename, self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) while results_to_filename: time.sleep(0.1) # Reduce excessive polling # Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename))
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None: if not isinstance(pl_module, AhCoreLightningModule): # TODO: Make a AhCoreCallback with these features raise ValueError("AhCoreLightningModule required for WriteTiffCallback.") self._model_name = pl_module.name _callback: Optional[WriteH5Callback] = None for idx, callback in enumerate(trainer.callbacks): # type: ignore if isinstance(callback, WriteH5Callback): _callback = cast(WriteH5Callback, trainer.callbacks[idx]) # type: ignore break if _callback is None: raise ValueError( "WriteH5Callback is not in the trainer's callbacks. " "This is required before WSI metrics can be computed using this Callback" ) self._dump_dir = _callback.dump_dir if pl_module.wsi_metrics is None: raise ValueError("WSI metrics are not set.") self._wsi_metrics = pl_module.wsi_metrics self._data_description = trainer.datamodule.data_description # type: ignore # For mypy assert self._data_description index_map = self._data_description.index_map assert index_map if not self._data_description: raise ValueError("Data description is not set.") self._class_names = dict([(v, k) for k, v in index_map.items()]) self._class_names[0] = "background" # Here we can query the database for the validation images self._data_manager: DataManager = trainer.datamodule.data_manager # type: ignore def _create_validate_image_metadata_gen( self, ) -> Generator[ImageMetadata, None, None]: assert self._data_description assert self._data_manager gen = self._data_manager.get_image_metadata_by_split( manifest_name=self._data_description.manifest_name, split_version=self._data_description.split_version, split_category="validate", ) for image_metadata in gen: yield image_metadata @property def _validate_metadata(self) -> Generator[ImageMetadata, None, None] | None: return self._validate_metadata_gen def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: self._validate_metadata_gen = self._create_validate_image_metadata_gen() def on_validation_batch_end( self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0, ) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") filenames = batch["path"] # Filenames are constant across the batch. if len(set(filenames)) != 1: raise ValueError( "All paths in a batch must be the same. " "Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler." 
) def compute_metrics( self, trainer: pl.Trainer, pl_module: pl.LightningModule ) -> list[list[dict[str, dict[str, float]]]]: assert self._dump_dir assert self._data_description assert self._validate_metadata assert self._data_manager metrics = [] with multiprocessing.Pool(processes=self._max_processes) as pool: results_to_filename: dict[list[dict[str, Any]], str] = {} completed_tasks = 0 # Fill up the initial task pool for image_metadata in itertools.islice(self._validate_metadata, self._max_processes): logger.info("Metadata: %s", image_metadata) # Assemble the task data # filename", "h5_filename", "metadata", "mask", "annotations" task_data = prepare_task_data( image_metadata.filename, self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) while results_to_filename: time.sleep(0.1) # Reduce excessive polling # Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename))
metadata = fetch_image_metadata(image)
11
2023-10-14 18:04:12+00:00
16k
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/linear_model/_omp.py
[ { "identifier": "MultiOutputMixin", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class MultiOutputMixin:\n \"\"\"Mixin to mark estimators that support multioutput.\"\"\"\n\n def _more_tags(self):\n return {\"multioutput\": True}" }, { "identifier": "RegressorMixin", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class RegressorMixin:\n \"\"\"Mixin class for all regression estimators in scikit-learn.\"\"\"\n\n _estimator_type = \"regressor\"\n\n def score(self, X, y, sample_weight=None):\n \"\"\"Return the coefficient of determination of the prediction.\n\n The coefficient of determination :math:`R^2` is defined as\n :math:`(1 - \\\\frac{u}{v})`, where :math:`u` is the residual\n sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v`\n is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.\n The best possible score is 1.0 and it can be negative (because the\n model can be arbitrarily worse). A constant model that always predicts\n the expected value of `y`, disregarding the input features, would get\n a :math:`R^2` score of 0.0.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Test samples. For some estimators this may be a precomputed\n kernel matrix or a list of generic objects instead with shape\n ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``\n is the number of samples used in the fitting for the estimator.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n True values for `X`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n score : float\n :math:`R^2` of ``self.predict(X)`` w.r.t. `y`.\n\n Notes\n -----\n The :math:`R^2` score used when calling ``score`` on a regressor uses\n ``multioutput='uniform_average'`` from version 0.23 to keep consistent\n with default value of :func:`~sklearn.metrics.r2_score`.\n This influences the ``score`` method of all the multioutput\n regressors (except for\n :class:`~sklearn.multioutput.MultiOutputRegressor`).\n \"\"\"\n\n from .metrics import r2_score\n\n y_pred = self.predict(X)\n return r2_score(y, y_pred, sample_weight=sample_weight)\n\n def _more_tags(self):\n return {\"requires_y\": True}" }, { "identifier": "_fit_context", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "def _fit_context(*, prefer_skip_nested_validation):\n \"\"\"Decorator to run the fit methods of estimators within context managers.\n\n Parameters\n ----------\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called during fit will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most estimators, except for those that receive\n non-validated objects as parameters, such as meta-estimators that are given\n estimator objects.\n\n Returns\n -------\n decorated_fit : method\n The decorated fit method.\n \"\"\"\n\n def decorator(fit_method):\n @functools.wraps(fit_method)\n def wrapper(estimator, *args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n\n # we don't want to validate again for each call to partial_fit\n partial_fit_and_fitted = (\n fit_method.__name__ == \"partial_fit\" and _is_fitted(estimator)\n )\n\n if not global_skip_validation and not partial_fit_and_fitted:\n estimator._validate_params()\n\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return fit_method(estimator, *args, **kwargs)\n\n return wrapper\n\n return decorator" }, { "identifier": "check_cv", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/model_selection/_split.py", "snippet": "def check_cv(cv=5, y=None, *, classifier=False):\n \"\"\"Input checker utility for building a cross-validator.\n\n Parameters\n ----------\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 5-fold cross validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable that generates (train, test) splits as arrays of indices.\n\n For integer/None inputs, if classifier is True and ``y`` is either\n binary or multiclass, :class:`StratifiedKFold` is used. In all other\n cases, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validation strategies that can be used here.\n\n .. versionchanged:: 0.22\n ``cv`` default value changed from 3-fold to 5-fold.\n\n y : array-like, default=None\n The target variable for supervised learning problems.\n\n classifier : bool, default=False\n Whether the task is a classification task, in which case\n stratified KFold will be used.\n\n Returns\n -------\n checked_cv : a cross-validator instance.\n The return value is a cross-validator which generates the train/test\n splits via the ``split`` method.\n \"\"\"\n cv = 5 if cv is None else cv\n if isinstance(cv, numbers.Integral):\n if (\n classifier\n and (y is not None)\n and (type_of_target(y, input_name=\"y\") in (\"binary\", \"multiclass\"))\n ):\n return StratifiedKFold(cv)\n else:\n return KFold(cv)\n\n if not hasattr(cv, \"split\") or isinstance(cv, str):\n if not isinstance(cv, Iterable) or isinstance(cv, str):\n raise ValueError(\n \"Expected cv as an integer, cross-validation \"\n \"object (from sklearn.model_selection) \"\n \"or an iterable. Got %s.\" % cv\n )\n return _CVIterableWrapper(cv)\n\n return cv # New style cv objects are passed without any modification" }, { "identifier": "as_float_array", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def as_float_array(X, *, copy=True, force_all_finite=True):\n \"\"\"Convert an array-like to an array of floats.\n\n The new dtype will be np.float32 or np.float64, depending on the original\n type. 
The function can create a copy or modify the argument depending\n on the argument copy.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n The input data.\n\n copy : bool, default=True\n If True, a copy of X will be created. If False, a copy may still be\n returned if X's dtype is not a floating point type.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in X. The\n possibilities are:\n\n - True: Force all values of X to be finite.\n - False: accepts np.inf, np.nan, pd.NA in X.\n - 'allow-nan': accepts only np.nan and pd.NA values in X. Values cannot\n be infinite.\n\n .. versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n Returns\n -------\n XT : {ndarray, sparse matrix}\n An array of type float.\n \"\"\"\n if isinstance(X, np.matrix) or (\n not isinstance(X, np.ndarray) and not sp.issparse(X)\n ):\n return check_array(\n X,\n accept_sparse=[\"csr\", \"csc\", \"coo\"],\n dtype=np.float64,\n copy=copy,\n force_all_finite=force_all_finite,\n ensure_2d=False,\n )\n elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:\n return X.copy() if copy else X\n elif X.dtype in [np.float32, np.float64]: # is numpy array\n return X.copy(\"F\" if X.flags[\"F_CONTIGUOUS\"] else \"C\") if copy else X\n else:\n if X.dtype.kind in \"uib\" and X.dtype.itemsize <= 4:\n return_dtype = np.float32\n else:\n return_dtype = np.float64\n return X.astype(return_dtype)" }, { "identifier": "check_array", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_array(\n array,\n accept_sparse=False,\n *,\n accept_large_sparse=True,\n dtype=\"numeric\",\n order=None,\n copy=False,\n force_all_finite=True,\n ensure_2d=True,\n allow_nd=False,\n ensure_min_samples=1,\n ensure_min_features=1,\n estimator=None,\n input_name=\"\",\n):\n \"\"\"Input validation on an array, list, sparse matrix or similar.\n\n By default, the input is checked to be a non-empty 2D array containing\n only finite values. If the dtype of the array is object, attempt\n converting to float, raising on failure.\n\n Parameters\n ----------\n array : object\n Input object to check / convert.\n\n accept_sparse : str, bool or list/tuple of str, default=False\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. If the input is sparse but not in the allowed format,\n it will be converted to the first listed format. True allows the input\n to be any format. False means that a sparse matrix input will\n raise an error.\n\n accept_large_sparse : bool, default=True\n If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by\n accept_sparse, accept_large_sparse=False will cause it to be accepted\n only if its indices are stored with a 32-bit dtype.\n\n .. versionadded:: 0.20\n\n dtype : 'numeric', type, list of type or None, default='numeric'\n Data type of result. 
If None, the dtype of the input is preserved.\n If \"numeric\", dtype is preserved unless array.dtype is object.\n If dtype is a list of types, conversion on the first type is only\n performed if the dtype of the input is not in the list.\n\n order : {'F', 'C'} or None, default=None\n Whether an array will be forced to be fortran or c-style.\n When order is None (default), then if copy=False, nothing is ensured\n about the memory layout of the output array; otherwise (copy=True)\n the memory layout of the returned array is kept as close as possible\n to the original array.\n\n copy : bool, default=False\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n ensure_2d : bool, default=True\n Whether to raise a value error if array is not 2D.\n\n allow_nd : bool, default=False\n Whether to allow array.ndim > 2.\n\n ensure_min_samples : int, default=1\n Make sure that the array has a minimum number of samples in its first\n axis (rows for a 2D array). Setting to 0 disables this check.\n\n ensure_min_features : int, default=1\n Make sure that the 2D array has some minimum number of features\n (columns). The default value of 1 rejects empty datasets.\n This check is only enforced when the input data has effectively 2\n dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0\n disables this check.\n\n estimator : str or estimator instance, default=None\n If passed, include the name of the estimator in warning messages.\n\n input_name : str, default=\"\"\n The data name used to construct the error message. In particular\n if `input_name` is \"X\" and the data has NaN values and\n allow_nan is False, the error message will link to the imputer\n documentation.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n array_converted : object\n The converted and validated array.\n \"\"\"\n if isinstance(array, np.matrix):\n raise TypeError(\n \"np.matrix is not supported. Please convert to a numpy array with \"\n \"np.asarray. For more information see: \"\n \"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html\"\n )\n\n xp, is_array_api_compliant = get_namespace(array)\n\n # store reference to original array to check if copy is needed when\n # function returns\n array_orig = array\n\n # store whether originally we wanted numeric dtype\n dtype_numeric = isinstance(dtype, str) and dtype == \"numeric\"\n\n dtype_orig = getattr(array, \"dtype\", None)\n if not is_array_api_compliant and not hasattr(dtype_orig, \"kind\"):\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n\n # check if the object contains several dtypes (typically a pandas\n # DataFrame), and store them. If not, store None.\n dtypes_orig = None\n pandas_requires_conversion = False\n if hasattr(array, \"dtypes\") and hasattr(array.dtypes, \"__array__\"):\n # throw warning if columns are sparse. 
If all columns are sparse, then\n # array.sparse exists and sparsity will be preserved (later).\n with suppress(ImportError):\n from pandas import SparseDtype\n\n def is_sparse(dtype):\n return isinstance(dtype, SparseDtype)\n\n if not hasattr(array, \"sparse\") and array.dtypes.apply(is_sparse).any():\n warnings.warn(\n \"pandas.DataFrame with sparse columns found.\"\n \"It will be converted to a dense numpy array.\"\n )\n\n dtypes_orig = list(array.dtypes)\n pandas_requires_conversion = any(\n _pandas_dtype_needs_early_conversion(i) for i in dtypes_orig\n )\n if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):\n dtype_orig = np.result_type(*dtypes_orig)\n elif pandas_requires_conversion and any(d == object for d in dtypes_orig):\n # Force object if any of the dtypes is an object\n dtype_orig = object\n\n elif (_is_extension_array_dtype(array) or hasattr(array, \"iloc\")) and hasattr(\n array, \"dtype\"\n ):\n # array is a pandas series\n pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)\n if isinstance(array.dtype, np.dtype):\n dtype_orig = array.dtype\n else:\n # Set to None to let array.astype work out the best dtype\n dtype_orig = None\n\n if dtype_numeric:\n if (\n dtype_orig is not None\n and hasattr(dtype_orig, \"kind\")\n and dtype_orig.kind == \"O\"\n ):\n # if input is object, convert to float.\n dtype = xp.float64\n else:\n dtype = None\n\n if isinstance(dtype, (list, tuple)):\n if dtype_orig is not None and dtype_orig in dtype:\n # no dtype conversion required\n dtype = None\n else:\n # dtype conversion required. Let's select the first element of the\n # list of accepted types.\n dtype = dtype[0]\n\n if pandas_requires_conversion:\n # pandas dataframe requires conversion earlier to handle extension dtypes with\n # nans\n # Use the original dtype for conversion if dtype is None\n new_dtype = dtype_orig if dtype is None else dtype\n array = array.astype(new_dtype)\n # Since we converted here, we do not need to convert again later\n dtype = None\n\n if dtype is not None and _is_numpy_namespace(xp):\n dtype = np.dtype(dtype)\n\n if force_all_finite not in (True, False, \"allow-nan\"):\n raise ValueError(\n 'force_all_finite should be a bool or \"allow-nan\". 
Got {!r} instead'.format(\n force_all_finite\n )\n )\n\n if dtype is not None and _is_numpy_namespace(xp):\n # convert to dtype object to conform to Array API to be use `xp.isdtype` later\n dtype = np.dtype(dtype)\n\n estimator_name = _check_estimator_name(estimator)\n context = \" by %s\" % estimator_name if estimator is not None else \"\"\n\n # When all dataframe columns are sparse, convert to a sparse array\n if hasattr(array, \"sparse\") and array.ndim > 1:\n with suppress(ImportError):\n from pandas import SparseDtype # noqa: F811\n\n def is_sparse(dtype):\n return isinstance(dtype, SparseDtype)\n\n if array.dtypes.apply(is_sparse).all():\n # DataFrame.sparse only supports `to_coo`\n array = array.sparse.to_coo()\n if array.dtype == np.dtype(\"object\"):\n unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])\n if len(unique_dtypes) > 1:\n raise ValueError(\n \"Pandas DataFrame with mixed sparse extension arrays \"\n \"generated a sparse matrix with object dtype which \"\n \"can not be converted to a scipy sparse matrix.\"\n \"Sparse extension arrays should all have the same \"\n \"numeric type.\"\n )\n\n if sp.issparse(array):\n _ensure_no_complex_data(array)\n array = _ensure_sparse_format(\n array,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n accept_large_sparse=accept_large_sparse,\n estimator_name=estimator_name,\n input_name=input_name,\n )\n else:\n # If np.array(..) gives ComplexWarning, then we convert the warning\n # to an error. This is needed because specifying a non complex\n # dtype to the function converts complex to real dtype,\n # thereby passing the test made in the lines following the scope\n # of warnings context manager.\n with warnings.catch_warnings():\n try:\n warnings.simplefilter(\"error\", ComplexWarning)\n if dtype is not None and xp.isdtype(dtype, \"integral\"):\n # Conversion float -> int should not contain NaN or\n # inf (numpy#14412). We cannot use casting='safe' because\n # then conversion float -> int would be disallowed.\n array = _asarray_with_order(array, order=order, xp=xp)\n if xp.isdtype(array.dtype, (\"real floating\", \"complex floating\")):\n _assert_all_finite(\n array,\n allow_nan=False,\n msg_dtype=dtype,\n estimator_name=estimator_name,\n input_name=input_name,\n )\n array = xp.astype(array, dtype, copy=False)\n else:\n array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)\n except ComplexWarning as complex_warning:\n raise ValueError(\n \"Complex data not supported\\n{}\\n\".format(array)\n ) from complex_warning\n\n # It is possible that the np.array(..) gave no warning. This happens\n # when no dtype conversion happened, for example dtype = None. The\n # result is that np.array(..) 
produces an array of complex dtype\n # and we need to catch and raise exception for such cases.\n _ensure_no_complex_data(array)\n\n if ensure_2d:\n # If input is scalar raise error\n if array.ndim == 0:\n raise ValueError(\n \"Expected 2D array, got scalar array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n # If input is 1D raise error\n if array.ndim == 1:\n raise ValueError(\n \"Expected 2D array, got 1D array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n\n if dtype_numeric and hasattr(array.dtype, \"kind\") and array.dtype.kind in \"USV\":\n raise ValueError(\n \"dtype='numeric' is not compatible with arrays of bytes/strings.\"\n \"Convert your data to numeric values explicitly instead.\"\n )\n if not allow_nd and array.ndim >= 3:\n raise ValueError(\n \"Found array with dim %d. %s expected <= 2.\"\n % (array.ndim, estimator_name)\n )\n\n if force_all_finite:\n _assert_all_finite(\n array,\n input_name=input_name,\n estimator_name=estimator_name,\n allow_nan=force_all_finite == \"allow-nan\",\n )\n\n if ensure_min_samples > 0:\n n_samples = _num_samples(array)\n if n_samples < ensure_min_samples:\n raise ValueError(\n \"Found array with %d sample(s) (shape=%s) while a\"\n \" minimum of %d is required%s.\"\n % (n_samples, array.shape, ensure_min_samples, context)\n )\n\n if ensure_min_features > 0 and array.ndim == 2:\n n_features = array.shape[1]\n if n_features < ensure_min_features:\n raise ValueError(\n \"Found array with %d feature(s) (shape=%s) while\"\n \" a minimum of %d is required%s.\"\n % (n_features, array.shape, ensure_min_features, context)\n )\n\n if copy:\n if _is_numpy_namespace(xp):\n # only make a copy if `array` and `array_orig` may share memory`\n if np.may_share_memory(array, array_orig):\n array = _asarray_with_order(\n array, dtype=dtype, order=order, copy=True, xp=xp\n )\n else:\n # always make a copy for non-numpy arrays\n array = _asarray_with_order(\n array, dtype=dtype, order=order, copy=True, xp=xp\n )\n\n return array" }, { "identifier": "Hidden", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Hidden:\n \"\"\"Class encapsulating a constraint not meant to be exposed to the user.\n\n Parameters\n ----------\n constraint : str or _Constraint instance\n The constraint to be used internally.\n \"\"\"\n\n def __init__(self, constraint):\n self.constraint = constraint" }, { "identifier": "Interval", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Interval(_Constraint):\n \"\"\"Constraint representing a typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real, RealNotInt}\n The set of numbers in which to set the interval.\n\n If RealNotInt, only reals that don't have the integer type\n are allowed. For example 1.0 is allowed but 1 is not.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -∞.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +∞.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. 
Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +∞) U {+∞}`.\n \"\"\"\n\n def __init__(self, type, left, right, *, closed):\n super().__init__()\n self.type = type\n self.left = left\n self.right = right\n self.closed = closed\n\n self._check_params()\n\n def _check_params(self):\n if self.type not in (Integral, Real, RealNotInt):\n raise ValueError(\n \"type must be either numbers.Integral, numbers.Real or RealNotInt.\"\n f\" Got {self.type} instead.\"\n )\n\n if self.closed not in (\"left\", \"right\", \"both\", \"neither\"):\n raise ValueError(\n \"closed must be either 'left', 'right', 'both' or 'neither'. \"\n f\"Got {self.closed} instead.\"\n )\n\n if self.type is Integral:\n suffix = \"for an interval over the integers.\"\n if self.left is not None and not isinstance(self.left, Integral):\n raise TypeError(f\"Expecting left to be an int {suffix}\")\n if self.right is not None and not isinstance(self.right, Integral):\n raise TypeError(f\"Expecting right to be an int {suffix}\")\n if self.left is None and self.closed in (\"left\", \"both\"):\n raise ValueError(\n f\"left can't be None when closed == {self.closed} {suffix}\"\n )\n if self.right is None and self.closed in (\"right\", \"both\"):\n raise ValueError(\n f\"right can't be None when closed == {self.closed} {suffix}\"\n )\n else:\n if self.left is not None and not isinstance(self.left, Real):\n raise TypeError(\"Expecting left to be a real number.\")\n if self.right is not None and not isinstance(self.right, Real):\n raise TypeError(\"Expecting right to be a real number.\")\n\n if self.right is not None and self.left is not None and self.right <= self.left:\n raise ValueError(\n f\"right can't be less than left. 
Got left={self.left} and \"\n f\"right={self.right}\"\n )\n\n def __contains__(self, val):\n if np.isnan(val):\n return False\n\n left_cmp = operator.lt if self.closed in (\"left\", \"both\") else operator.le\n right_cmp = operator.gt if self.closed in (\"right\", \"both\") else operator.ge\n\n left = -np.inf if self.left is None else self.left\n right = np.inf if self.right is None else self.right\n\n if left_cmp(val, left):\n return False\n if right_cmp(val, right):\n return False\n return True\n\n def is_satisfied_by(self, val):\n if not isinstance(val, self.type):\n return False\n\n return val in self\n\n def __str__(self):\n type_str = \"an int\" if self.type is Integral else \"a float\"\n left_bracket = \"[\" if self.closed in (\"left\", \"both\") else \"(\"\n left_bound = \"-inf\" if self.left is None else self.left\n right_bound = \"inf\" if self.right is None else self.right\n right_bracket = \"]\" if self.closed in (\"right\", \"both\") else \")\"\n\n # better repr if the bounds were given as integers\n if not self.type == Integral and isinstance(self.left, Real):\n left_bound = float(left_bound)\n if not self.type == Integral and isinstance(self.right, Real):\n right_bound = float(right_bound)\n\n return (\n f\"{type_str} in the range \"\n f\"{left_bracket}{left_bound}, {right_bound}{right_bracket}\"\n )" }, { "identifier": "StrOptions", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class StrOptions(Options):\n \"\"\"Constraint representing a finite set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the string\n representation of the constraint.\n \"\"\"\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)" }, { "identifier": "validate_params", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "def validate_params(parameter_constraints, *, prefer_skip_nested_validation):\n \"\"\"Decorator to validate types and values of functions and methods.\n\n Parameters\n ----------\n parameter_constraints : dict\n A dictionary `param_name: list of constraints`. See the docstring of\n `validate_parameter_constraints` for a description of the accepted constraints.\n\n Note that the *args and **kwargs parameters are not validated and must not be\n present in the parameter_constraints dictionary.\n\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called by the decorated function will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most functions, except for those that receive\n non-validated objects as parameters or that are just wrappers around classes\n because they only perform a partial validation.\n\n Returns\n -------\n decorated_function : function or method\n The decorated function.\n \"\"\"\n\n def decorator(func):\n # The dict of parameter constraints is set as an attribute of the function\n # to make it possible to dynamically introspect the constraints for\n # automatic testing.\n setattr(func, \"_skl_parameter_constraints\", parameter_constraints)\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n if global_skip_validation:\n return func(*args, **kwargs)\n\n func_sig = signature(func)\n\n # Map *args/**kwargs to the function signature\n params = func_sig.bind(*args, **kwargs)\n params.apply_defaults()\n\n # ignore self/cls and positional/keyword markers\n to_ignore = [\n p.name\n for p in func_sig.parameters.values()\n if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)\n ]\n to_ignore += [\"self\", \"cls\"]\n params = {k: v for k, v in params.arguments.items() if k not in to_ignore}\n\n validate_parameter_constraints(\n parameter_constraints, params, caller_name=func.__qualname__\n )\n\n try:\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return func(*args, **kwargs)\n except InvalidParameterError as e:\n # When the function is just a wrapper around an estimator, we allow\n # the function to delegate validation to the estimator, but we replace\n # the name of the estimator by the name of the function in the error\n # message to avoid confusion.\n msg = re.sub(\n r\"parameter of \\w+ must be\",\n f\"parameter of {func.__qualname__} must be\",\n str(e),\n )\n raise InvalidParameterError(msg) from e\n\n return wrapper\n\n return decorator" }, { "identifier": "Parallel", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/parallel.py", "snippet": "class Parallel(joblib.Parallel):\n \"\"\"Tweak of :class:`joblib.Parallel` that propagates the scikit-learn configuration.\n\n This subclass of :class:`joblib.Parallel` ensures that the active configuration\n (thread-local) of scikit-learn is propagated to the parallel workers for the\n duration of the execution of the parallel tasks.\n\n The API does not change and you can refer to :class:`joblib.Parallel`\n documentation for more details.\n\n .. 
versionadded:: 1.3\n \"\"\"\n\n def __call__(self, iterable):\n \"\"\"Dispatch the tasks and return the results.\n\n Parameters\n ----------\n iterable : iterable\n Iterable containing tuples of (delayed_function, args, kwargs) that should\n be consumed.\n\n Returns\n -------\n results : list\n List of results of the tasks.\n \"\"\"\n # Capture the thread-local scikit-learn configuration at the time\n # Parallel.__call__ is issued since the tasks can be dispatched\n # in a different thread depending on the backend and on the value of\n # pre_dispatch and n_jobs.\n config = get_config()\n iterable_with_config = (\n (_with_config(delayed_func, config), args, kwargs)\n for delayed_func, args, kwargs in iterable\n )\n return super().__call__(iterable_with_config)" }, { "identifier": "delayed", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/parallel.py", "snippet": "def delayed(function):\n \"\"\"Decorator used to capture the arguments of a function.\n\n This alternative to `joblib.delayed` is meant to be used in conjunction\n with `sklearn.utils.parallel.Parallel`. The latter captures the the scikit-\n learn configuration by calling `sklearn.get_config()` in the current\n thread, prior to dispatching the first task. The captured configuration is\n then propagated and enabled for the duration of the execution of the\n delayed function in the joblib workers.\n\n .. versionchanged:: 1.3\n `delayed` was moved from `sklearn.utils.fixes` to `sklearn.utils.parallel`\n in scikit-learn 1.3.\n\n Parameters\n ----------\n function : callable\n The function to be delayed.\n\n Returns\n -------\n output: tuple\n Tuple containing the delayed function, the positional arguments, and the\n keyword arguments.\n \"\"\"\n\n @functools.wraps(function)\n def delayed_function(*args, **kwargs):\n return _FuncWrapper(function), args, kwargs\n\n return delayed_function" }, { "identifier": "LinearModel", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/linear_model/_base.py", "snippet": "class LinearModel(BaseEstimator, metaclass=ABCMeta):\n \"\"\"Base class for Linear Models\"\"\"\n\n @abstractmethod\n def fit(self, X, y):\n \"\"\"Fit model.\"\"\"\n\n def _decision_function(self, X):\n check_is_fitted(self)\n\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\", \"coo\"], reset=False)\n return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n\n def predict(self, X):\n \"\"\"\n Predict using the linear model.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape (n_samples, n_features)\n Samples.\n\n Returns\n -------\n C : array, shape (n_samples,)\n Returns predicted values.\n \"\"\"\n return self._decision_function(X)\n\n def _set_intercept(self, X_offset, y_offset, X_scale):\n \"\"\"Set the intercept_\"\"\"\n if self.fit_intercept:\n # We always want coef_.dtype=X.dtype. 
For instance, X.dtype can differ from\n # coef_.dtype if warm_start=True.\n self.coef_ = np.divide(self.coef_, X_scale, dtype=X_scale.dtype)\n self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)\n else:\n self.intercept_ = 0.0\n\n def _more_tags(self):\n return {\"requires_y\": True}" }, { "identifier": "_deprecate_normalize", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/linear_model/_base.py", "snippet": "def _deprecate_normalize(normalize, estimator_name):\n \"\"\"Normalize is to be deprecated from linear models and a use of\n a pipeline with a StandardScaler is to be recommended instead.\n Here the appropriate message is selected to be displayed to the user\n depending on the default normalize value (as it varies between the linear\n models and normalize value selected by the user).\n\n Parameters\n ----------\n normalize : bool,\n normalize value passed by the user\n\n estimator_name : str\n name of the linear estimator which calls this function.\n The name will be used for writing the deprecation warnings\n\n Returns\n -------\n normalize : bool,\n normalize value which should further be used by the estimator at this\n stage of the depreciation process\n\n Notes\n -----\n This function should be completely removed in 1.4.\n \"\"\"\n\n if normalize not in [True, False, \"deprecated\"]:\n raise ValueError(\n \"Leave 'normalize' to its default value or set it to True or False\"\n )\n\n if normalize == \"deprecated\":\n _normalize = False\n else:\n _normalize = normalize\n\n pipeline_msg = (\n \"If you wish to scale the data, use Pipeline with a StandardScaler \"\n \"in a preprocessing stage. To reproduce the previous behavior:\\n\\n\"\n \"from sklearn.pipeline import make_pipeline\\n\\n\"\n \"model = make_pipeline(StandardScaler(with_mean=False), \"\n f\"{estimator_name}())\\n\\n\"\n \"If you wish to pass a sample_weight parameter, you need to pass it \"\n \"as a fit parameter to each step of the pipeline as follows:\\n\\n\"\n \"kwargs = {s[0] + '__sample_weight': sample_weight for s \"\n \"in model.steps}\\n\"\n \"model.fit(X, y, **kwargs)\\n\\n\"\n )\n\n alpha_msg = \"\"\n if \"LassoLars\" in estimator_name:\n alpha_msg = \"Set parameter alpha to: original_alpha * np.sqrt(n_samples). \"\n\n if normalize != \"deprecated\" and normalize:\n warnings.warn(\n \"'normalize' was deprecated in version 1.2 and will be removed in 1.4.\\n\"\n + pipeline_msg\n + alpha_msg,\n FutureWarning,\n )\n elif not normalize:\n warnings.warn(\n (\n \"'normalize' was deprecated in version 1.2 and will be \"\n \"removed in 1.4. \"\n \"Please leave the normalize parameter to its default value to \"\n \"silence this warning. The default behavior of this estimator \"\n \"is to not do any normalization. 
If normalization is needed \"\n \"please use sklearn.preprocessing.StandardScaler instead.\"\n ),\n FutureWarning,\n )\n\n return _normalize" }, { "identifier": "_pre_fit", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/linear_model/_base.py", "snippet": "def _pre_fit(\n X,\n y,\n Xy,\n precompute,\n normalize,\n fit_intercept,\n copy,\n check_input=True,\n sample_weight=None,\n):\n \"\"\"Function used at beginning of fit in linear models with L1 or L0 penalty.\n\n This function applies _preprocess_data and additionally computes the gram matrix\n `precompute` as needed as well as `Xy`.\n \"\"\"\n n_samples, n_features = X.shape\n\n if sparse.issparse(X):\n # copy is not needed here as X is not modified inplace when X is sparse\n precompute = False\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X,\n y,\n fit_intercept=fit_intercept,\n normalize=normalize,\n copy=False,\n check_input=check_input,\n sample_weight=sample_weight,\n )\n else:\n # copy was done in fit if necessary\n X, y, X_offset, y_offset, X_scale = _preprocess_data(\n X,\n y,\n fit_intercept=fit_intercept,\n normalize=normalize,\n copy=copy,\n check_input=check_input,\n sample_weight=sample_weight,\n )\n # Rescale only in dense case. Sparse cd solver directly deals with\n # sample_weight.\n if sample_weight is not None:\n # This triggers copies anyway.\n X, y, _ = _rescale_data(X, y, sample_weight=sample_weight)\n\n # FIXME: 'normalize' to be removed in 1.4\n if hasattr(precompute, \"__array__\"):\n if (\n fit_intercept\n and not np.allclose(X_offset, np.zeros(n_features))\n or normalize\n and not np.allclose(X_scale, np.ones(n_features))\n ):\n warnings.warn(\n (\n \"Gram matrix was provided but X was centered to fit \"\n \"intercept, or X was normalized : recomputing Gram matrix.\"\n ),\n UserWarning,\n )\n # recompute Gram\n precompute = \"auto\"\n Xy = None\n elif check_input:\n # If we're going to use the user's precomputed gram matrix, we\n # do a quick check to make sure its not totally bogus.\n _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale)\n\n # precompute if n_samples > n_features\n if isinstance(precompute, str) and precompute == \"auto\":\n precompute = n_samples > n_features\n\n if precompute is True:\n # make sure that the 'precompute' array is contiguous.\n precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order=\"C\")\n np.dot(X.T, X, out=precompute)\n\n if not hasattr(precompute, \"__array__\"):\n Xy = None # cannot use Xy if precompute is not Gram\n\n if hasattr(precompute, \"__array__\") and Xy is None:\n common_dtype = np.result_type(X.dtype, y.dtype)\n if y.ndim == 1:\n # Xy is 1d, make sure it is contiguous.\n Xy = np.empty(shape=n_features, dtype=common_dtype, order=\"C\")\n np.dot(X.T, y, out=Xy)\n else:\n # Make sure that Xy is always F contiguous even if X or y are not\n # contiguous: the goal is to make it fast to extract the data for a\n # specific target.\n n_targets = y.shape[1]\n Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order=\"F\")\n np.dot(y.T, X, out=Xy.T)\n\n return X, y, X_offset, y_offset, X_scale, precompute, Xy" } ]
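The context snippets above document scikit-learn's private parameter-validation helpers (`validate_params`, `Interval`, `StrOptions`). Below is a minimal sketch of how they compose, modeled directly on the constraint dictionary this record later applies to `orthogonal_mp`; the function `configure_solver` and its defaults are hypothetical stand-ins, and `sklearn.utils._param_validation` is a private module whose API may change between releases.

from numbers import Integral, Real

from sklearn.utils._param_validation import Interval, StrOptions, validate_params


@validate_params(
    {
        # Same constraint forms as used for orthogonal_mp in this record.
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
        "tol": [Interval(Real, 0, None, closed="left"), None],
        "precompute": ["boolean", StrOptions({"auto"})],
    },
    prefer_skip_nested_validation=True,
)
def configure_solver(n_nonzero_coefs=None, tol=None, precompute="auto"):
    # Validation happens in the decorator; the body only returns the settings.
    return n_nonzero_coefs, tol, precompute


configure_solver(n_nonzero_coefs=5)   # passes validation
# configure_solver(tol=-1.0)          # would raise InvalidParameterError

Keeping the constraints in a dictionary attached to the function (rather than hand-written checks in the body) is what lets the decorator skip nested re-validation via `prefer_skip_nested_validation`.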
import warnings
import numpy as np
from math import sqrt
from numbers import Integral, Real
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from ..base import MultiOutputMixin, RegressorMixin, _fit_context
from ..model_selection import check_cv
from ..utils import as_float_array, check_array
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ..utils.parallel import Parallel, delayed
from ._base import LinearModel, _deprecate_normalize, _pre_fit
13460
overrides `n_nonzero_coefs`. norms_squared : array-like of shape (n_targets,), default=None Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, default=True Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, default=True Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Generic sparse coding. Each column of the result is the solution to a Lasso problem. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order="F", copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if copy_Xy or not Xy.flags.writeable: # Make the copy once instead of many times in _gram_omp itself. Xy = Xy.copy() if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError( "Gram OMP needs the precomputed norms in order " "to evaluate the error sum of squares." 
) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError( "The number of atoms cannot be more than the number of features" ) if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype) else: coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=False, return_path=return_path, ) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, : len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef)
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause premature = ( "Orthogonal matching pursuit ended prematurely due to linear" " dependence in the dictionary. The requested precision might" " not have been met." ) def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : ndarray of shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : ndarray of shape (n_samples,) Input targets. n_nonzero_coefs : int Targeted number of non-zero elements. tol : float, default=None Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, default=True Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : ndarray of shape (n_nonzero_coefs,) Non-zero elements of the solution. idx : ndarray of shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector. coef : ndarray of shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ if copy_X: X = X.copy("F") else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,)) (potrs,) = get_lapack_funcs(("potrs",), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs L = np.empty((max_features, max_features), dtype=X.dtype) if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular( L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False, ) v = nrm2(L[n_active, :n_active]) ** 2 Lkk = linalg.norm(X[:, lam]) ** 2 - v if Lkk <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = sqrt(Lkk) else: L[0, 0] = linalg.norm(X[:, lam]) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = X'y as a composition of two triangular systems gamma, _ = potrs( L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False ) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def 
_gram_omp( Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False, ): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the Cholesky decomposition method. Parameters ---------- Gram : ndarray of shape (n_features, n_features) Gram matrix of the input data matrix. Xy : ndarray of shape (n_features,) Input targets. n_nonzero_coefs : int Targeted number of non-zero elements. tol_0 : float, default=None Squared norm of y, required if tol is not None. tol : float, default=None Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, default=True Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, default=True Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : ndarray of shape (n_nonzero_coefs,) Non-zero elements of the solution. idx : ndarray of shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector. coefs : ndarray of shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram) if copy_Xy or not Xy.flags.writeable: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,)) (potrs,) = get_lapack_funcs(("potrs",), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs L = np.empty((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1.0 if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular( L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False, ) v = nrm2(L[n_active, :n_active]) ** 2 Lkk = Gram[lam, lam] - v if Lkk <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = sqrt(Lkk) else: L[0, 0] = sqrt(Gram[lam, lam]) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = X'y as a composition of two triangular systems gamma, _ = potrs( L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False ) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], 
coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active @validate_params( { "X": ["array-like"], "y": [np.ndarray], "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], "tol": [Interval(Real, 0, None, closed="left"), None], "precompute": ["boolean", StrOptions({"auto"})], "copy_X": ["boolean"], "return_path": ["boolean"], "return_n_iter": ["boolean"], }, prefer_skip_nested_validation=True, ) def orthogonal_mp( X, y, *, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False, ): r"""Orthogonal Matching Pursuit (OMP). Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Input targets. n_nonzero_coefs : int, default=None Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, default=None Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs. precompute : 'auto' or bool, default=False Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, default=True Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis generates coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model. orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Sparse coding. Notes ----- Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order="F", copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError( "The number of atoms cannot be more than the number of features" ) if precompute == "auto": precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y**2), axis=0) else: norms_squared = None return orthogonal_mp_gram( G, Xy, n_nonzero_coefs=n_nonzero_coefs, tol=tol, norms_squared=norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path, ) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path ) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, : len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram( Gram, Xy, *, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False, ): """Gram Orthogonal Matching Pursuit (OMP). Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. Parameters ---------- Gram : ndarray of shape (n_features, n_features) Gram matrix of the input data: X.T * X. Xy : ndarray of shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y. n_nonzero_coefs : int, default=None Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, default=None Maximum squared norm of the residual. If not `None`, overrides `n_nonzero_coefs`. norms_squared : array-like of shape (n_targets,), default=None Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, default=True Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, default=True Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. 
See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Generic sparse coding. Each column of the result is the solution to a Lasso problem. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order="F", copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if copy_Xy or not Xy.flags.writeable: # Make the copy once instead of many times in _gram_omp itself. Xy = Xy.copy() if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError( "Gram OMP needs the precomputed norms in order " "to evaluate the error sum of squares." ) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError( "The number of atoms cannot be more than the number of features" ) if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype) else: coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=False, return_path=return_path, ) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, : len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef)
class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):
0
2023-10-07 13:19:48+00:00
16k
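The record above documents `orthogonal_mp` and `orthogonal_mp_gram` in full, including their signatures and the unit-norm assumption on the dictionary columns. Before the next record begins, here is a minimal usage sketch of the two solvers, assuming scikit-learn is installed; the dictionary size, random seed, and sparsity level of 3 are illustrative choices, not values taken from the record.

import numpy as np
from sklearn.linear_model import orthogonal_mp, orthogonal_mp_gram

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 20))
X /= np.linalg.norm(X, axis=0)              # columns are assumed to have unit norm
y = X[:, :3] @ np.array([1.5, -2.0, 0.5])   # signal built from the first 3 atoms

# argmin ||y - X @ gamma||^2  subject to  ||gamma||_0 <= 3
coef = orthogonal_mp(X, y, n_nonzero_coefs=3)

# Same problem expressed through the Gram matrix X.T @ X and the product X.T @ y
coef_gram = orthogonal_mp_gram(X.T @ X, X.T @ y, n_nonzero_coefs=3)

# Both calls typically recover atoms 0, 1 and 2 for this well-conditioned example.
print(np.flatnonzero(coef), np.flatnonzero(coef_gram))

The Gram formulation is the one `orthogonal_mp` itself delegates to when `precompute` is enabled (it builds `X.T @ X` and `X.T @ y` and calls `orthogonal_mp_gram`), which is why the two calls are expected to agree.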
hellloxiaotian/KDNet
test_ccpd.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n for w in weights if isinstance(weights, list) else [weights]:\n # attempt_download(w) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n \n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n \n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset" }, { "identifier": "coco80_to_coco91_class", "path": "utils/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x" }, { 
"identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(dict):\n # Download dataset if not found locally\n val, s = dict.get('val'), dict.get('download')\n if val and len(val):\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n print('\\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])\n if s and len(s): # download script\n print('Downloading %s ...' % s)\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n torch.hub.download_url_to_file(s, f)\n r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip\n else: # bash script\n r = os.system(s)\n print('Dataset autodownload %s\\n' % ('success' if r == 0 else 'failure')) # analyze return value\n else:\n raise Exception('Dataset not found.')" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file):\n # Search for file if not found\n if Path(file).is_file() or file == '':\n return file\n else:\n files = glob.glob('./**/' + file, recursive=True) # find file\n assert len(files), f'File Not Found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(img_size, s=32):\n # Verify img_size is a multiple of stride s\n new_size = make_divisible(img_size, int(s)) # ceil gs-multiple\n if new_size != img_size:\n print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))\n return new_size" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "def check_requirements(requirements='requirements.txt', exclude=()):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n import pkg_resources as pkg\n prefix = colorstr('red', 'bold', 'requirements:')\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n if not file.exists():\n print(f\"{prefix} {file.resolve()} not found, check failed.\")\n return\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n n += 1\n print(f\"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...\")\n print(subprocess.check_output(f\"pip install '{e.req}'\", shell=True).decode())\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s)) # emoji-safe" }, { "identifier": "box_iou", "path": "utils/general.py", "snippet": "def box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix 
containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1])\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=()):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n if nc == 1:\n x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5,\n # so there is no need to multiplicate.\n else:\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # 
iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "set_logging", "path": "utils/general.py", "snippet": "def set_logging(rank=-1):\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if rank in [-1, 0] else logging.WARN)" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=True, sep=''):\n # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.\n path = Path(path) # os-agnostic\n if (path.exists() and exist_ok) or (not path.exists()):\n return str(path)\n else:\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n return f\"{path}{sep}{n}\" # update path" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "ap_per_class", "path": "utils/metrics.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, v5_metric=False, plot=False, save_dir='.', names=()):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n nc = unique_classes.shape[0] # number of classes, number of detections\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = (target_cls == c).sum() # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + 1e-16) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j], v5_metric=v5_metric)\n if plot and j == 0:\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n if plot:\n plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)\n plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')\n plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')\n plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')\n\n i = f1.mean(0).argmax() # max F1 index\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of 
classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = general.box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[gc, detection_classes[m1[j]]] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def plot(self, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FP'] if labels else \"auto\",\n yticklabels=names + ['background FN'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n except Exception as e:\n pass\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i 
// ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)" }, { "identifier": "plot_study_txt", "path": "utils/plots.py", "snippet": "def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()\n # Plot study.txt generated by test.py\n fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)\n # ax = ax.ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]:\n for f in sorted(Path(path).glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']\n # for i in range(7):\n # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n # ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n 
ax2.set_ylim(30, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n plt.savefig(str(Path(path).name) + '.png', dpi=300)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_synchronized", "path": "utils/torch_utils.py", "snippet": "def time_synchronized():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "TracedModel", "path": "utils/torch_utils.py", "snippet": "class TracedModel(nn.Module):\n\n def __init__(self, model=None, device=None, img_size=(640,640)): \n super(TracedModel, self).__init__()\n \n print(\" Convert model to Traced-model... \") \n self.stride = model.stride\n self.names = model.names\n self.model = model\n\n self.model = revert_sync_batchnorm(self.model)\n self.model.to('cpu')\n self.model.eval()\n\n self.detect_layer = self.model.model[-1]\n self.model.traced = True\n \n rand_example = torch.rand(1, 3, img_size, img_size)\n \n traced_script_module = torch.jit.trace(self.model, rand_example, strict=False)\n #traced_script_module = torch.jit.script(self.model)\n traced_script_module.save(\"traced_model.pt\")\n print(\" traced_script_module saved! \")\n self.model = traced_script_module\n self.model.to(device)\n self.detect_layer.to(device)\n print(\" model is traced! \\n\") \n\n def forward(self, x, augment=False, profile=False):\n out = self.model(x)\n out = self.detect_layer(out)\n return out" } ]
import argparse
import json
import os
import numpy as np
import torch
import yaml
from pathlib import Path
from threading import Thread
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
    box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized, TracedModel
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
11,939
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any():
def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, v5_metric=v5_metric, save_dir=save_dir, names=names)
15
2023-10-08 13:05:58+00:00
16k
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n \"\"\"\n Initializes the StreaksDB class and creates the 'streaks' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_streaks_table()\n\n def _create_streaks_table(self):\n \"\"\"\n Creates the 'streaks' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS streaks (\n discord_id BIGINT PRIMARY KEY,\n current_streak INT DEFAULT 0,\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :param new_streak: The new streak count.\n \"\"\"\n query = \"\"\"\n INSERT INTO streaks (discord_id, current_streak)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE current_streak = %s\n \"\"\"\n params = (discord_id, new_streak, new_streak)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :return: The current streak count.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n query = \"SELECT current_streak FROM streaks WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()" }, { "identifier": "TeamMemberDB", "path": "team_members/team_member_db.py", "snippet": "class TeamMemberDB(BaseDB):\n \"\"\"\n TeamMemberDB class handles operations related to the 'team_members' table.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the TeamMemberDB class and creates the 'team_members' table if it doesn't exist.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_team_members_table()\n\n def _create_team_members_table(self):\n \"\"\"\n Creates the 'team_members' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS team_members (\n discord_id BIGINT PRIMARY KEY,\n name VARCHAR(255) NOT NULL,\n time_zone VARCHAR(50) NOT NULL,\n github_username VARCHAR(255),\n on_vacation BOOLEAN DEFAULT FALSE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_new_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Inserts a new team member into the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param name: The name of the team member.\n :param time_zone: The time zone of the team member.\n :param github_username: The GitHub username of 
the team member.\n \"\"\"\n query = \"\"\"\n INSERT INTO team_members (discord_id, name, time_zone, github_username)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE name = %s, time_zone = %s, github_username = %s\n \"\"\"\n params = (discord_id, name, time_zone, github_username, name, time_zone, github_username)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Removes a team member from the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member to remove.\n \"\"\"\n query = \"DELETE FROM team_members WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def list_all_members(self) -> List[Tuple[int, str, str, str, bool]]:\n \"\"\"\n Fetches all team members from the 'team_members' table.\n\n :return: A list of tuples, each containing the Discord ID, name, time zone, GitHub username, and vacation status of a team member.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n try:\n c.execute(\"SELECT discord_id, name, time_zone, github_username, on_vacation FROM team_members\")\n return c.fetchall()\n finally:\n c.close()\n self.close()\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Updates the timezone of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param new_time_zone: The new timezone to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET time_zone = %s WHERE discord_id = %s\"\n params = (new_time_zone, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def set_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET on_vacation = %s WHERE discord_id = %s\"\n params = (on_vacation, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "UpdatesDB", "path": "updates/updates_db.py", "snippet": "class UpdatesDB(BaseDB):\n \"\"\"\n Database class for handling operations related to the 'updates' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the UpdatesDB class and creates the 'updates' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_updates_table()\n\n def _create_updates_table(self):\n \"\"\"\n Creates the 'updates' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS updates (\n id INT AUTO_INCREMENT PRIMARY KEY,\n discord_id BIGINT,\n status TEXT NOT NULL,\n summarized_status TEXT,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n time_zone VARCHAR(255),\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n )\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update into the 'updates' 
table.\n\n :param discord_id: The Discord ID of the team member.\n :param status: The status update.\n :param time_zone: The time zone of the user.\n \"\"\"\n # Convert current UTC time to user's local time zone\n utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)\n local_now = utc_now.astimezone(pytz.timezone(time_zone))\n\n query = \"INSERT INTO updates (discord_id, status, timestamp, time_zone) VALUES (%s, %s, %s, %s)\"\n params = (discord_id, status, local_now, time_zone)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized_status for the most recent update for a given user.\n\n :param discord_id: The Discord ID of the team member.\n :param summarized_status: The summarized status update.\n \"\"\"\n query = \"\"\"\n UPDATE updates\n SET summarized_status = %s\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (summarized_status, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n \n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n :param discord_id: The Discord ID of the user.\n :param time_zone: The time zone of the user.\n :return: The count of check-ins in the current week.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Adjusting the current time to the user's time zone\n local_tz = pytz.timezone(time_zone)\n local_now = datetime.now(local_tz)\n \n # Getting the Monday of the current week in the user's time zone\n monday = local_now - timedelta(days=local_now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n query = \"\"\"\n SELECT COUNT(*) FROM updates\n WHERE discord_id = %s AND timestamp >= %s\n \"\"\"\n params = (discord_id, monday)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()\n\n def get_statuses_in_date_range(self, discord_id: int, start_date: datetime, end_date: datetime) -> List[str]:\n \"\"\"\n Fetches all raw status updates for a given user within a specified date range.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n A list of raw status updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT summarized_status FROM updates\n WHERE discord_id = %s AND timestamp >= %s AND timestamp <= %s\n \"\"\"\n params = (discord_id, start_date, end_date)\n try:\n c.execute(query, params)\n \n statuses = [row[0] for row in c.fetchall()]\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor(dictionary=True) # Set dictionary=True to return results as dictionaries\n \n query = \"\"\"\n SELECT id, discord_id, status, summarized_status, timestamp \n FROM updates\n 
WHERE discord_id = %s\n ORDER BY timestamp DESC\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n statuses = c.fetchall()\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT timestamp, time_zone FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return (row[0], row[1]) if row else (None, None)\n finally:\n c.close()\n self.close()\n \n def delete_newest_status(self, discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Fetch the ID of the newest status update for the given user\n query_get_id = \"\"\"\n SELECT id FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n try:\n c.execute(query_get_id, (discord_id,))\n \n row = c.fetchone()\n if row:\n status_id = row[0]\n \n # Now, delete the status update using its ID\n query_delete = \"\"\"\n DELETE FROM updates WHERE id = %s\n \"\"\"\n c.execute(query_delete, (status_id,))\n \n self.conn.commit()\n finally:\n c.close()\n self.close()" }, { "identifier": "WeeklyPostsDB", "path": "weekly_posts/weekly_posts_db.py", "snippet": "class WeeklyPostsDB(BaseDB):\n \"\"\"\n Database class that handles operations related to the 'weekly_posts' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the WeeklyPostsDB class, connects to the MySQL database,\n and creates the 'weekly_posts' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_weekly_posts_table()\n\n def _create_weekly_posts_table(self):\n \"\"\"\n Creates the 'weekly_posts' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS weekly_posts (\n post_id BIGINT PRIMARY KEY,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def get_weekly_post_data(self) -> Optional[Dict[str, datetime.datetime]]:\n \"\"\"\n Fetches the most recent weekly post data from the 'weekly_posts' table.\n\n :return: A dictionary containing the post ID and timestamp, or None if no data exists.\n \"\"\"\n query = \"SELECT post_id, timestamp FROM weekly_posts ORDER BY timestamp DESC LIMIT 1\"\n \n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n try:\n c.execute(query)\n row = c.fetchone()\n\n if row:\n return {'post_id': row[0], 'timestamp': row[1]}\n return None\n finally:\n c.close()\n self.close()\n\n def save_weekly_post_data(self, post_id: int, timestamp: 
datetime.datetime):\n \"\"\"\n Inserts or updates the weekly post data in the 'weekly_posts' table.\n\n :param post_id: The ID of the weekly post.\n :param timestamp: The timestamp of the weekly post.\n \"\"\"\n query = \"\"\"\n INSERT INTO weekly_posts (post_id, timestamp)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE timestamp = %s\n \"\"\"\n params = (post_id, timestamp, timestamp)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "StreaksManager", "path": "streaks/streaks_manager.py", "snippet": "class StreaksManager:\n \"\"\"\n Manages the streaks for team members.\n \"\"\"\n \n def __init__(self, streaks_db: StreaksDB):\n \"\"\"\n Initializes a new StreaksManager instance.\n\n Args:\n streaks_db: The StreaksDB object that handles database operations.\n \"\"\"\n self.streaks_db = streaks_db\n \n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n The current streak count.\n \"\"\"\n return self.streaks_db.get_streak(discord_id)\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n new_streak: The new streak count.\n \"\"\"\n self.streaks_db.update_streak(discord_id, new_streak)\n \n def reset_streak(self, discord_id: int):\n \"\"\"\n Resets the streak for a given user to zero.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.streaks_db.update_streak(discord_id, 0)" }, { "identifier": "TeamMemberManager", "path": "team_members/team_member_manager.py", "snippet": "class TeamMemberManager:\n \"\"\"\n Manages operations related to team members.\n \"\"\"\n\n def __init__(self, db: TeamMemberDB):\n \"\"\"\n Initialize a TeamMemberManager object.\n\n :param db: TeamMemberDB object for interacting with the database.\n \"\"\"\n self.db = db\n self.team_members = self.load_team_members()\n\n def load_team_members(self) -> List[TeamMember]:\n \"\"\"\n Load team members from the MySQL database into a list of TeamMember objects.\n\n :return: List of TeamMember objects.\n \"\"\"\n team_members = []\n members_data = self.db.list_all_members()\n\n for member_data in members_data:\n member = TeamMember(\n discord_id=member_data[0],\n time_zone=member_data[2],\n name=member_data[1],\n github_username=member_data[3],\n on_vacation=member_data[4]\n )\n team_members.append(member)\n\n return team_members\n\n def find_member(self, discord_id: int) -> TeamMember:\n \"\"\"\n Find and return a team member by their Discord ID.\n\n :param discord_id: The Discord ID of the team member.\n :return: A TeamMember object if found, otherwise None.\n \"\"\"\n for member in self.team_members:\n if member.discord_id == discord_id:\n return member\n return None\n\n def add_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Add a new team member to the list and the database.\n\n :param discord_id: The Discord ID of the new member.\n :param name: The name of the new member.\n :param time_zone: The time zone of the new member.\n :param github_username: The GitHub username of the new member.\n \"\"\"\n new_member = TeamMember(discord_id, time_zone, name, github_username)\n self.db.insert_new_member(discord_id, name, time_zone, github_username)\n self.team_members.append(new_member)\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Remove a team member from the list and the database.\n\n :param 
discord_id: The Discord ID of the member to remove.\n \"\"\"\n self.db.remove_member(discord_id)\n self.team_members = [member for member in self.team_members if member.discord_id != discord_id]\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Update the timezone of a team member in the database and the list.\n\n :param discord_id: The Discord ID of the member to update.\n :param new_time_zone: The new timezone string to set for the member.\n \"\"\"\n # Update the timezone in the database\n self.db.update_member_timezone(discord_id, new_time_zone)\n\n # Find the member in the team_members list and update their timezone\n member = self.find_member(discord_id)\n if member:\n member.time_zone = new_time_zone\n\n def set_member_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n # Update the vacation status in the database\n self.db.set_vacation_status(discord_id, on_vacation)\n\n # Find the member in the team_members list and update their vacation status\n member = self.find_member(discord_id)\n if member:\n member.on_vacation = on_vacation" }, { "identifier": "UpdatesManager", "path": "updates/updates_manager.py", "snippet": "class UpdatesManager:\n \"\"\"\n Manages status updates for team members.\n \"\"\"\n\n def __init__(self, updates_db: UpdatesDB):\n \"\"\"\n Initializes a new UpdatesManager instance.\n\n Args:\n updates_db: The UpdatesDB object that handles database operations.\n \"\"\"\n self.updates_db = updates_db\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update.\n\n Args:\n discord_id: The Discord ID of the team member.\n status: The status update.\n \"\"\"\n self.updates_db.insert_status(discord_id, status, time_zone)\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized status for the most recent update for a given user.\n\n Args:\n discord_id: The Discord ID of the team member.\n summarized_status: The summarized status update.\n \"\"\"\n self.updates_db.update_summarized_status(discord_id, summarized_status)\n\n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n Args:\n discord_id: The Discord ID of the user.\n time_zone: The time zone of the user.\n\n Returns:\n The count of check-ins in the current week.\n \"\"\"\n return self.updates_db.get_weekly_checkins_count(discord_id, time_zone)\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n return self.updates_db.get_all_statuses_for_user(discord_id)\n\n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n return self.updates_db.get_last_update_timestamp(discord_id)\n\n def delete_newest_status(self, 
discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.updates_db.delete_newest_status(discord_id)\n\n async def generate_daily_summary(self, user_message: str) -> str:\n \"\"\"\n Generates a daily summary of the user's message using a large language model.\n\n Args:\n user_message: The user's message that needs to be summarized.\n\n Returns:\n The summarized message.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Please summarize the user's update into two sections: 'Did' for tasks completed yesterday and 'Do' for tasks planned for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n \n except Exception as e:\n print(f\"An error occurred while generating the summary: {e}\")\n return \"Error in generating summary\"\n\n async def generate_weekly_summary(self, discord_id: int, start_date: datetime, end_date: datetime) -> str:\n \"\"\"\n Generates a weekly summary of the user's status updates using a large language model.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n The summarized weekly status update.\n \"\"\"\n # Fetch all raw status updates for the specified date range using the new method in UpdatesDB\n weekly_statuses = self.updates_db.get_statuses_in_date_range(discord_id, start_date, end_date)\n\n if not weekly_statuses:\n return \"There are no status updates for this week.\"\n \n # Combine all raw statuses into a single string\n combined_statuses = \"\\n\".join(weekly_statuses)\n \n # Prepare a system message to guide OpenAI's model for weekly summary\n system_message = \"Please generate a comprehensive weekly summary based on the provided daily status updates, including only tasks that have been accomplished. 
Ignore tasks that are not in the 'Did' section.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_statuses}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-4-0613\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n weekly_summary = response['choices'][0]['message']['content'].strip()\n\n return weekly_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the weekly summary: {e}\")\n return \"Error in generating weekly summary\"\n \n async def summarize_technical_updates(self, commit_messages: List[str]) -> str:\n \"\"\"\n Summarizes the technical updates based on commit messages.\n\n Args:\n commit_messages: List of commit messages for the day.\n\n Returns:\n A summarized version of the technical updates.\n \"\"\"\n\n # Combine commit messages into a single string for the LLM\n combined_commits = \"\\n\".join(commit_messages)\n\n # If there are no commit messages, return a default message\n if not combined_commits:\n return \"No technical updates found based on commit messages.\"\n\n # Summarization using LLM\n system_message = \"Please provide a concise summary of the technical updates based on the provided commit messages.\"\n\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_commits}\n ]\n\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the technical summary: {e}\")\n return \"Error in generating technical summary.\"\n\n async def summarize_feedback_and_revisions(self, original_report: str, feedback: str) -> str:\n \"\"\"\n Takes the original report and user feedback and generates a revised summary.\n\n Args:\n original_report: The original summarized report.\n feedback: The user's feedback or suggested edits.\n\n Returns:\n The revised summary.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Revise the original report based on the user's feedback.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": f\"Original Report: {original_report}\"},\n {\"role\": \"user\", \"content\": f\"Feedback: {feedback}\"}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n revised_summary = response['choices'][0]['message']['content'].strip()\n\n return revised_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the revised summary: {e}\")\n return \"Error in generating revised summary\"\n\n async def summarize_non_technical_updates(self, update: str) -> str:\n \"\"\"\n Summarizes a non-technical update using a large language model.\n\n Args:\n update: The raw non-technical update provided by the user.\n\n Returns:\n The summarized non-technical 
update.\n \"\"\"\n\n # System message to guide the LLM for a concise summary\n system_message = \"Please provide a concise summary of the non-technical update shared by the user.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": update}\n ]\n\n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the non-technical summary: {e}\")\n return \"Error in generating summary\"\n\n async def summarize_goals_for_the_day(self, goals: str) -> str:\n \"\"\"\n Summarizes the user's goals for the day using a large language model.\n\n Args:\n goals: The user's raw input on their goals for the day.\n\n Returns:\n The summarized goals for the day.\n \"\"\"\n # Initiate the conversation with the model\n system_message = \"Please provide a concise summary of the user's goals for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": goals}\n ]\n \n # Specify the model engine you want to use (this is an example and can be adjusted based on your needs)\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Provide user's input and retrieve model's response\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_goals = response['choices'][0]['message']['content'].strip()\n\n # Return the summary\n return summarized_goals\n \n except Exception as e:\n print(f\"An error occurred while generating the goals summary: {e}\")\n return \"Error in generating goals summary\"\n \n async def evaluate_performance(self, user_message: str) -> str:\n \"\"\"\n Evaluates the performance of the user based on their update.\n\n Args:\n user_message: The user's message that needs to be evaluated.\n\n Returns:\n The evaluation of the user's performance.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"\"\"\n You are a project manager at a fast-paced tech startup, recognized for providing clear and actionable feedback during stand-up meetings. 
Your role is to evaluate the quality of team members' daily stand-up reports, with a focus on clear communication, comprehensive planning, and problem-solving abilities.\n It is essential to note that team members should neither be penalized nor rewarded for merely mentioning issues; instead, the emphasis should be on the clarity of the report and the quality of strategies proposed to address these issues.\n Your feedback is candid and aimed at encouraging high-quality reporting and effective planning within the startup environment.\n Please provide a two-sentence summary of the stand-up and assign a grade (A, B, C, D, or F) based on the following criteria:\n\n - A: Excellent - The report is exceptionally clear and detailed, with well-defined tasks and a thorough approach to tackling issues, exemplifying the proactive and problem-solving ethos of our startup.\n - B: Good - The report is clear and adequately detailed, outlining tasks and addressing issues with a reasonable approach, indicating a commitment to momentum and resolution.\n - C: Fair - The report is understandable but lacks detail in some areas, with a basic approach to resolving issues, suggesting a need for further strategy development.\n - D: Poor - The report is vague or missing details, with a limited or unclear approach to issues, necessitating better communication and planning skills.\n - F: Fail - The report is missing, overly vague, or lacks a coherent structure, with no apparent approach to issues, reflecting a need for significant improvement in reporting and strategizing.\n\n A comprehensive stand-up report effectively communicates what was done and what is planned, clearly identifies any issues, and connects daily tasks with broader business objectives.\n\n Provide clear and constructive feedback, aiming to foster a culture of excellence and continuous improvement in how we plan and communicate our daily activities.\n \"\"\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n performance_evaluation = response['choices'][0]['message']['content'].strip()\n\n return performance_evaluation\n \n except Exception as e:\n print(f\"An error occurred while evaluating the performance: {e}\")\n return \"Error in evaluating performance\"" }, { "identifier": "WeeklyPostManager", "path": "weekly_posts/weekly_post_manager.py", "snippet": "class WeeklyPostManager:\n \"\"\"Manages the status post in a Discord channel.\"\"\"\n \n def __init__(self, channel, weekly_posts_db: WeeklyPostsDB):\n \"\"\"\n Initializes a new WeeklyPostManager instance.\n \"\"\"\n self.channel = channel\n self.weekly_posts_db = weekly_posts_db\n self.editable_weekly_post = None\n self.load_weekly_post_data()\n\n def load_weekly_post_data(self):\n \"\"\"\n Load the weekly post data from the database.\n \n This method queries the 'weekly_posts' table to get the ID and timestamp of \n the last weekly post. 
If no data exists, it sets the ID and timestamp to None.\n \"\"\"\n data = self.weekly_posts_db.get_weekly_post_data()\n self.editable_weekly_post_id = data.get('post_id', None)\n self.weekly_post_timestamp = data.get('timestamp', None)\n\n def save_weekly_post_data(self):\n \"\"\"\n Save the weekly post data to the database.\n \n This method inserts or updates the ID and timestamp of the current weekly post \n in the 'weekly_posts' table.\n \"\"\"\n self.weekly_posts_db.save_weekly_post_data(self.editable_weekly_post.id, datetime.now())\n\n async def initialize_post(self, team_members: List[TeamMember]):\n \"\"\"\n Initializes or retrieves the weekly status post on Discord.\n\n This function checks if a valid weekly post already exists for the current week.\n If it does, it retrieves that post. Otherwise, it sends a new message in the Discord\n channel with the list of team members and their statuses.\n\n Args:\n team_members: A list of TeamMember objects to be displayed in the post.\n \"\"\"\n current_week_number = datetime.now().isocalendar()[1]\n saved_week_number = self.weekly_post_timestamp.isocalendar()[1] if self.weekly_post_timestamp else None\n\n # Skip initialization if the post already exists and is for the current week\n if self.editable_weekly_post_id and current_week_number == saved_week_number:\n self.editable_weekly_post = await self.channel.fetch_message(self.editable_weekly_post_id)\n return\n\n utc_now = pytz.utc.localize(datetime.utcnow())\n today_weekday = utc_now.weekday()\n last_monday = utc_now - timedelta(days=today_weekday)\n next_sunday = last_monday + timedelta(days=6)\n\n start_date = self.format_date(last_monday)\n end_date = self.format_date(next_sunday)\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {m.current_streak}🔥\" if m.current_streak > 0 else \"\"\n\n # Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {'❓' * 5} {streak_str}`\"\n member_list.append(new_line)\n\n member_list_str = '\\n'.join(member_list)\n\n await self.channel.send(f\"# Weekly Status Updates\")\n await self.channel.send(f\"## {start_date} to {end_date}\")\n if member_list_str:\n self.editable_weekly_post = await self.channel.send(f\"{member_list_str}\")\n self.save_weekly_post_data() # Save the ID and timestamp after creating the post\n\n async def rebuild_post(self, team_members: List[TeamMember]):\n \"\"\"\n Rebuilds the entire weekly status post from the team members' data.\n\n Args:\n team_members: A list of TeamMember objects with updated statuses and streaks.\n \"\"\"\n # If there are no team members, delete the post and return\n if not team_members:\n if self.editable_weekly_post:\n await self.editable_weekly_post.delete()\n self.editable_weekly_post = None\n return\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Get the streak and number of weekly check-ins for the member\n streak = m.current_streak\n check_ins = m.weekly_checkins\n\n # Generate the marks based on the number of check-ins\n marks = \"✅\" * check_ins + \"❓\" * (5 - check_ins)\n\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {streak}🔥\" if streak > 0 else \"\"\n\n # 
Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {marks} {streak_str}`\"\n member_list.append(new_line)\n\n new_content = '\\n'.join(member_list)\n\n # Update the existing post or create a new one if it doesn't exist\n if self.editable_weekly_post:\n self.editable_weekly_post = await self.editable_weekly_post.edit(content=new_content)\n else:\n self.editable_weekly_post = await self.channel.send(new_content)\n\n # Save the ID and timestamp of the post\n self.save_weekly_post_data()\n\n def format_date(self, dt: datetime) -> str:\n \"\"\"\n Formats a datetime object into a human-readable string.\n\n Args:\n dt: The datetime object to format.\n\n Returns:\n A human-readable date string.\n \"\"\"\n suffix = ['th', 'st', 'nd', 'rd']\n day = int(dt.strftime('%d'))\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix_index = 0 # use 'th'\n else:\n suffix_index = day % 10 # use 'st', 'nd', 'rd' as appropriate\n\n return dt.strftime(f\"%B {day}{suffix[suffix_index]}\")" }, { "identifier": "Scheduler", "path": "scheduler.py", "snippet": "class Scheduler:\n \"\"\"Scheduler class to manage timed jobs for sending status requests.\n\n Attributes:\n scheduler: The APScheduler object.\n job_ids: A dictionary to store lists of job IDs for each member.\n \"\"\"\n \n def __init__(self) -> None:\n \"\"\"Initialize the Scheduler object and start the APScheduler.\"\"\"\n self.scheduler: AsyncIOScheduler = AsyncIOScheduler()\n self.job_ids: Dict[int, List[str]] = {} # Store job IDs indexed by member's Discord ID\n self.weekly_post_job_id = None # To store the ID of the scheduled weekly post job\n self.scheduler.start()\n\n def add_job(self, func: callable, member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager) -> None:\n \"\"\"Add a new job to the scheduler for a specific team member.\n \n Args:\n func: The function to call when the job is run.\n member: The TeamMember object for whom the job is added.\n \"\"\"\n time_zone = pytz.timezone(member.time_zone)\n \n weekday_trigger = CronTrigger(day_of_week='mon,tue,wed,thu,fri', hour=10, timezone=time_zone)\n weekend_trigger = CronTrigger(day_of_week='sat,sun', hour=11, timezone=time_zone)\n\n weekday_job = self.scheduler.add_job(func, weekday_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n weekend_job = self.scheduler.add_job(func, weekend_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n\n self.job_ids.setdefault(member.discord_id, []).extend([weekday_job.id, weekend_job.id])\n\n def remove_job(self, discord_id: int) -> None:\n \"\"\"Remove jobs for a specific team member.\n \n Args:\n discord_id: The Discord ID of the member for whom the job should be removed.\n \"\"\"\n job_ids = self.job_ids.get(discord_id, [])\n for job_id in job_ids:\n self.scheduler.remove_job(job_id)\n\n if discord_id in self.job_ids:\n del self.job_ids[discord_id] # Remove the job IDs from the dictionary\n\n def schedule_weekly_post(self, func: callable, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]) -> None:\n \"\"\"Schedules the weekly post based on the latest time zone among the team members.\"\"\"\n \n # Determine the latest time zone\n latest_time_zone = max([member.time_zone for member in team_members], key=lambda tz: pytz.timezone(tz).utcoffset(datetime.utcnow()))\n\n # Set the trigger for 9:10 AM in the earliest time zone on 
Monday\n trigger = CronTrigger(day_of_week='mon', hour=9, minute=10, timezone=latest_time_zone)\n\n # Schedule the function with the trigger\n job = self.scheduler.add_job(func, trigger, args=[weekly_post_manager, streaks_manager, team_members])\n self.weekly_post_job_id = job.id\n\n def unschedule_weekly_post(self) -> None:\n \"\"\"Removes the weekly post job from the scheduler.\"\"\"\n if self.weekly_post_job_id:\n self.scheduler.remove_job(self.weekly_post_job_id)\n self.weekly_post_job_id = None\n\n def get_all_scheduled_jobs(self, team_member_manager) -> List[str]:\n \"\"\"Retrieve all scheduled jobs as a list of strings.\"\"\"\n job_descriptions = []\n\n for job in self.scheduler.get_jobs():\n # Determine the associated team member by looking up the job ID in the job_ids dictionary\n member_discord_id = next((discord_id for discord_id, job_ids in self.job_ids.items() if job.id in job_ids), None)\n member_name = team_member_manager.find_member(member_discord_id).name if member_discord_id else \"Unknown\"\n\n # Calculate the remaining time until the next run\n now = datetime.now(job.next_run_time.tzinfo) # Get the current time with the same timezone as the job's next_run_time\n remaining_time = job.next_run_time - now\n remaining_time_str = str(remaining_time).split('.')[0] # Remove the microseconds part\n\n # If this job is the weekly post job\n if job.id == self.weekly_post_job_id:\n job_descriptions.append(f\"ID: {job.id}, Type: Weekly Post, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n else:\n job_descriptions.append(f\"ID: {job.id}, Member: {member_name}, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n\n return job_descriptions" }, { "identifier": "TeamMember", "path": "team_members/team_member.py", "snippet": "class TeamMember:\n \"\"\"TeamMember class to store individual team member details.\n \n Attributes:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone in which the team member resides.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins of the team member.\n weekly_checkins: The number of check-ins for the current week.\n \"\"\"\n \n def __init__(self, discord_id: int, time_zone: str, name: str, github_username: str,\n current_streak: int = 0, weekly_checkins: int = 0, on_vacation: bool = False) -> None:\n \"\"\"Initialize a new TeamMember object.\n \n Args:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone of the team member.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins. Defaults to 0.\n weekly_checkins: The number of check-ins for the current week. 
Defaults to 0.\n \"\"\"\n self.discord_id: int = discord_id\n self.time_zone: str = time_zone\n self.name: str = name\n self.github_username: str = github_username\n self.current_streak: int = current_streak\n self.weekly_checkins: int = weekly_checkins\n self.on_vacation: bool = on_vacation\n \n def update_streak(self, streak: int) -> None:\n \"\"\"Update the current streak of the team member.\n \n Args:\n streak: The new streak count.\n \"\"\"\n self.current_streak = streak\n \n def reset_streak(self) -> None:\n \"\"\"Reset the current streak of the team member to 0.\"\"\"\n self.current_streak = 0\n\n def update_weekly_checkins(self, count: int):\n \"\"\"\n Update the weekly check-ins count.\n\n Args:\n count: The new count of weekly check-ins.\n \"\"\"\n self.weekly_checkins = count\n \n def increment_weekly_checkins(self) -> None:\n \"\"\"Increment the number of check-ins for the current week by 1.\"\"\"\n self.weekly_checkins += 1\n \n def reset_weekly_checkins(self) -> None:\n \"\"\"Reset the number of check-ins for the current week to 0.\"\"\"\n self.weekly_checkins = 0" } ]
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
13,447
BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = [commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = 
get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager,
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager,
updates_manager: UpdatesManager):
6
2023-10-12 02:01:46+00:00
16k
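The run.py record above builds its commit history by walking GitHub's paginated org-repos and per-repo-commits endpoints and following the Link response header via get_pagination_link. As a standalone illustration of that pattern (a sketch, not code taken from the dataset row; the helper names, example URL, and token handling are assumptions), the core loop can be written as:

# Illustrative sketch only -- not part of the dataset row above. It restates the
# Link-header pagination pattern used by get_pagination_link and the repo/commit
# loops in this record's cropped_code. Endpoint URL, token handling, and function
# names here are assumptions made for the example.
from typing import Optional

import requests


def next_page_url(headers, rel: str = "next") -> Optional[str]:
    # GitHub sends e.g.  Link: <https://api.github.com/...&page=2>; rel="next", <...>; rel="last"
    link_header = headers.get("Link")
    if not link_header:
        return None
    for part in link_header.split(", "):
        if f'rel="{rel}"' in part:
            return part.split("; ")[0].strip("<>")
    return None


def fetch_all_pages(url: str, token: str) -> list:
    """Follow rel="next" links until the API reports no further page."""
    request_headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github.v3+json",
    }
    items = []
    while url:
        response = requests.get(url, headers=request_headers)
        if response.status_code != 200:
            break  # the record's code logs the failure and stops paginating
        items.extend(response.json())
        url = next_page_url(response.headers)
    return items


# Hypothetical usage, mirroring the record's repos query:
# repos = fetch_all_pages("https://api.github.com/orgs/<org>/repos?type=all&per_page=100", "<token>")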
azuline/rose
rose/watcher.py
[ { "identifier": "update_cache_evict_nonexistent_collages", "path": "rose/cache.py", "snippet": "def update_cache_evict_nonexistent_collages(c: Config) -> None:\n logger.debug(\"Evicting cached collages that are not on disk\")\n collage_names: list[str] = []\n for f in os.scandir(c.music_source_dir / \"!collages\"):\n p = Path(f.path)\n if p.is_file() and p.suffix == \".toml\":\n collage_names.append(p.stem)\n\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n DELETE FROM collages\n WHERE name NOT IN ({\",\".join([\"?\"] * len(collage_names))})\n RETURNING name\n \"\"\",\n collage_names,\n )\n for row in cursor:\n logger.info(f\"Evicted missing collage {row['name']} from cache\")" }, { "identifier": "update_cache_evict_nonexistent_playlists", "path": "rose/cache.py", "snippet": "def update_cache_evict_nonexistent_playlists(c: Config) -> None:\n logger.debug(\"Evicting cached playlists that are not on disk\")\n playlist_names: list[str] = []\n for f in os.scandir(c.music_source_dir / \"!playlists\"):\n p = Path(f.path)\n if p.is_file() and p.suffix == \".toml\":\n playlist_names.append(p.stem)\n\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n DELETE FROM playlists\n WHERE name NOT IN ({\",\".join([\"?\"] * len(playlist_names))})\n RETURNING name\n \"\"\",\n playlist_names,\n )\n for row in cursor:\n logger.info(f\"Evicted missing playlist {row['name']} from cache\")" }, { "identifier": "update_cache_evict_nonexistent_releases", "path": "rose/cache.py", "snippet": "def update_cache_evict_nonexistent_releases(c: Config) -> None:\n logger.debug(\"Evicting cached releases that are not on disk\")\n dirs = [Path(d.path).resolve() for d in os.scandir(c.music_source_dir) if d.is_dir()]\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n DELETE FROM releases\n WHERE source_path NOT IN ({\",\".join([\"?\"] * len(dirs))})\n RETURNING source_path\n \"\"\",\n [str(d) for d in dirs],\n )\n for row in cursor:\n logger.info(f\"Evicted missing release {row['source_path']} from cache\")" }, { "identifier": "update_cache_for_collages", "path": "rose/cache.py", "snippet": "def update_cache_for_collages(\n c: Config,\n # Leave as None to update all collages.\n collage_names: list[str] | None = None,\n force: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for all stored collages.\n\n This is performance-optimized in a similar way to the update releases function. We:\n\n 1. Execute one big SQL query at the start to fetch the relevant previous caches.\n 2. Skip reading a file's data if the mtime has not changed since the previous cache update.\n 3. Only execute a SQLite upsert if the read data differ from the previous caches.\n\n However, we do not batch writes to the end of the function, nor do we process the collages in\n parallel. 
This is because we should have far fewer collages than releases.\n \"\"\"\n collage_dir = c.music_source_dir / \"!collages\"\n collage_dir.mkdir(exist_ok=True)\n\n files: list[tuple[Path, str, os.DirEntry[str]]] = []\n for f in os.scandir(str(collage_dir)):\n path = Path(f.path)\n if path.suffix != \".toml\":\n continue\n if not path.is_file():\n logger.debug(f\"Skipping processing collage {path.name} because it is not a file\")\n continue\n if collage_names is None or path.stem in collage_names:\n files.append((path.resolve(), path.stem, f))\n logger.debug(f\"Refreshing the read cache for {len(files)} collages\")\n\n cached_collages: dict[str, CachedCollage] = {}\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT\n c.name\n , c.source_mtime\n , COALESCE(GROUP_CONCAT(cr.release_id, ' ¬ '), '') AS release_ids\n FROM collages c\n LEFT JOIN collages_releases cr ON cr.collage_name = c.name\n GROUP BY c.name\n \"\"\",\n )\n for row in cursor:\n cached_collages[row[\"name\"]] = CachedCollage(\n name=row[\"name\"],\n source_mtime=row[\"source_mtime\"],\n release_ids=_split(row[\"release_ids\"]) if row[\"release_ids\"] else [],\n )\n\n # We want to validate that all release IDs exist before we write them. In order to do that,\n # we need to know which releases exist.\n cursor = conn.execute(\"SELECT id FROM releases\")\n existing_release_ids = {row[\"id\"] for row in cursor}\n\n loop_start = time.time()\n with connect(c) as conn:\n for source_path, name, f in files:\n try:\n cached_collage = cached_collages[name]\n except KeyError:\n logger.debug(f\"First-time unidentified collage found at {source_path}\")\n cached_collage = CachedCollage(\n name=name,\n source_mtime=\"\",\n release_ids=[],\n )\n\n try:\n source_mtime = str(f.stat().st_mtime)\n except FileNotFoundError:\n # Collage was deleted... continue without doing anything. It will be cleaned up by\n # the eviction function.\n continue\n if source_mtime == cached_collage.source_mtime and not force:\n logger.debug(f\"Collage cache hit (mtime) for {source_path}, reusing cached data\")\n continue\n\n logger.debug(f\"Collage cache miss (mtime) for {source_path}, reading data from disk\")\n cached_collage.source_mtime = source_mtime\n\n with lock(c, collage_lock_name(name)):\n with source_path.open(\"rb\") as fp:\n data = tomllib.load(fp)\n original_releases = data.get(\"releases\", [])\n releases = copy.deepcopy(original_releases)\n\n # Update the markings for releases that no longer exist. We will flag releases as\n # missing/not-missing here, so that if they are re-added (maybe it was a temporary\n # disappearance)? 
they are recovered in the collage.\n for rls in releases:\n if not rls.get(\"missing\", False) and rls[\"uuid\"] not in existing_release_ids:\n logger.warning(\n f\"Marking missing release {rls['description_meta']} as missing in collage {cached_collage.name}\"\n )\n rls[\"missing\"] = True\n elif rls.get(\"missing\", False) and rls[\"uuid\"] in existing_release_ids:\n logger.info(\n f\"Missing release {rls['description_meta']} in collage {cached_collage.name} found: removing missing flag\"\n )\n del rls[\"missing\"]\n\n cached_collage.release_ids = [r[\"uuid\"] for r in releases]\n logger.debug(\n f\"Found {len(cached_collage.release_ids)} release(s) (including missing) in {source_path}\"\n )\n\n # Update the description_metas.\n desc_map: dict[str, str] = {}\n cursor = conn.execute(\n f\"\"\"\n SELECT id, albumtitle, year, albumartist_names, albumartist_roles FROM releases_view\n WHERE id IN ({','.join(['?']*len(releases))})\n \"\"\",\n cached_collage.release_ids,\n )\n for row in cursor:\n desc_map[row[\"id\"]] = calculate_release_logtext(\n title=row[\"albumtitle\"],\n year=row[\"year\"],\n artists=_unpack_artists(\n c, row[\"albumartist_names\"], row[\"albumartist_roles\"]\n ),\n )\n for i, rls in enumerate(releases):\n with contextlib.suppress(KeyError):\n releases[i][\"description_meta\"] = desc_map[rls[\"uuid\"]]\n if rls.get(\"missing\", False) and not releases[i][\"description_meta\"].endswith(\n \" {MISSING}\"\n ):\n releases[i][\"description_meta\"] += \" {MISSING}\"\n\n # Update the collage on disk if we have changed information.\n if releases != original_releases:\n logger.debug(f\"Updating release descriptions for {cached_collage.name}\")\n data[\"releases\"] = releases\n with source_path.open(\"wb\") as fp:\n tomli_w.dump(data, fp)\n cached_collage.source_mtime = str(os.stat(source_path).st_mtime)\n\n logger.info(f\"Updating cache for collage {cached_collage.name}\")\n conn.execute(\n \"\"\"\n INSERT INTO collages (name, source_mtime) VALUES (?, ?)\n ON CONFLICT (name) DO UPDATE SET source_mtime = excluded.source_mtime\n \"\"\",\n (cached_collage.name, cached_collage.source_mtime),\n )\n conn.execute(\n \"DELETE FROM collages_releases WHERE collage_name = ?\",\n (cached_collage.name,),\n )\n args: list[Any] = []\n for position, rls in enumerate(releases):\n args.extend(\n [cached_collage.name, rls[\"uuid\"], position + 1, rls.get(\"missing\", False)]\n )\n if args:\n conn.execute(\n f\"\"\"\n INSERT INTO collages_releases (collage_name, release_id, position, missing)\n VALUES {','.join(['(?, ?, ?, ?)'] * len(releases))}\n \"\"\",\n args,\n )\n\n logger.debug(f\"Collage update loop time {time.time() - loop_start=}\")" }, { "identifier": "update_cache_for_playlists", "path": "rose/cache.py", "snippet": "def update_cache_for_playlists(\n c: Config,\n # Leave as None to update all playlists.\n playlist_names: list[str] | None = None,\n force: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for all stored playlists.\n\n This is performance-optimized in a similar way to the update releases function. We:\n\n 1. Execute one big SQL query at the start to fetch the relevant previous caches.\n 2. Skip reading a file's data if the mtime has not changed since the previous cache update.\n 3. Only execute a SQLite upsert if the read data differ from the previous caches.\n\n However, we do not batch writes to the end of the function, nor do we process the playlists in\n parallel. 
This is because we should have far fewer playlists than releases.\n \"\"\"\n playlist_dir = c.music_source_dir / \"!playlists\"\n playlist_dir.mkdir(exist_ok=True)\n\n files: list[tuple[Path, str, os.DirEntry[str]]] = []\n all_files_in_dir: list[Path] = []\n for f in os.scandir(str(playlist_dir)):\n path = Path(f.path)\n all_files_in_dir.append(path)\n if path.suffix != \".toml\":\n continue\n if not path.is_file():\n logger.debug(f\"Skipping processing playlist {path.name} because it is not a file\")\n continue\n if playlist_names is None or path.stem in playlist_names:\n files.append((path.resolve(), path.stem, f))\n logger.debug(f\"Refreshing the read cache for {len(files)} playlists\")\n\n cached_playlists: dict[str, CachedPlaylist] = {}\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT\n p.name\n , p.source_mtime\n , p.cover_path\n , COALESCE(GROUP_CONCAT(pt.track_id, ' ¬ '), '') AS track_ids\n FROM playlists p\n LEFT JOIN playlists_tracks pt ON pt.playlist_name = p.name\n GROUP BY p.name\n \"\"\",\n )\n for row in cursor:\n cached_playlists[row[\"name\"]] = CachedPlaylist(\n name=row[\"name\"],\n source_mtime=row[\"source_mtime\"],\n cover_path=Path(row[\"cover_path\"]) if row[\"cover_path\"] else None,\n track_ids=_split(row[\"track_ids\"]) if row[\"track_ids\"] else [],\n )\n\n # We want to validate that all track IDs exist before we write them. In order to do that,\n # we need to know which tracks exist.\n cursor = conn.execute(\"SELECT id FROM tracks\")\n existing_track_ids = {row[\"id\"] for row in cursor}\n\n loop_start = time.time()\n with connect(c) as conn:\n for source_path, name, f in files:\n try:\n cached_playlist = cached_playlists[name]\n except KeyError:\n logger.debug(f\"First-time unidentified playlist found at {source_path}\")\n cached_playlist = CachedPlaylist(\n name=name,\n source_mtime=\"\",\n cover_path=None,\n track_ids=[],\n )\n\n # We do a quick scan for the playlist's cover art here. We always do this check, as it\n # amounts to ~4 getattrs. If a change is detected, we ignore the mtime optimization and\n # always update the database.\n dirty = False\n if cached_playlist.cover_path and not cached_playlist.cover_path.is_file():\n cached_playlist.cover_path = None\n dirty = True\n if not cached_playlist.cover_path:\n for potential_art_file in all_files_in_dir:\n if (\n potential_art_file.stem == name\n and potential_art_file.suffix.lower().lstrip(\".\") in c.valid_art_exts\n ):\n cached_playlist.cover_path = potential_art_file.resolve()\n dirty = True\n break\n\n try:\n source_mtime = str(f.stat().st_mtime)\n except FileNotFoundError:\n # Playlist was deleted... continue without doing anything. It will be cleaned up by\n # the eviction function.\n continue\n if source_mtime == cached_playlist.source_mtime and not force and not dirty:\n logger.debug(f\"playlist cache hit (mtime) for {source_path}, reusing cached data\")\n continue\n\n logger.debug(\n f\"playlist cache miss (mtime/{dirty=}) for {source_path}, reading data from disk\"\n )\n cached_playlist.source_mtime = source_mtime\n\n with lock(c, playlist_lock_name(name)):\n with source_path.open(\"rb\") as fp:\n data = tomllib.load(fp)\n original_tracks = data.get(\"tracks\", [])\n tracks = copy.deepcopy(original_tracks)\n\n # Update the markings for tracks that no longer exist. We will flag tracks as\n # missing/not-missing here, so that if they are re-added (maybe it was a temporary\n # disappearance)? 
they are recovered in the playlist.\n for trk in tracks:\n if not trk.get(\"missing\", False) and trk[\"uuid\"] not in existing_track_ids:\n logger.warning(\n f\"Marking missing track {trk['description_meta']} as missing in playlist {cached_playlist.name}\"\n )\n trk[\"missing\"] = True\n elif trk.get(\"missing\", False) and trk[\"uuid\"] in existing_track_ids:\n logger.info(\n f\"Missing trk {trk['description_meta']} in playlist {cached_playlist.name} found: removing missing flag\"\n )\n del trk[\"missing\"]\n\n cached_playlist.track_ids = [t[\"uuid\"] for t in tracks]\n logger.debug(\n f\"Found {len(cached_playlist.track_ids)} track(s) (including missing) in {source_path}\"\n )\n\n # Update the description_metas.\n desc_map: dict[str, str] = {}\n cursor = conn.execute(\n f\"\"\"\n SELECT id, tracktitle, source_path, trackartist_names, trackartist_roles FROM tracks_view\n WHERE id IN ({','.join(['?']*len(tracks))})\n \"\"\",\n cached_playlist.track_ids,\n )\n for row in cursor:\n desc_map[row[\"id\"]] = calculate_track_logtext(\n title=row[\"tracktitle\"],\n artists=_unpack_artists(\n c, row[\"trackartist_names\"], row[\"trackartist_roles\"]\n ),\n suffix=Path(row[\"source_path\"]).suffix,\n )\n for i, trk in enumerate(tracks):\n with contextlib.suppress(KeyError):\n tracks[i][\"description_meta\"] = desc_map[trk[\"uuid\"]]\n if trk.get(\"missing\", False):\n tracks[i][\"description_meta\"] += \" {MISSING}\"\n\n # Update the playlist on disk if we have changed information.\n if tracks != original_tracks:\n logger.debug(f\"Updating track descriptions for {cached_playlist.name}\")\n data[\"tracks\"] = tracks\n with source_path.open(\"wb\") as fp:\n tomli_w.dump(data, fp)\n cached_playlist.source_mtime = str(os.stat(source_path).st_mtime)\n\n logger.info(f\"Updating cache for playlist {cached_playlist.name}\")\n conn.execute(\n \"\"\"\n INSERT INTO playlists (name, source_mtime, cover_path) VALUES (?, ?, ?)\n ON CONFLICT (name) DO UPDATE SET\n source_mtime = excluded.source_mtime\n , cover_path = excluded.cover_path\n \"\"\",\n (\n cached_playlist.name,\n cached_playlist.source_mtime,\n str(cached_playlist.cover_path) if cached_playlist.cover_path else None,\n ),\n )\n conn.execute(\n \"DELETE FROM playlists_tracks WHERE playlist_name = ?\",\n (cached_playlist.name,),\n )\n args: list[Any] = []\n for position, trk in enumerate(tracks):\n args.extend(\n [cached_playlist.name, trk[\"uuid\"], position + 1, trk.get(\"missing\", False)]\n )\n if args:\n conn.execute(\n f\"\"\"\n INSERT INTO playlists_tracks (playlist_name, track_id, position, missing)\n VALUES {','.join(['(?, ?, ?, ?)'] * len(tracks))}\n \"\"\",\n args,\n )\n\n logger.debug(f\"playlist update loop time {time.time() - loop_start=}\")" }, { "identifier": "update_cache_for_releases", "path": "rose/cache.py", "snippet": "def update_cache_for_releases(\n c: Config,\n # Leave as None to update all releases.\n release_dirs: list[Path] | None = None,\n force: bool = False,\n # For testing.\n force_multiprocessing: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for any passed-in releases. If a directory lacks a\n .rose.{uuid}.toml datafile, create the datafile for the release and set it to the initial state.\n\n This is a hot path and is thus performance-optimized. The bottleneck is disk accesses, so we\n structure this function in order to minimize them. We solely read files that have changed since\n last run and batch writes together. 
We trade higher memory for reduced disk accesses.\n Concretely, we:\n\n 1. Execute one big SQL query at the start to fetch the relevant previous caches.\n 2. Skip reading a file's data if the mtime has not changed since the previous cache update.\n 3. Batch SQLite write operations to the end of this function, and only execute a SQLite upsert\n if the read data differs from the previous caches.\n\n We also shard the directories across multiple processes and execute them simultaneously.\n \"\"\"\n release_dirs = release_dirs or [\n Path(d.path) for d in os.scandir(c.music_source_dir) if d.is_dir()\n ]\n release_dirs = [\n d\n for d in release_dirs\n if d.name != \"!collages\"\n and d.name != \"!playlists\"\n and d.name not in c.ignore_release_directories\n ]\n if not release_dirs:\n logger.debug(\"No-Op: No whitelisted releases passed into update_cache_for_releases\")\n return\n logger.debug(f\"Refreshing the read cache for {len(release_dirs)} releases\")\n if len(release_dirs) < 10:\n logger.debug(f\"Refreshing cached data for {', '.join([r.name for r in release_dirs])}\")\n\n # If the number of releases changed is less than 50; do not bother with all that multiprocessing\n # gunk: instead, directly call the executor.\n #\n # This has an added benefit of not spawning processes from the virtual filesystem and watchdog\n # processes, as those processes always update the cache for one release at a time and are\n # multithreaded. Starting other processes from threads is bad!\n if not force_multiprocessing and len(release_dirs) < 50:\n logger.debug(\n f\"Running cache update executor in same process because {len(release_dirs)=} < 50\"\n )\n _update_cache_for_releases_executor(c, release_dirs, force)\n return\n\n # Batch size defaults to equal split across all processes. However, if the number of directories\n # is small, we shrink the # of processes to save on overhead.\n num_proc = c.max_proc\n if len(release_dirs) < c.max_proc * 50:\n num_proc = max(1, math.ceil(len(release_dirs) // 50))\n batch_size = len(release_dirs) // num_proc + 1\n\n manager = multiprocessing.Manager()\n # Have each process propagate the collages and playlists it wants to update back upwards. We\n # will dispatch the force updater only once in the main process, instead of many times in each\n # process.\n collages_to_force_update = manager.list()\n playlists_to_force_update = manager.list()\n\n errors: list[BaseException] = []\n\n logger.debug(\"Creating multiprocessing pool to parallelize cache executors.\")\n with multiprocessing.Pool(processes=c.max_proc) as pool:\n # At 0, no batch. At 1, 1 batch. At 49, 1 batch. At 50, 1 batch. 
At 51, 2 batches.\n for i in range(0, len(release_dirs), batch_size):\n logger.debug(\n f\"Spawning release cache update process for releases [{i}, {i+batch_size})\"\n )\n pool.apply_async(\n _update_cache_for_releases_executor,\n (\n c,\n release_dirs[i : i + batch_size],\n force,\n collages_to_force_update,\n playlists_to_force_update,\n ),\n error_callback=lambda e: errors.append(e),\n )\n pool.close()\n pool.join()\n\n if errors:\n raise ExceptionGroup(\"Exception occurred in cache update subprocesses\", errors) # type: ignore\n\n if collages_to_force_update:\n update_cache_for_collages(c, uniq(list(collages_to_force_update)), force=True)\n if playlists_to_force_update:\n update_cache_for_playlists(c, uniq(list(playlists_to_force_update)), force=True)" }, { "identifier": "Config", "path": "rose/config.py", "snippet": "class Config:\n music_source_dir: Path\n fuse_mount_dir: Path\n cache_dir: Path\n # Maximum parallel processes for cache updates. Defaults to nproc/2.\n max_proc: int\n ignore_release_directories: list[str]\n\n # A map from parent artist -> subartists.\n artist_aliases_map: dict[str, list[str]]\n # A map from subartist -> parent artists.\n artist_aliases_parents_map: dict[str, list[str]]\n\n fuse_artists_whitelist: list[str] | None\n fuse_genres_whitelist: list[str] | None\n fuse_labels_whitelist: list[str] | None\n fuse_artists_blacklist: list[str] | None\n fuse_genres_blacklist: list[str] | None\n fuse_labels_blacklist: list[str] | None\n\n cover_art_stems: list[str]\n valid_art_exts: list[str]\n\n rename_source_files: bool\n path_templates: PathTemplateConfig\n\n stored_metadata_rules: list[MetadataRule]\n\n @classmethod\n def parse(cls, config_path_override: Path | None = None) -> Config:\n # As we parse, delete consumed values from the data dictionary. 
If any are left over at the\n # end of the config, warn that unknown config keys were found.\n cfgpath = config_path_override or CONFIG_PATH\n cfgtext = \"\"\n try:\n with cfgpath.open(\"r\") as fp:\n cfgtext = fp.read()\n data = tomllib.loads(cfgtext)\n except FileNotFoundError as e:\n raise ConfigNotFoundError(f\"Configuration file not found ({cfgpath})\") from e\n except tomllib.TOMLDecodeError as e:\n raise ConfigDecodeError(\n f\"Failed to decode configuration file: invalid TOML: {e}\"\n ) from e\n\n try:\n music_source_dir = Path(data[\"music_source_dir\"]).expanduser()\n del data[\"music_source_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key music_source_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for music_source_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n fuse_mount_dir = Path(data[\"fuse_mount_dir\"]).expanduser()\n del data[\"fuse_mount_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key fuse_mount_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_mount_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n cache_dir = Path(data[\"cache_dir\"]).expanduser()\n del data[\"cache_dir\"]\n except KeyError:\n cache_dir = XDG_CACHE_ROSE\n except (TypeError, ValueError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cache_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n cache_dir.mkdir(parents=True, exist_ok=True)\n\n try:\n max_proc = int(data[\"max_proc\"])\n del data[\"max_proc\"]\n if max_proc <= 0:\n raise ValueError(f\"must be a positive integer: got {max_proc}\")\n except KeyError:\n max_proc = max(1, multiprocessing.cpu_count() // 2)\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for max_proc in configuration file ({cfgpath}): must be a positive integer\"\n ) from e\n\n artist_aliases_map: dict[str, list[str]] = defaultdict(list)\n artist_aliases_parents_map: dict[str, list[str]] = defaultdict(list)\n try:\n for entry in data.get(\"artist_aliases\", []):\n if not isinstance(entry[\"artist\"], str):\n raise ValueError(f\"Artists must be of type str: got {type(entry['artist'])}\")\n artist_aliases_map[entry[\"artist\"]] = entry[\"aliases\"]\n if not isinstance(entry[\"aliases\"], list):\n raise ValueError(\n f\"Aliases must be of type list[str]: got {type(entry['aliases'])}\"\n )\n for s in entry[\"aliases\"]:\n if not isinstance(s, str):\n raise ValueError(f\"Each alias must be of type str: got {type(s)}\")\n artist_aliases_parents_map[s].append(entry[\"artist\"])\n with contextlib.suppress(KeyError):\n del data[\"artist_aliases\"]\n except (ValueError, TypeError, KeyError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for artist_aliases in configuration file ({cfgpath}): must be a list of {{ artist = str, aliases = list[str] }} records\"\n ) from e\n\n try:\n fuse_artists_whitelist = data[\"fuse_artists_whitelist\"]\n del data[\"fuse_artists_whitelist\"]\n if not isinstance(fuse_artists_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_whitelist)}\")\n for s in fuse_artists_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_whitelist = None\n except ValueError as e:\n 
raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_whitelist = data[\"fuse_genres_whitelist\"]\n del data[\"fuse_genres_whitelist\"]\n if not isinstance(fuse_genres_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_whitelist)}\")\n for s in fuse_genres_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_whitelist = data[\"fuse_labels_whitelist\"]\n del data[\"fuse_labels_whitelist\"]\n if not isinstance(fuse_labels_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_whitelist)}\")\n for s in fuse_labels_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_artists_blacklist = data[\"fuse_artists_blacklist\"]\n del data[\"fuse_artists_blacklist\"]\n if not isinstance(fuse_artists_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_blacklist)}\")\n for s in fuse_artists_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_blacklist = data[\"fuse_genres_blacklist\"]\n del data[\"fuse_genres_blacklist\"]\n if not isinstance(fuse_genres_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_blacklist)}\")\n for s in fuse_genres_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_blacklist = data[\"fuse_labels_blacklist\"]\n del data[\"fuse_labels_blacklist\"]\n if not isinstance(fuse_labels_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_blacklist)}\")\n for s in fuse_labels_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n if fuse_artists_whitelist and fuse_artists_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_artists_whitelist and fuse_artists_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_genres_whitelist and fuse_genres_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_genres_whitelist and fuse_genres_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_labels_whitelist and fuse_labels_blacklist:\n raise 
InvalidConfigValueError(\n f\"Cannot specify both fuse_labels_whitelist and fuse_labels_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n\n try:\n cover_art_stems = data[\"cover_art_stems\"]\n del data[\"cover_art_stems\"]\n if not isinstance(cover_art_stems, list):\n raise ValueError(f\"Must be a list[str]: got {type(cover_art_stems)}\")\n for s in cover_art_stems:\n if not isinstance(s, str):\n raise ValueError(f\"Each cover art stem must be of type str: got {type(s)}\")\n except KeyError:\n cover_art_stems = [\"folder\", \"cover\", \"art\", \"front\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cover_art_stems in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n valid_art_exts = data[\"valid_art_exts\"]\n del data[\"valid_art_exts\"]\n if not isinstance(valid_art_exts, list):\n raise ValueError(f\"Must be a list[str]: got {type(valid_art_exts)}\")\n for s in valid_art_exts:\n if not isinstance(s, str):\n raise ValueError(f\"Each art extension must be of type str: got {type(s)}\")\n except KeyError:\n valid_art_exts = [\"jpg\", \"jpeg\", \"png\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for valid_art_exts in configuration file ({cfgpath}): {e}\"\n ) from e\n\n cover_art_stems = [x.lower() for x in cover_art_stems]\n valid_art_exts = [x.lower() for x in valid_art_exts]\n\n try:\n rename_source_files = data[\"rename_source_files\"]\n del data[\"rename_source_files\"]\n if not isinstance(rename_source_files, bool):\n raise ValueError(f\"Must be a bool: got {type(rename_source_files)}\")\n except KeyError:\n rename_source_files = False\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for rename_source_files in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n ignore_release_directories = data[\"ignore_release_directories\"]\n del data[\"ignore_release_directories\"]\n if not isinstance(ignore_release_directories, list):\n raise ValueError(f\"Must be a list[str]: got {type(ignore_release_directories)}\")\n for s in ignore_release_directories:\n if not isinstance(s, str):\n raise ValueError(f\"Each release directory must be of type str: got {type(s)}\")\n except KeyError:\n ignore_release_directories = []\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for ignore_release_directories in configuration file ({cfgpath}): {e}\"\n ) from e\n\n stored_metadata_rules: list[MetadataRule] = []\n for d in data.get(\"stored_metadata_rules\", []):\n if not isinstance(d, dict):\n raise InvalidConfigValueError(\n f\"Invalid value in stored_metadata_rules in configuration file ({cfgpath}): list values must be a dict: got {type(d)}\"\n )\n\n try:\n matcher = d[\"matcher\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(matcher, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a string\"\n )\n\n try:\n actions = d[\"actions\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(actions, list):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings\"\n 
)\n for action in actions:\n if not isinstance(action, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings: got {type(action)}\"\n )\n\n try:\n stored_metadata_rules.append(MetadataRule.parse(matcher, actions))\n except RuleSyntaxError as e:\n raise InvalidConfigValueError(\n f\"Failed to parse stored_metadata_rules in configuration file ({cfgpath}): rule {d}: {e}\"\n ) from e\n if \"stored_metadata_rules\" in data:\n del data[\"stored_metadata_rules\"]\n\n # Get the potential default template before evaluating the rest.\n default_templates = deepcopy(DEFAULT_TEMPLATE_PAIR)\n with contextlib.suppress(KeyError):\n default_templates.release = PathTemplate(data[\"path_templates\"][\"default\"][\"release\"])\n del data[\"path_templates\"][\"default\"][\"release\"]\n with contextlib.suppress(KeyError):\n default_templates.track = PathTemplate(data[\"path_templates\"][\"default\"][\"track\"])\n del data[\"path_templates\"][\"default\"][\"track\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"][\"default\"]:\n del data[\"path_templates\"][\"default\"]\n\n path_templates = PathTemplateConfig.with_defaults(default_templates)\n if tmpl_config := data.get(\"path_templates\", None):\n for key in [\n \"source\",\n \"all_releases\",\n \"new_releases\",\n \"recently_added_releases\",\n \"artists\",\n \"genres\",\n \"labels\",\n \"collages\",\n ]:\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).release = PathTemplate(tmpl_config[key][\"release\"])\n del tmpl_config[key][\"release\"]\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).track = PathTemplate(tmpl_config[key][\"track\"])\n del tmpl_config[key][\"track\"]\n with contextlib.suppress(KeyError):\n if not tmpl_config[key]:\n del tmpl_config[key]\n\n with contextlib.suppress(KeyError):\n path_templates.playlists = PathTemplate(tmpl_config[\"playlists\"])\n del tmpl_config[\"playlists\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"]:\n del data[\"path_templates\"]\n\n try:\n path_templates.parse()\n except InvalidPathTemplateError as e:\n raise InvalidConfigValueError(\n f\"Invalid path template in configuration file ({cfgpath}) for template {e.key}: {e}\"\n ) from e\n\n if data:\n unrecognized_accessors: list[str] = []\n # Do a DFS over the data keys to assemble the map of unknown keys. 
State is a tuple of\n # (\"accessor\", node).\n dfs_state: deque[tuple[str, dict[str, Any]]] = deque([(\"\", data)])\n while dfs_state:\n accessor, node = dfs_state.pop()\n if isinstance(node, dict):\n for k, v in node.items():\n child_accessor = k if not accessor else f\"{accessor}.{k}\"\n dfs_state.append((child_accessor, v))\n continue\n unrecognized_accessors.append(accessor)\n logger.warning(\n f\"Unrecognized options found in configuration file: {', '.join(unrecognized_accessors)}\"\n )\n\n return Config(\n music_source_dir=music_source_dir,\n fuse_mount_dir=fuse_mount_dir,\n cache_dir=cache_dir,\n max_proc=max_proc,\n artist_aliases_map=artist_aliases_map,\n artist_aliases_parents_map=artist_aliases_parents_map,\n fuse_artists_whitelist=fuse_artists_whitelist,\n fuse_genres_whitelist=fuse_genres_whitelist,\n fuse_labels_whitelist=fuse_labels_whitelist,\n fuse_artists_blacklist=fuse_artists_blacklist,\n fuse_genres_blacklist=fuse_genres_blacklist,\n fuse_labels_blacklist=fuse_labels_blacklist,\n cover_art_stems=cover_art_stems,\n valid_art_exts=valid_art_exts,\n path_templates=path_templates,\n rename_source_files=rename_source_files,\n ignore_release_directories=ignore_release_directories,\n stored_metadata_rules=stored_metadata_rules,\n )\n\n @functools.cached_property\n def valid_cover_arts(self) -> list[str]:\n return [s + \".\" + e for s in self.cover_art_stems for e in self.valid_art_exts]\n\n @functools.cached_property\n def cache_database_path(self) -> Path:\n return self.cache_dir / \"cache.sqlite3\"\n\n @functools.cached_property\n def watchdog_pid_path(self) -> Path:\n return self.cache_dir / \"watchdog.pid\"\n\n @functools.cached_property\n def sanitized_artist_aliases_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_map.items()}\n\n @functools.cached_property\n def sanitized_artist_aliases_parents_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_parents_map.items()}" } ]
import asyncio
import contextlib
import logging
import sys
import time
from dataclasses import dataclass
from pathlib import Path
from queue import Empty, Queue
from typing import Literal

from watchdog.events import (
    FileSystemEvent,
    FileSystemEventHandler,
    FileSystemMovedEvent,
)
from watchdog.observers import Observer

from rose.cache import (
    update_cache_evict_nonexistent_collages,
    update_cache_evict_nonexistent_playlists,
    update_cache_evict_nonexistent_releases,
    update_cache_for_collages,
    update_cache_for_playlists,
    update_cache_for_releases,
)
from rose.config import Config
10825
""" The watcher module is architected to decouple the event listener from the event processor. This is because we must introduce a wait into the event processor, but we do not want to block the event listener. In order to performantly introduce such a wait, we've made the event processor async. However, the event listener uses a non-async library, so the event listener is sync. Why do we need to introduce a wait time into the event processor? Changes to releases occur across an entire directory, but change events come in one file at a time. If we respond to a file event too early, we may do silly things like write a new `.rose.{uuid}.toml` file, only to have a pre-existing one suddenly appear after. We only want to operate on a release once all files have finished changing. Let's dive down into the details. The watcher is a process that spawns a background thread and then runs an event loop. Hierarchically: Process Thread -> watchdog/inotify listener that enqueues events Event Loop -> processes+debounces events asynchronously from the queue """ logger = logging.getLogger(__name__) # Shorten wait times if we are in a test. This way a test runs faster. This is wasteful in # production though. WAIT_DIVIDER = 1 if "pytest" not in sys.modules else 10 EventType = Literal["created", "deleted", "modified", "moved"] EVENT_TYPES: list[EventType] = ["created", "deleted", "modified", "moved"] @dataclass(frozen=True) class WatchdogEvent: type: EventType collage: str | None = None playlist: str | None = None release: Path | None = None class EventHandler(FileSystemEventHandler): # pragma: no cover def __init__(self, config: Config, queue: Queue[WatchdogEvent]): super().__init__() self.config = config self.queue = queue def on_any_event(self, event: FileSystemEvent) -> None: super().on_any_event(event) # type: ignore path = event.dest_path if isinstance(event, FileSystemMovedEvent) else event.src_path logger.debug(f"Notified of {event.event_type} event for {path}") etype: EventType = event.event_type # type: ignore if etype not in EVENT_TYPES: return # Collage event. relative_path = path.removeprefix(str(self.config.music_source_dir) + "/") if relative_path.startswith("!collages/"): if not relative_path.endswith(".toml"): return collage = relative_path.removeprefix("!collages/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on collage {collage}") self.queue.put(WatchdogEvent(collage=collage, type=etype)) return # Playlist event. if relative_path.startswith("!playlists/"): if not relative_path.endswith(".toml"): return playlist = relative_path.removeprefix("!playlists/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on playlist {playlist}") self.queue.put(WatchdogEvent(playlist=playlist, type=etype)) return # Release event. with contextlib.suppress(IndexError): final_path_part = Path(relative_path).parts[0] if final_path_part == "/": return release_dir = self.config.music_source_dir / final_path_part logger.debug(f"Queueing {etype} event on release {release_dir}") self.queue.put(WatchdogEvent(release=release_dir, type=etype)) async def handle_event( c: Config, e: WatchdogEvent, wait: float | None = None, ) -> None: # pragma: no cover if wait: await asyncio.sleep(wait / WAIT_DIVIDER) if e.type == "created" or e.type == "modified": if e.collage:
""" The watcher module is architected to decouple the event listener from the event processor. This is because we must introduce a wait into the event processor, but we do not want to block the event listener. In order to performantly introduce such a wait, we've made the event processor async. However, the event listener uses a non-async library, so the event listener is sync. Why do we need to introduce a wait time into the event processor? Changes to releases occur across an entire directory, but change events come in one file at a time. If we respond to a file event too early, we may do silly things like write a new `.rose.{uuid}.toml` file, only to have a pre-existing one suddenly appear after. We only want to operate on a release once all files have finished changing. Let's dive down into the details. The watcher is a process that spawns a background thread and then runs an event loop. Hierarchically: Process Thread -> watchdog/inotify listener that enqueues events Event Loop -> processes+debounces events asynchronously from the queue """ logger = logging.getLogger(__name__) # Shorten wait times if we are in a test. This way a test runs faster. This is wasteful in # production though. WAIT_DIVIDER = 1 if "pytest" not in sys.modules else 10 EventType = Literal["created", "deleted", "modified", "moved"] EVENT_TYPES: list[EventType] = ["created", "deleted", "modified", "moved"] @dataclass(frozen=True) class WatchdogEvent: type: EventType collage: str | None = None playlist: str | None = None release: Path | None = None class EventHandler(FileSystemEventHandler): # pragma: no cover def __init__(self, config: Config, queue: Queue[WatchdogEvent]): super().__init__() self.config = config self.queue = queue def on_any_event(self, event: FileSystemEvent) -> None: super().on_any_event(event) # type: ignore path = event.dest_path if isinstance(event, FileSystemMovedEvent) else event.src_path logger.debug(f"Notified of {event.event_type} event for {path}") etype: EventType = event.event_type # type: ignore if etype not in EVENT_TYPES: return # Collage event. relative_path = path.removeprefix(str(self.config.music_source_dir) + "/") if relative_path.startswith("!collages/"): if not relative_path.endswith(".toml"): return collage = relative_path.removeprefix("!collages/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on collage {collage}") self.queue.put(WatchdogEvent(collage=collage, type=etype)) return # Playlist event. if relative_path.startswith("!playlists/"): if not relative_path.endswith(".toml"): return playlist = relative_path.removeprefix("!playlists/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on playlist {playlist}") self.queue.put(WatchdogEvent(playlist=playlist, type=etype)) return # Release event. with contextlib.suppress(IndexError): final_path_part = Path(relative_path).parts[0] if final_path_part == "/": return release_dir = self.config.music_source_dir / final_path_part logger.debug(f"Queueing {etype} event on release {release_dir}") self.queue.put(WatchdogEvent(release=release_dir, type=etype)) async def handle_event( c: Config, e: WatchdogEvent, wait: float | None = None, ) -> None: # pragma: no cover if wait: await asyncio.sleep(wait / WAIT_DIVIDER) if e.type == "created" or e.type == "modified": if e.collage:
update_cache_for_collages(c, [e.collage])
3
2023-10-09 14:42:23+00:00
16k
grainseed/monitask
sam/segment_anything/build_sam.py
[ { "identifier": "Sam", "path": "sam/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\n mask_decoder: MaskDecoder,\r\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\r\n pixel_std: List[float] = [58.395, 57.12, 57.375],\r\n ) -> None:\r\n \"\"\"\r\n SAM predicts object masks from an image and input prompts.\r\n\r\n Arguments:\r\n image_encoder (ImageEncoderViT): The backbone used to encode the\r\n image into image embeddings that allow for efficient mask prediction.\r\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\r\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\r\n and encoded prompts.\r\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\r\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\r\n \"\"\"\r\n super().__init__()\r\n self.image_encoder = image_encoder\r\n self.prompt_encoder = prompt_encoder\r\n self.mask_decoder = mask_decoder\r\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self) -> Any:\r\n return self.pixel_mean.device\r\n\r\n def forward(\r\n self,\r\n batched_input: List[Dict[str, Any]],\r\n multimask_output: bool,\r\n hq_token_only: bool =False,\r\n ) -> List[Dict[str, torch.Tensor]]:\r\n \"\"\"\r\n Predicts masks end-to-end from provided images and prompts.\r\n If prompts are not known in advance, using SamPredictor is\r\n recommended over calling the model directly.\r\n\r\n Arguments:\r\n batched_input (list(dict)): A list over input images, each a\r\n dictionary with the following keys. A prompt key can be\r\n excluded if it is not present.\r\n 'image': The image as a torch tensor in 3xHxW format,\r\n already transformed for input to the model.\r\n 'original_size': (tuple(int, int)) The original size of\r\n the image before transformation, as (H, W).\r\n 'point_coords': (torch.Tensor) Batched point prompts for\r\n this image, with shape BxNx2. Already transformed to the\r\n input frame of the model.\r\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\r\n with shape BxN.\r\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\r\n Already transformed to the input frame of the model.\r\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\r\n in the form Bx1xHxW.\r\n multimask_output (bool): Whether the model should predict multiple\r\n disambiguating masks, or return a single mask.\r\n\r\n Returns:\r\n (list(dict)): A list over input images, where each element is\r\n as dictionary with the following keys.\r\n 'masks': (torch.Tensor) Batched binary mask predictions,\r\n with shape BxCxHxW, where B is the number of input prompts,\r\n C is determined by multimask_output, and (H, W) is the\r\n original size of the image.\r\n 'iou_predictions': (torch.Tensor) The model's predictions\r\n of mask quality, in shape BxC.\r\n 'low_res_logits': (torch.Tensor) Low resolution logits with\r\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\r\n to subsequent iterations of prediction.\r\n \"\"\"\r\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\r\n image_embeddings, interm_embeddings = self.image_encoder(input_images)\r\n interm_embeddings = interm_embeddings[0] # early layer\r\n\r\n outputs = []\r\n for image_record, curr_embedding, curr_interm in zip(batched_input, image_embeddings, interm_embeddings):\r\n if \"point_coords\" in image_record:\r\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\r\n else:\r\n points = None\r\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\r\n points=points,\r\n boxes=image_record.get(\"boxes\", None),\r\n masks=image_record.get(\"mask_inputs\", None),\r\n )\r\n low_res_masks, iou_predictions = self.mask_decoder(\r\n image_embeddings=curr_embedding.unsqueeze(0),\r\n image_pe=self.prompt_encoder.get_dense_pe(),\r\n sparse_prompt_embeddings=sparse_embeddings,\r\n dense_prompt_embeddings=dense_embeddings,\r\n multimask_output=multimask_output,\r\n hq_token_only=hq_token_only,\r\n interm_embeddings=curr_interm.unsqueeze(0).unsqueeze(0),\r\n )\r\n masks = self.postprocess_masks(\r\n low_res_masks,\r\n input_size=image_record[\"image\"].shape[-2:],\r\n original_size=image_record[\"original_size\"],\r\n )\r\n masks = masks > self.mask_threshold\r\n outputs.append(\r\n {\r\n \"masks\": masks,\r\n \"iou_predictions\": iou_predictions,\r\n \"low_res_logits\": low_res_masks,\r\n }\r\n )\r\n return outputs\r\n\r\n def postprocess_masks(\r\n self,\r\n masks: torch.Tensor,\r\n input_size: Tuple[int, ...],\r\n original_size: Tuple[int, ...],\r\n ) -> torch.Tensor:\r\n \"\"\"\r\n Remove padding and upscale masks to the original image size.\r\n\r\n Arguments:\r\n masks (torch.Tensor): Batched masks from the mask_decoder,\r\n in BxCxHxW format.\r\n input_size (tuple(int, int)): The size of the image input to the\r\n model, in (H, W) format. 
Used to remove padding.\r\n original_size (tuple(int, int)): The original size of the image\r\n before resizing for input to the model, in (H, W) format.\r\n\r\n Returns:\r\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\r\n is given by original_size.\r\n \"\"\"\r\n masks = F.interpolate(\r\n masks,\r\n (self.image_encoder.img_size, self.image_encoder.img_size),\r\n mode=\"bilinear\",\r\n align_corners=False,\r\n )\r\n masks = masks[..., : input_size[0], : input_size[1]]\r\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\r\n return masks\r\n\r\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\r\n # Normalize colors\r\n x = (x - self.pixel_mean) / self.pixel_std\r\n\r\n # Pad\r\n h, w = x.shape[-2:]\r\n padh = self.image_encoder.img_size - h\r\n padw = self.image_encoder.img_size - w\r\n x = F.pad(x, (0, padw, 0, padh))\r\n return x\r" }, { "identifier": "ImageEncoderViT", "path": "sam/segment_anything/modeling/image_encoder.py", "snippet": "class ImageEncoderViT(nn.Module):\r\n def __init__(\r\n self,\r\n img_size: int = 1024,\r\n patch_size: int = 16,\r\n in_chans: int = 3,\r\n embed_dim: int = 768,\r\n depth: int = 12,\r\n num_heads: int = 12,\r\n mlp_ratio: float = 4.0,\r\n out_chans: int = 256,\r\n qkv_bias: bool = True,\r\n norm_layer: Type[nn.Module] = nn.LayerNorm,\r\n act_layer: Type[nn.Module] = nn.GELU,\r\n use_abs_pos: bool = True,\r\n use_rel_pos: bool = False,\r\n rel_pos_zero_init: bool = True,\r\n window_size: int = 0,\r\n global_attn_indexes: Tuple[int, ...] = (),\r\n ) -> None:\r\n \"\"\"\r\n Args:\r\n img_size (int): Input image size.\r\n patch_size (int): Patch size.\r\n in_chans (int): Number of input image channels.\r\n embed_dim (int): Patch embedding dimension.\r\n depth (int): Depth of ViT.\r\n num_heads (int): Number of attention heads in each ViT block.\r\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\r\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\r\n norm_layer (nn.Module): Normalization layer.\r\n act_layer (nn.Module): Activation layer.\r\n use_abs_pos (bool): If True, use absolute positional embeddings.\r\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\r\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\r\n window_size (int): Window size for window attention blocks.\r\n global_attn_indexes (list): Indexes for blocks using global attention.\r\n \"\"\"\r\n super().__init__()\r\n self.img_size = img_size\r\n\r\n self.patch_embed = PatchEmbed(\r\n kernel_size=(patch_size, patch_size),\r\n stride=(patch_size, patch_size),\r\n in_chans=in_chans,\r\n embed_dim=embed_dim,\r\n )\r\n\r\n self.pos_embed: Optional[nn.Parameter] = None\r\n if use_abs_pos:\r\n # Initialize absolute positional embedding with pretrain image size.\r\n self.pos_embed = nn.Parameter(\r\n torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\r\n )\r\n\r\n self.blocks = nn.ModuleList()\r\n for i in range(depth):\r\n block = Block(\r\n dim=embed_dim,\r\n num_heads=num_heads,\r\n mlp_ratio=mlp_ratio,\r\n qkv_bias=qkv_bias,\r\n norm_layer=norm_layer,\r\n act_layer=act_layer,\r\n use_rel_pos=use_rel_pos,\r\n rel_pos_zero_init=rel_pos_zero_init,\r\n window_size=window_size if i not in global_attn_indexes else 0,\r\n input_size=(img_size // patch_size, img_size // patch_size),\r\n )\r\n self.blocks.append(block)\r\n\r\n self.neck = 
nn.Sequential(\r\n nn.Conv2d(\r\n embed_dim,\r\n out_chans,\r\n kernel_size=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(out_chans),\r\n nn.Conv2d(\r\n out_chans,\r\n out_chans,\r\n kernel_size=3,\r\n padding=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(out_chans),\r\n )\r\n\r\n def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n x = self.patch_embed(x)\r\n if self.pos_embed is not None:\r\n x = x + self.pos_embed\r\n\r\n interm_embeddings=[]\r\n for blk in self.blocks:\r\n x = blk(x)\r\n if blk.window_size == 0:\r\n interm_embeddings.append(x)\r\n\r\n x = self.neck(x.permute(0, 3, 1, 2))\r\n\r\n return x, interm_embeddings\r" }, { "identifier": "MaskDecoderHQ", "path": "sam/segment_anything/modeling/mask_decoder_hq.py", "snippet": "class MaskDecoderHQ(nn.Module):\r\n def __init__(\r\n self,\r\n *,\r\n transformer_dim: int,\r\n transformer: nn.Module,\r\n num_multimask_outputs: int = 3,\r\n activation: Type[nn.Module] = nn.GELU,\r\n iou_head_depth: int = 3,\r\n iou_head_hidden_dim: int = 256,\r\n vit_dim: int = 1024,\r\n ) -> None:\r\n \"\"\"\r\n Predicts masks given an image and prompt embeddings, using a\r\n transformer architecture.\r\n\r\n Arguments:\r\n transformer_dim (int): the channel dimension of the transformer\r\n transformer (nn.Module): the transformer used to predict masks\r\n num_multimask_outputs (int): the number of masks to predict\r\n when disambiguating masks\r\n activation (nn.Module): the type of activation to use when\r\n upscaling masks\r\n iou_head_depth (int): the depth of the MLP used to predict\r\n mask quality\r\n iou_head_hidden_dim (int): the hidden dimension of the MLP\r\n used to predict mask quality\r\n \"\"\"\r\n super().__init__()\r\n self.transformer_dim = transformer_dim\r\n self.transformer = transformer\r\n\r\n self.num_multimask_outputs = num_multimask_outputs\r\n\r\n self.iou_token = nn.Embedding(1, transformer_dim)\r\n self.num_mask_tokens = num_multimask_outputs + 1\r\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\r\n\r\n self.output_upscaling = nn.Sequential(\r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim // 4),\r\n activation(),\r\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\r\n activation(),\r\n )\r\n self.output_hypernetworks_mlps = nn.ModuleList(\r\n [\r\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\r\n for i in range(self.num_mask_tokens)\r\n ]\r\n )\r\n\r\n self.iou_prediction_head = MLP(\r\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\r\n )\r\n\r\n # HQ-SAM parameters\r\n self.hf_token = nn.Embedding(1, transformer_dim) # HQ-Ouptput-Token\r\n self.hf_mlp = MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) # corresponding new MLP layer for HQ-Ouptput-Token\r\n self.num_mask_tokens = self.num_mask_tokens + 1\r\n \r\n # three conv fusion layers for obtaining HQ-Feature\r\n self.compress_vit_feat = nn.Sequential(\r\n nn.ConvTranspose2d(vit_dim, transformer_dim, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim),\r\n nn.GELU(), \r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 8, kernel_size=2, stride=2))\r\n \r\n self.embedding_encoder = nn.Sequential(\r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim // 4),\r\n nn.GELU(),\r\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\r\n )\r\n self.embedding_maskfeature = 
nn.Sequential(\r\n nn.Conv2d(transformer_dim // 8, transformer_dim // 4, 3, 1, 1), \r\n LayerNorm2d(transformer_dim // 4),\r\n nn.GELU(),\r\n nn.Conv2d(transformer_dim // 4, transformer_dim // 8, 3, 1, 1))\r\n\r\n\r\n\r\n def forward(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n multimask_output: bool,\r\n hq_token_only: bool,\r\n interm_embeddings: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Predict masks given image and prompt embeddings.\r\n\r\n Arguments:\r\n image_embeddings (torch.Tensor): the embeddings from the ViT image encoder\r\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\r\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\r\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\r\n multimask_output (bool): Whether to return multiple masks or a single\r\n mask.\r\n\r\n Returns:\r\n torch.Tensor: batched predicted masks\r\n torch.Tensor: batched predictions of mask quality\r\n \"\"\"\r\n vit_features = interm_embeddings[0].permute(0, 3, 1, 2) # early-layer ViT feature, after 1st global attention block in ViT\r\n hq_features = self.embedding_encoder(image_embeddings) + self.compress_vit_feat(vit_features)\r\n\r\n masks, iou_pred = self.predict_masks(\r\n image_embeddings=image_embeddings,\r\n image_pe=image_pe,\r\n sparse_prompt_embeddings=sparse_prompt_embeddings,\r\n dense_prompt_embeddings=dense_prompt_embeddings,\r\n hq_features=hq_features,\r\n )\r\n\r\n # Select the correct mask or masks for output\r\n if multimask_output:\r\n # mask with highest score\r\n mask_slice = slice(1,self.num_mask_tokens-1)\r\n iou_pred = iou_pred[:, mask_slice]\r\n iou_pred, max_iou_idx = torch.max(iou_pred,dim=1)\r\n iou_pred = iou_pred.unsqueeze(1)\r\n masks_multi = masks[:, mask_slice, :, :]\r\n masks_sam = masks_multi[torch.arange(masks_multi.size(0)),max_iou_idx].unsqueeze(1)\r\n else:\r\n # singale mask output, default\r\n mask_slice = slice(0, 1)\r\n iou_pred = iou_pred[:,mask_slice]\r\n masks_sam = masks[:,mask_slice]\r\n\r\n masks_hq = masks[:,slice(self.num_mask_tokens-1, self.num_mask_tokens)]\r\n if hq_token_only:\r\n masks = masks_hq\r\n else:\r\n masks = masks_sam + masks_hq\r\n # Prepare output\r\n return masks, iou_pred\r\n\r\n def predict_masks(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n hq_features: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\r\n # Concatenate output tokens\r\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hf_token.weight], dim=0)\r\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\r\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\r\n\r\n # Expand per-image data in batch direction to be per-mask\r\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\r\n src = src + dense_prompt_embeddings\r\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\r\n b, c, h, w = src.shape\r\n\r\n # Run the transformer\r\n hs, src = self.transformer(src, pos_src, tokens)\r\n iou_token_out = hs[:, 0, :]\r\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\r\n\r\n # Upscale mask embeddings and predict masks using the mask tokens\r\n src = src.transpose(1, 2).view(b, c, h, w)\r\n\r\n upscaled_embedding_sam = self.output_upscaling(src)\r\n upscaled_embedding_hq = self.embedding_maskfeature(upscaled_embedding_sam) + hq_features.repeat(b,1,1,1)\r\n\r\n hyper_in_list: List[torch.Tensor] = []\r\n for i in range(self.num_mask_tokens):\r\n if i < self.num_mask_tokens - 1:\r\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\r\n else:\r\n hyper_in_list.append(self.hf_mlp(mask_tokens_out[:, i, :]))\r\n\r\n hyper_in = torch.stack(hyper_in_list, dim=1)\r\n b, c, h, w = upscaled_embedding_sam.shape\r\n\r\n masks_sam = (hyper_in[:,:self.num_mask_tokens-1] @ upscaled_embedding_sam.view(b, c, h * w)).view(b, -1, h, w)\r\n masks_sam_hq = (hyper_in[:,self.num_mask_tokens-1:] @ upscaled_embedding_hq.view(b, c, h * w)).view(b, -1, h, w)\r\n masks = torch.cat([masks_sam,masks_sam_hq],dim=1)\r\n # Generate mask quality predictions\r\n iou_pred = self.iou_prediction_head(iou_token_out)\r\n\r\n return masks, iou_pred\r" }, { "identifier": "MaskDecoder", "path": "sam/segment_anything/modeling/mask_decoder.py", "snippet": "class MaskDecoder(nn.Module):\r\n def __init__(\r\n self,\r\n *,\r\n transformer_dim: int,\r\n transformer: nn.Module,\r\n num_multimask_outputs: int = 3,\r\n activation: Type[nn.Module] = nn.GELU,\r\n iou_head_depth: int = 3,\r\n iou_head_hidden_dim: int = 256,\r\n ) -> None:\r\n \"\"\"\r\n Predicts masks given an image and prompt embeddings, using a\r\n transformer architecture.\r\n\r\n Arguments:\r\n transformer_dim (int): the channel dimension of the transformer\r\n transformer (nn.Module): the transformer used to predict masks\r\n num_multimask_outputs (int): the number of masks to predict\r\n when disambiguating masks\r\n activation (nn.Module): the type of activation to use when\r\n upscaling masks\r\n iou_head_depth (int): the depth of the MLP used to predict\r\n mask quality\r\n iou_head_hidden_dim (int): the hidden dimension of the MLP\r\n used to predict mask quality\r\n \"\"\"\r\n super().__init__()\r\n self.transformer_dim = transformer_dim\r\n self.transformer = transformer\r\n\r\n self.num_multimask_outputs = num_multimask_outputs\r\n\r\n self.iou_token = nn.Embedding(1, transformer_dim)\r\n self.num_mask_tokens = num_multimask_outputs + 1\r\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\r\n\r\n self.output_upscaling = nn.Sequential(\r\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(transformer_dim // 4),\r\n activation(),\r\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\r\n 
activation(),\r\n )\r\n self.output_hypernetworks_mlps = nn.ModuleList(\r\n [\r\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\r\n for i in range(self.num_mask_tokens)\r\n ]\r\n )\r\n\r\n self.iou_prediction_head = MLP(\r\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\r\n )\r\n\r\n def forward(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n multimask_output: bool,\r\n hq_token_only: bool,\r\n interm_embeddings: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Predict masks given image and prompt embeddings.\r\n\r\n Arguments:\r\n image_embeddings (torch.Tensor): the embeddings from the image encoder\r\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\r\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\r\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\r\n multimask_output (bool): Whether to return multiple masks or a single\r\n mask.\r\n\r\n Returns:\r\n torch.Tensor: batched predicted masks\r\n torch.Tensor: batched predictions of mask quality\r\n \"\"\"\r\n masks, iou_pred = self.predict_masks(\r\n image_embeddings=image_embeddings,\r\n image_pe=image_pe,\r\n sparse_prompt_embeddings=sparse_prompt_embeddings,\r\n dense_prompt_embeddings=dense_prompt_embeddings,\r\n )\r\n\r\n # Select the correct mask or masks for output\r\n if multimask_output:\r\n mask_slice = slice(1, None)\r\n else:\r\n mask_slice = slice(0, 1)\r\n masks = masks[:, mask_slice, :, :]\r\n iou_pred = iou_pred[:, mask_slice]\r\n\r\n # Prepare output\r\n return masks, iou_pred\r\n\r\n def predict_masks(\r\n self,\r\n image_embeddings: torch.Tensor,\r\n image_pe: torch.Tensor,\r\n sparse_prompt_embeddings: torch.Tensor,\r\n dense_prompt_embeddings: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\r\n # Concatenate output tokens\r\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\r\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\r\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\r\n\r\n # Expand per-image data in batch direction to be per-mask\r\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\r\n src = src + dense_prompt_embeddings\r\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\r\n b, c, h, w = src.shape\r\n\r\n # Run the transformer\r\n hs, src = self.transformer(src, pos_src, tokens)\r\n iou_token_out = hs[:, 0, :]\r\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\r\n\r\n # Upscale mask embeddings and predict masks using the mask tokens\r\n src = src.transpose(1, 2).view(b, c, h, w)\r\n upscaled_embedding = self.output_upscaling(src)\r\n hyper_in_list: List[torch.Tensor] = []\r\n for i in range(self.num_mask_tokens):\r\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\r\n hyper_in = torch.stack(hyper_in_list, dim=1)\r\n b, c, h, w = upscaled_embedding.shape\r\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)\r\n\r\n # Generate mask quality predictions\r\n iou_pred = self.iou_prediction_head(iou_token_out)\r\n\r\n return masks, iou_pred\r" }, { "identifier": "PromptEncoder", "path": "sam/segment_anything/modeling/prompt_encoder.py", "snippet": "class PromptEncoder(nn.Module):\r\n def __init__(\r\n self,\r\n embed_dim: int,\r\n image_embedding_size: Tuple[int, int],\r\n input_image_size: Tuple[int, int],\r\n mask_in_chans: int,\r\n activation: Type[nn.Module] = nn.GELU,\r\n ) -> None:\r\n \"\"\"\r\n Encodes prompts for input to SAM's mask decoder.\r\n\r\n Arguments:\r\n embed_dim (int): The prompts' embedding dimension\r\n image_embedding_size (tuple(int, int)): The spatial size of the\r\n image embedding, as (H, W).\r\n input_image_size (int): The padded size of the image as input\r\n to the image encoder, as (H, W).\r\n mask_in_chans (int): The number of hidden channels used for\r\n encoding input masks.\r\n activation (nn.Module): The activation to use when encoding\r\n input masks.\r\n \"\"\"\r\n super().__init__()\r\n self.embed_dim = embed_dim\r\n self.input_image_size = input_image_size\r\n self.image_embedding_size = image_embedding_size\r\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\r\n\r\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\r\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\r\n self.point_embeddings = nn.ModuleList(point_embeddings)\r\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\r\n\r\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\r\n self.mask_downscaling = nn.Sequential(\r\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\r\n LayerNorm2d(mask_in_chans // 4),\r\n activation(),\r\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\r\n LayerNorm2d(mask_in_chans),\r\n activation(),\r\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\r\n )\r\n self.no_mask_embed = nn.Embedding(1, embed_dim)\r\n\r\n def get_dense_pe(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns the positional encoding used to encode point prompts,\r\n applied to a dense set of points the shape of the image encoding.\r\n\r\n Returns:\r\n torch.Tensor: Positional encoding with 
shape\r\n 1x(embed_dim)x(embedding_h)x(embedding_w)\r\n \"\"\"\r\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\r\n\r\n def _embed_points(\r\n self,\r\n points: torch.Tensor,\r\n labels: torch.Tensor,\r\n pad: bool,\r\n ) -> torch.Tensor:\r\n \"\"\"Embeds point prompts.\"\"\"\r\n points = points + 0.5 # Shift to center of pixel\r\n if pad:\r\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\r\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\r\n points = torch.cat([points, padding_point], dim=1)\r\n labels = torch.cat([labels, padding_label], dim=1)\r\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\r\n point_embedding[labels == -1] = 0.0\r\n point_embedding[labels == -1] += self.not_a_point_embed.weight\r\n point_embedding[labels == 0] += self.point_embeddings[0].weight\r\n point_embedding[labels == 1] += self.point_embeddings[1].weight\r\n return point_embedding\r\n\r\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Embeds box prompts.\"\"\"\r\n boxes = boxes + 0.5 # Shift to center of pixel\r\n coords = boxes.reshape(-1, 2, 2)\r\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\r\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\r\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\r\n return corner_embedding\r\n\r\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Embeds mask inputs.\"\"\"\r\n mask_embedding = self.mask_downscaling(masks)\r\n return mask_embedding\r\n\r\n def _get_batch_size(\r\n self,\r\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\r\n boxes: Optional[torch.Tensor],\r\n masks: Optional[torch.Tensor],\r\n ) -> int:\r\n \"\"\"\r\n Gets the batch size of the output given the batch size of the input prompts.\r\n \"\"\"\r\n if points is not None:\r\n return points[0].shape[0]\r\n elif boxes is not None:\r\n return boxes.shape[0]\r\n elif masks is not None:\r\n return masks.shape[0]\r\n else:\r\n return 1\r\n\r\n def _get_device(self) -> torch.device:\r\n return self.point_embeddings[0].weight.device\r\n\r\n def forward(\r\n self,\r\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\r\n boxes: Optional[torch.Tensor],\r\n masks: Optional[torch.Tensor],\r\n ) -> Tuple[torch.Tensor, torch.Tensor]:\r\n \"\"\"\r\n Embeds different types of prompts, returning both sparse and dense\r\n embeddings.\r\n\r\n Arguments:\r\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\r\n and labels to embed.\r\n boxes (torch.Tensor or none): boxes to embed\r\n masks (torch.Tensor or none): masks to embed\r\n\r\n Returns:\r\n torch.Tensor: sparse embeddings for the points and boxes, with shape\r\n BxNx(embed_dim), where N is determined by the number of input points\r\n and boxes.\r\n torch.Tensor: dense embeddings for the masks, in the shape\r\n Bx(embed_dim)x(embed_H)x(embed_W)\r\n \"\"\"\r\n bs = self._get_batch_size(points, boxes, masks)\r\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\r\n if points is not None:\r\n coords, labels = points\r\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\r\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\r\n if boxes is not None:\r\n box_embeddings = self._embed_boxes(boxes)\r\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\r\n\r\n if masks is not None:\r\n dense_embeddings = 
self._embed_masks(masks)\r\n else:\r\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\r\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\r\n )\r\n\r\n return sparse_embeddings, dense_embeddings\r" }, { "identifier": "TwoWayTransformer", "path": "sam/segment_anything/modeling/transformer.py", "snippet": "class TwoWayTransformer(nn.Module):\r\n def __init__(\r\n self,\r\n depth: int,\r\n embedding_dim: int,\r\n num_heads: int,\r\n mlp_dim: int,\r\n activation: Type[nn.Module] = nn.ReLU,\r\n attention_downsample_rate: int = 2,\r\n ) -> None:\r\n \"\"\"\r\n A transformer decoder that attends to an input image using\r\n queries whose positional embedding is supplied.\r\n\r\n Args:\r\n depth (int): number of layers in the transformer\r\n embedding_dim (int): the channel dimension for the input embeddings\r\n num_heads (int): the number of heads for multihead attention. Must\r\n divide embedding_dim\r\n mlp_dim (int): the channel dimension internal to the MLP block\r\n activation (nn.Module): the activation to use in the MLP block\r\n \"\"\"\r\n super().__init__()\r\n self.depth = depth\r\n self.embedding_dim = embedding_dim\r\n self.num_heads = num_heads\r\n self.mlp_dim = mlp_dim\r\n self.layers = nn.ModuleList()\r\n\r\n for i in range(depth):\r\n self.layers.append(\r\n TwoWayAttentionBlock(\r\n embedding_dim=embedding_dim,\r\n num_heads=num_heads,\r\n mlp_dim=mlp_dim,\r\n activation=activation,\r\n attention_downsample_rate=attention_downsample_rate,\r\n skip_first_layer_pe=(i == 0),\r\n )\r\n )\r\n\r\n self.final_attn_token_to_image = Attention(\r\n embedding_dim, num_heads, downsample_rate=attention_downsample_rate\r\n )\r\n self.norm_final_attn = nn.LayerNorm(embedding_dim)\r\n\r\n def forward(\r\n self,\r\n image_embedding: Tensor,\r\n image_pe: Tensor,\r\n point_embedding: Tensor,\r\n ) -> Tuple[Tensor, Tensor]:\r\n \"\"\"\r\n Args:\r\n image_embedding (torch.Tensor): image to attend to. Should be shape\r\n B x embedding_dim x h x w for any h and w.\r\n image_pe (torch.Tensor): the positional encoding to add to the image. 
Must\r\n have the same shape as image_embedding.\r\n point_embedding (torch.Tensor): the embedding to add to the query points.\r\n Must have shape B x N_points x embedding_dim for any N_points.\r\n\r\n Returns:\r\n torch.Tensor: the processed point_embedding\r\n torch.Tensor: the processed image_embedding\r\n \"\"\"\r\n # BxCxHxW -> BxHWxC == B x N_image_tokens x C\r\n bs, c, h, w = image_embedding.shape\r\n image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\r\n image_pe = image_pe.flatten(2).permute(0, 2, 1)\r\n\r\n # Prepare queries\r\n queries = point_embedding\r\n keys = image_embedding\r\n\r\n # Apply transformer blocks and final layernorm\r\n for layer in self.layers:\r\n queries, keys = layer(\r\n queries=queries,\r\n keys=keys,\r\n query_pe=point_embedding,\r\n key_pe=image_pe,\r\n )\r\n\r\n # Apply the final attention layer from the points to the image\r\n q = queries + point_embedding\r\n k = keys + image_pe\r\n attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\r\n queries = queries + attn_out\r\n queries = self.norm_final_attn(queries)\r\n\r\n return queries, keys\r" }, { "identifier": "TinyViT", "path": "sam/segment_anything/modeling/tiny_vit_sam.py", "snippet": "class TinyViT(nn.Module):\r\n def __init__(self, img_size=224, in_chans=3, num_classes=1000,\r\n embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],\r\n num_heads=[3, 6, 12, 24],\r\n window_sizes=[7, 7, 14, 7],\r\n mlp_ratio=4.,\r\n drop_rate=0.,\r\n drop_path_rate=0.1,\r\n use_checkpoint=False,\r\n mbconv_expand_ratio=4.0,\r\n local_conv_size=3,\r\n layer_lr_decay=1.0,\r\n ):\r\n super().__init__()\r\n self.img_size=img_size\r\n self.num_classes = num_classes\r\n self.depths = depths\r\n self.num_layers = len(depths)\r\n self.mlp_ratio = mlp_ratio\r\n\r\n activation = nn.GELU\r\n\r\n self.patch_embed = PatchEmbed(in_chans=in_chans,\r\n embed_dim=embed_dims[0],\r\n resolution=img_size,\r\n activation=activation)\r\n\r\n patches_resolution = self.patch_embed.patches_resolution\r\n self.patches_resolution = patches_resolution\r\n\r\n # stochastic depth\r\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate,\r\n sum(depths))] # stochastic depth decay rule\r\n\r\n # build layers\r\n self.layers = nn.ModuleList()\r\n for i_layer in range(self.num_layers):\r\n kwargs = dict(dim=embed_dims[i_layer],\r\n input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),\r\n patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),\r\n # input_resolution=(patches_resolution[0] // (2 ** i_layer),\r\n # patches_resolution[1] // (2 ** i_layer)),\r\n depth=depths[i_layer],\r\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\r\n downsample=PatchMerging if (\r\n i_layer < self.num_layers - 1) else None,\r\n use_checkpoint=use_checkpoint,\r\n out_dim=embed_dims[min(\r\n i_layer + 1, len(embed_dims) - 1)],\r\n activation=activation,\r\n )\r\n if i_layer == 0:\r\n layer = ConvLayer(\r\n conv_expand_ratio=mbconv_expand_ratio,\r\n **kwargs,\r\n )\r\n else:\r\n layer = BasicLayer(\r\n num_heads=num_heads[i_layer],\r\n window_size=window_sizes[i_layer],\r\n mlp_ratio=self.mlp_ratio,\r\n drop=drop_rate,\r\n local_conv_size=local_conv_size,\r\n **kwargs)\r\n self.layers.append(layer)\r\n\r\n # Classifier head\r\n self.norm_head = nn.LayerNorm(embed_dims[-1])\r\n self.head = nn.Linear(\r\n embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()\r\n\r\n # init weights\r\n self.apply(self._init_weights)\r\n 
self.set_layer_lr_decay(layer_lr_decay)\r\n self.neck = nn.Sequential(\r\n nn.Conv2d(\r\n embed_dims[-1],\r\n 256,\r\n kernel_size=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(256),\r\n nn.Conv2d(\r\n 256,\r\n 256,\r\n kernel_size=3,\r\n padding=1,\r\n bias=False,\r\n ),\r\n LayerNorm2d(256),\r\n )\r\n def set_layer_lr_decay(self, layer_lr_decay):\r\n decay_rate = layer_lr_decay\r\n\r\n # layers -> blocks (depth)\r\n depth = sum(self.depths)\r\n lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]\r\n #print_log(\"LR SCALES:\", lr_scales)\r\n\r\n def _set_lr_scale(m, scale):\r\n for p in m.parameters():\r\n p.lr_scale = scale\r\n\r\n self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))\r\n i = 0\r\n for layer in self.layers:\r\n for block in layer.blocks:\r\n block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))\r\n i += 1\r\n if layer.downsample is not None:\r\n layer.downsample.apply(\r\n lambda x: _set_lr_scale(x, lr_scales[i - 1]))\r\n assert i == depth\r\n for m in [self.norm_head, self.head]:\r\n m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))\r\n\r\n for k, p in self.named_parameters():\r\n p.param_name = k\r\n\r\n def _check_lr_scale(m):\r\n for p in m.parameters():\r\n assert hasattr(p, 'lr_scale'), p.param_name\r\n\r\n self.apply(_check_lr_scale)\r\n\r\n def _init_weights(self, m):\r\n if isinstance(m, nn.Linear):\r\n trunc_normal_(m.weight, std=.02)\r\n if isinstance(m, nn.Linear) and m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.LayerNorm):\r\n nn.init.constant_(m.bias, 0)\r\n nn.init.constant_(m.weight, 1.0)\r\n\r\n @torch.jit.ignore\r\n def no_weight_decay_keywords(self):\r\n return {'attention_biases'}\r\n\r\n def forward_features(self, x):\r\n # x: (N, C, H, W)\r\n x = self.patch_embed(x)\r\n\r\n x = self.layers[0](x)\r\n start_i = 1\r\n\r\n interm_embeddings=[]\r\n for i in range(start_i, len(self.layers)):\r\n layer = self.layers[i]\r\n x = layer(x)\r\n # print_log('x shape:', x.shape, '---i:', i)\r\n if i == 1:\r\n interm_embeddings.append(x.view(x.shape[0], 64, 64, -1))\r\n\r\n B,_,C=x.size()\r\n x = x.view(B, 64, 64, C)\r\n x=x.permute(0, 3, 1, 2)\r\n x=self.neck(x)\r\n return x, interm_embeddings\r\n\r\n def forward(self, x):\r\n x, interm_embeddings = self.forward_features(x)\r\n #x = self.norm_head(x)\r\n #x = self.head(x)\r\n # print_log('come to here is correct'* 3)\r\n return x, interm_embeddings\r" } ]
import torch
from functools import partial

from .modeling import ImageEncoderViT, MaskDecoder, MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer, TinyViT
11626
mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoderHQ( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, vit_dim=160, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: device = "cuda" if torch.cuda.is_available() else "cpu" state_dict = torch.load(f, map_location=device) info = mobile_sam.load_state_dict(state_dict, strict=False) #print(info) for n, p in mobile_sam.named_parameters(): if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n: p.requires_grad = False return mobile_sam def build_mobile_sam(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoder( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: state_dict = torch.load(f,map_location=torch.device(device)) mobile_sam.load_state_dict(state_dict,strict=True) return mobile_sam sam_model_registry = { "default": build_sam_vit_h, "vit_h": build_sam_vit_h, "vit_l": build_sam_vit_l, "vit_b": build_sam_vit_b, "vit_tiny": build_sam_vit_t, "mobile_sam": build_mobile_sam, } def _build_sam( encoder_embed_dim, encoder_depth, encoder_num_heads, encoder_global_attn_indexes, checkpoint=None, device="gpu", ): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size sam = Sam(
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. def build_sam_vit_h(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1280, encoder_depth=32, encoder_num_heads=16, encoder_global_attn_indexes=[7, 15, 23, 31], checkpoint=checkpoint, ) build_sam = build_sam_vit_h def build_sam_vit_l(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1024, encoder_depth=24, encoder_num_heads=16, encoder_global_attn_indexes=[5, 11, 17, 23], checkpoint=checkpoint, ) def build_sam_vit_b(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, encoder_global_attn_indexes=[2, 5, 8, 11], checkpoint=checkpoint, ) def build_sam_vit_t(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoderHQ( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, vit_dim=160, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: device = "cuda" if torch.cuda.is_available() else "cpu" state_dict = torch.load(f, map_location=device) info = mobile_sam.load_state_dict(state_dict, strict=False) #print(info) for n, p in mobile_sam.named_parameters(): if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n: p.requires_grad = False return mobile_sam def build_mobile_sam(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoder( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: state_dict = torch.load(f,map_location=torch.device(device)) mobile_sam.load_state_dict(state_dict,strict=True) return mobile_sam sam_model_registry = { 
"default": build_sam_vit_h, "vit_h": build_sam_vit_h, "vit_l": build_sam_vit_l, "vit_b": build_sam_vit_b, "vit_tiny": build_sam_vit_t, "mobile_sam": build_mobile_sam, } def _build_sam( encoder_embed_dim, encoder_depth, encoder_num_heads, encoder_global_attn_indexes, checkpoint=None, device="gpu", ): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size sam = Sam(
image_encoder=ImageEncoderViT(
1
2023-10-14 13:45:54+00:00
16k
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbrcsl_mlpdyn_roboverse.py
[ { "identifier": "EnsembleDynamicsModel", "path": "offlinerlkit/modules/dynamics_module.py", "snippet": "class EnsembleDynamicsModel(nn.Module):\n def __init__(\n self,\n obs_dim: int,\n action_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n num_ensemble: int = 7,\n num_elites: int = 5,\n activation: nn.Module = Swish,\n weight_decays: Optional[Union[List[float], Tuple[float]]] = None,\n with_reward: bool = True,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.num_ensemble = num_ensemble\n self.num_elites = num_elites\n self._with_reward = with_reward\n self.device = torch.device(device)\n\n self.activation = activation()\n\n assert len(weight_decays) == (len(hidden_dims) + 1)\n\n module_list = []\n hidden_dims = [obs_dim+action_dim] + list(hidden_dims)\n if weight_decays is None:\n weight_decays = [0.0] * (len(hidden_dims) + 1)\n for in_dim, out_dim, weight_decay in zip(hidden_dims[:-1], hidden_dims[1:], weight_decays[:-1]):\n module_list.append(EnsembleLinear(in_dim, out_dim, num_ensemble, weight_decay))\n self.backbones = nn.ModuleList(module_list)\n\n self.output_layer = EnsembleLinear(\n hidden_dims[-1],\n 2 * (obs_dim + self._with_reward),\n num_ensemble,\n weight_decays[-1]\n )\n\n self.register_parameter(\n \"max_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * -10, requires_grad=True)\n )\n\n self.register_parameter(\n \"elites\",\n nn.Parameter(torch.tensor(list(range(0, self.num_elites))), requires_grad=False)\n )\n\n self.to(self.device)\n\n def forward(self, obs_action: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:\n obs_action = torch.as_tensor(obs_action, dtype=torch.float32).to(self.device)\n output = obs_action\n for layer in self.backbones:\n output = self.activation(layer(output))\n mean, logvar = torch.chunk(self.output_layer(output), 2, dim=-1)\n logvar = soft_clamp(logvar, self.min_logvar, self.max_logvar)\n return mean, logvar\n\n def load_save(self) -> None:\n for layer in self.backbones:\n layer.load_save()\n self.output_layer.load_save()\n\n def update_save(self, indexes: List[int]) -> None:\n for layer in self.backbones:\n layer.update_save(indexes)\n self.output_layer.update_save(indexes)\n \n def get_decay_loss(self) -> torch.Tensor:\n decay_loss = 0\n for layer in self.backbones:\n decay_loss += layer.get_decay_loss()\n decay_loss += self.output_layer.get_decay_loss()\n return decay_loss\n\n def set_elites(self, indexes: List[int]) -> None:\n assert len(indexes) <= self.num_ensemble and max(indexes) < self.num_ensemble\n self.register_parameter('elites', nn.Parameter(torch.tensor(indexes), requires_grad=False))\n \n def random_elite_idxs(self, batch_size: int) -> np.ndarray:\n idxs = np.random.choice(self.elites.data.cpu().numpy(), size=batch_size)\n return idxs" }, { "identifier": "EnsembleDynamics", "path": "offlinerlkit/dynamics/ensemble_dynamics.py", "snippet": "class EnsembleDynamics(BaseDynamics):\n def __init__(\n self,\n model: nn.Module,\n optim: torch.optim.Optimizer,\n scaler: StandardScaler,\n terminal_fn: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray],\n penalty_coef: float = 0.0,\n uncertainty_mode: str = \"aleatoric\"\n ) -> None:\n super().__init__(model, optim)\n self.scaler = scaler\n self.terminal_fn = terminal_fn\n self._penalty_coef = penalty_coef\n self._uncertainty_mode = uncertainty_mode\n\n @ torch.no_grad()\n def step(\n self,\n obs: 
np.ndarray,\n action: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:\n '''\n Return:\n reward (B,1) (if obs has batch)\n terminal (B,1)\n '''\n \"imagine single forward step\"\n obs_act = np.concatenate([obs, action], axis=-1)\n obs_act = self.scaler.transform(obs_act)\n mean, logvar = self.model(obs_act)\n mean = mean.cpu().numpy()\n logvar = logvar.cpu().numpy()\n mean[..., :-1] += obs # We estimated delta_obs\n std = np.sqrt(np.exp(logvar))\n\n ensemble_samples = (mean + np.random.normal(size=mean.shape) * std).astype(np.float32)\n\n # choose one model from ensemble\n num_models, batch_size, _ = ensemble_samples.shape\n model_idxs = self.model.random_elite_idxs(batch_size)\n samples = ensemble_samples[model_idxs, np.arange(batch_size)]\n \n next_obs = samples[..., :-1]\n reward = samples[..., -1:]\n terminal = self.terminal_fn(obs, action, next_obs)\n info = {}\n info[\"raw_reward\"] = reward\n\n if self._penalty_coef:\n if self._uncertainty_mode == \"aleatoric\":\n penalty = np.amax(np.linalg.norm(std, axis=2), axis=0)\n elif self._uncertainty_mode == \"pairwise-diff\":\n next_obses_mean = mean[..., :-1]\n next_obs_mean = np.mean(next_obses_mean, axis=0)\n diff = next_obses_mean - next_obs_mean\n penalty = np.amax(np.linalg.norm(diff, axis=2), axis=0)\n elif self._uncertainty_mode == \"ensemble_std\":\n next_obses_mean = mean[..., :-1]\n penalty = np.sqrt(next_obses_mean.var(0).mean(1))\n else:\n raise ValueError\n penalty = np.expand_dims(penalty, 1).astype(np.float32)\n assert penalty.shape == reward.shape\n reward = reward - self._penalty_coef * penalty\n info[\"penalty\"] = penalty\n \n return next_obs, reward, terminal, info\n \n @ torch.no_grad()\n def sample_next_obss(\n self,\n obs: torch.Tensor,\n action: torch.Tensor,\n num_samples: int\n ) -> torch.Tensor:\n obs_act = torch.cat([obs, action], dim=-1)\n obs_act = self.scaler.transform_tensor(obs_act)\n mean, logvar = self.model(obs_act)\n mean[..., :-1] += obs\n std = torch.sqrt(torch.exp(logvar))\n\n mean = mean[self.model.elites.data.cpu().numpy()]\n std = std[self.model.elites.data.cpu().numpy()]\n\n samples = torch.stack([mean + torch.randn_like(std) * std for i in range(num_samples)], 0)\n next_obss = samples[..., :-1]\n return next_obss\n\n def format_samples_for_training(self, data: Dict) -> Tuple[np.ndarray, np.ndarray]:\n obss = data[\"observations\"]\n actions = data[\"actions\"]\n next_obss = data[\"next_observations\"]\n rewards = data[\"rewards\"]\n rewards = rewards.reshape(rewards.shape[0], -1)\n delta_obss = next_obss - obss\n inputs = np.concatenate((obss, actions), axis=-1)\n targets = np.concatenate((delta_obss, rewards), axis=-1)\n return inputs, targets\n\n def train(\n self,\n data: Dict,\n logger: Logger,\n max_epochs: Optional[float] = None,\n max_epochs_since_update: int = 5,\n batch_size: int = 256,\n holdout_ratio: float = 0.2,\n logvar_loss_coef: float = 0.01\n ) -> None:\n inputs, targets = self.format_samples_for_training(data)\n data_size = inputs.shape[0]\n holdout_size = min(int(data_size * holdout_ratio), 1000)\n train_size = data_size - holdout_size\n train_splits, holdout_splits = torch.utils.data.random_split(range(data_size), (train_size, holdout_size))\n train_inputs, train_targets = inputs[train_splits.indices], targets[train_splits.indices]\n holdout_inputs, holdout_targets = inputs[holdout_splits.indices], targets[holdout_splits.indices]\n\n self.scaler.fit(train_inputs)\n train_inputs = self.scaler.transform(train_inputs)\n holdout_inputs = 
self.scaler.transform(holdout_inputs)\n holdout_losses = [1e10 for i in range(self.model.num_ensemble)]\n\n data_idxes = np.random.randint(train_size, size=[self.model.num_ensemble, train_size])\n def shuffle_rows(arr):\n idxes = np.argsort(np.random.uniform(size=arr.shape), axis=-1)\n return arr[np.arange(arr.shape[0])[:, None], idxes]\n\n epoch = 0\n cnt = 0\n logger.log(\"Training dynamics:\")\n while True:\n epoch += 1\n train_loss = self.learn(train_inputs[data_idxes], train_targets[data_idxes], batch_size, logvar_loss_coef)\n new_holdout_losses = self.validate(holdout_inputs, holdout_targets)\n holdout_loss = (np.sort(new_holdout_losses)[:self.model.num_elites]).mean()\n logger.logkv(\"loss/dynamics_train_loss\", train_loss)\n logger.logkv(\"loss/dynamics_holdout_loss\", holdout_loss)\n logger.set_timestep(epoch)\n logger.dumpkvs(exclude=[\"policy_training_progress\"])\n\n # shuffle data for each base learner\n data_idxes = shuffle_rows(data_idxes)\n\n indexes = []\n for i, new_loss, old_loss in zip(range(len(holdout_losses)), new_holdout_losses, holdout_losses):\n improvement = (old_loss - new_loss) / old_loss\n if improvement > 0.01:\n indexes.append(i)\n holdout_losses[i] = new_loss\n \n if len(indexes) > 0:\n self.model.update_save(indexes)\n cnt = 0\n else:\n cnt += 1\n \n if (cnt >= max_epochs_since_update) or (max_epochs and (epoch >= max_epochs)):\n break\n\n indexes = self.select_elites(holdout_losses)\n self.model.set_elites(indexes)\n self.model.load_save()\n self.save(logger.model_dir)\n self.model.eval()\n logger.log(\"elites:{} , holdout loss: {}\".format(indexes, (np.sort(holdout_losses)[:self.model.num_elites]).mean()))\n \n def learn(\n self,\n inputs: np.ndarray,\n targets: np.ndarray,\n batch_size: int = 256,\n logvar_loss_coef: float = 0.01\n ) -> float:\n self.model.train()\n train_size = inputs.shape[1]\n losses = []\n\n for batch_num in range(int(np.ceil(train_size / batch_size))):\n inputs_batch = inputs[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = targets[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = torch.as_tensor(targets_batch).to(self.model.device)\n \n mean, logvar = self.model(inputs_batch)\n inv_var = torch.exp(-logvar)\n # Average over batch and dim, sum over ensembles.\n mse_loss_inv = (torch.pow(mean - targets_batch, 2) * inv_var).mean(dim=(1, 2)) # MLE for Gaussian\n var_loss = logvar.mean(dim=(1, 2))\n loss = mse_loss_inv.sum() + var_loss.sum()\n loss = loss + self.model.get_decay_loss()\n loss = loss + logvar_loss_coef * self.model.max_logvar.sum() - logvar_loss_coef * self.model.min_logvar.sum()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n losses.append(loss.item())\n return np.mean(losses)\n \n @ torch.no_grad()\n def validate(self, inputs: np.ndarray, targets: np.ndarray) -> List[float]:\n self.model.eval()\n targets = torch.as_tensor(targets).to(self.model.device)\n mean, _ = self.model(inputs)\n loss = ((mean - targets) ** 2).mean(dim=(1, 2))\n val_loss = list(loss.cpu().numpy())\n return val_loss\n \n def select_elites(self, metrics: List) -> List[int]:\n pairs = [(metric, index) for metric, index in zip(metrics, range(len(metrics)))]\n pairs = sorted(pairs, key=lambda x: x[0])\n elites = [pairs[i][1] for i in range(self.model.num_elites)]\n return elites\n\n def save(self, save_path: str) -> None:\n torch.save(self.model.state_dict(), os.path.join(save_path, \"dynamics.pth\"))\n self.scaler.save_scaler(save_path)\n \n def load(self, load_path: str) -> None:\n 
self.model.load_state_dict(torch.load(os.path.join(load_path, \"dynamics.pth\"), map_location=self.model.device))\n self.scaler.load_scaler(load_path)" }, { "identifier": "get_termination_fn", "path": "offlinerlkit/utils/termination_fns.py", "snippet": "def get_termination_fn(task):\n if 'halfcheetahvel' in task:\n return termination_fn_halfcheetahveljump\n elif 'halfcheetah' in task:\n return termination_fn_halfcheetah\n elif 'hopper' in task:\n return termination_fn_hopper\n elif 'antangle' in task:\n return termination_fn_antangle\n elif 'ant' in task:\n return termination_fn_ant\n elif 'walker2d' in task:\n return termination_fn_walker2d\n elif 'point2denv' in task:\n return termination_fn_point2denv\n elif 'point2dwallenv' in task:\n return termination_fn_point2dwallenv\n elif 'pendulum' in task:\n return termination_fn_pendulum\n elif 'humanoid' in task:\n return termination_fn_humanoid\n elif 'pen' in task:\n return termination_fn_pen\n elif 'door' in task:\n return termination_fn_door\n else:\n return termination_fn_default" }, { "identifier": "StandardScaler", "path": "offlinerlkit/utils/scaler.py", "snippet": "class StandardScaler(object):\n def __init__(self, mu=None, std=None):\n self.mu = mu\n self.std = std\n\n def fit(self, data):\n \"\"\"Runs two ops, one for assigning the mean of the data to the internal mean, and\n another for assigning the standard deviation of the data to the internal standard deviation.\n This function must be called within a 'with <session>.as_default()' block.\n\n Arguments:\n data (np.ndarray): A numpy array containing the input\n\n Returns: None.\n \"\"\"\n self.mu = np.mean(data, axis=0, keepdims=True)\n self.std = np.std(data, axis=0, keepdims=True)\n self.std[self.std < 1e-12] = 1.0\n\n def transform(self, data):\n \"\"\"Transforms the input matrix data using the parameters of this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return (data - self.mu) / self.std\n\n def inverse_transform(self, data):\n \"\"\"Undoes the transformation performed by this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return self.std * data + self.mu\n \n def save_scaler(self, save_path):\n mu_path = path.join(save_path, \"mu.npy\")\n std_path = path.join(save_path, \"std.npy\")\n np.save(mu_path, self.mu)\n np.save(std_path, self.std)\n \n def load_scaler(self, load_path):\n mu_path = path.join(load_path, \"mu.npy\")\n std_path = path.join(load_path, \"std.npy\")\n self.mu = np.load(mu_path)\n self.std = np.load(std_path)\n\n def transform_tensor(self, data: torch.Tensor):\n device = data.device\n data = self.transform(data.cpu().numpy())\n data = torch.tensor(data, device=device)\n return data" }, { "identifier": "PickPlaceObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class PickPlaceObsWrapper(gym.ObservationWrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n\n tmp_true_obs = get_pickplace_obs(tmp_obs)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def observation(self, observation: Dict[str, np.ndarray]) -> np.ndarray:\n return get_pickplace_obs(observation)\n\n def 
reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n return self.observation(self.env.reset())" }, { "identifier": "DoubleDrawerObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class DoubleDrawerObsWrapper(gym.Wrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n info = env.get_info()\n\n tmp_true_obs = get_doubledrawer_obs(tmp_obs, info)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = get_doubledrawer_obs(obs, info)\n return obs, reward, done, info\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n obs = self.env.reset()\n info = self.env.get_info()\n return get_doubledrawer_obs(obs, info)" }, { "identifier": "get_pickplace_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_pickplace_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n init_obss.append(get_pickplace_obs(obs_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n elif key == 'next_observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "get_doubledrawer_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_doubledrawer_dataset(\n 
prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n info_list = d['env_infos']\n init_obss.append(get_doubledrawer_obs(obs_list[0], info_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n info_list = d['env_infos']\n # initial info is similar to step 1\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, [info_list[0]] + info_list[:-1])]\n elif key == 'next_observations':\n info_list = d['env_infos']\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, info_list)]\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "Logger", "path": "offlinerlkit/utils/logger.py", "snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n 
self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()" }, { "identifier": "make_log_dirs", "path": "offlinerlkit/utils/logger.py", "snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs" }, { "identifier": "RcslPolicyTrainer", "path": "offlinerlkit/policy_trainer/rcsl_policy_trainer.py", "snippet": "class RcslPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n eval_env: Union[gym.Env, gymnasium.Env],\n offline_dataset: Dict[str, np.ndarray],\n rollout_dataset: Optional[Dict[str, np.ndarray]],\n goal: float,\n logger: Logger,\n seed,\n eval_env2: Optional[Union[gym.Env, gymnasium.Env]] = None,\n epoch: int = 1000,\n batch_size: int = 256,\n offline_ratio: float = 0,\n eval_episodes: int = 10,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n num_workers = 1,\n has_terminal = False,\n binary_return = True\n ) -> None:\n '''\n offline_ratio = 0: rollout only, 1: offline only\n '''\n self.policy = policy\n self.eval_env = eval_env\n 
self.eval_env2 = eval_env2\n self.horizon = horizon\n self.offline_dataset = offline_dataset\n self.rollout_dataset = rollout_dataset\n self.goal = goal\n self.logger = logger\n\n self._epoch = epoch\n self._batch_size = batch_size\n self._offline_ratio = offline_ratio\n self._eval_episodes = eval_episodes\n self.lr_scheduler = lr_scheduler\n self.num_workers = num_workers\n self.env_seed = seed\n self.binary_return = binary_return\n\n self.is_gymnasium_env = hasattr(self.eval_env, \"get_true_observation\")\n assert (not self.is_gymnasium_env) or (self.horizon is not None), \"Horizon must be specified for Gymnasium env\"\n self.has_terminal = has_terminal\n\n def train(self, holdout_ratio: float = 0.1, last_eval = False, find_best_start: Optional[int] = None, improve_threshold: float = 0.01) -> Dict[str, float]:\n '''\n last_eval: If True, only evaluates at the last epoch\n find_best_start: If >=0, begin to find the best epoch by holdout loss\n '''\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n\n dataset = DictDataset(self.offline_dataset)\n\n if holdout_ratio == 0.:\n has_holdout = False\n train_dataset = dataset\n else:\n has_holdout = True\n holdout_size = int(len(dataset) * holdout_ratio)\n train_size = len(dataset) - holdout_size\n train_dataset, holdout_dataset = torch.utils.data.random_split(dataset, [train_size, holdout_size], \n generator=torch.Generator().manual_seed(self.env_seed))\n data_loader = DataLoader(\n train_dataset,\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n )\n best_policy_dict = self.policy.state_dict()\n best_holdout_loss = 1e10\n epochs_since_upd = 0\n stop_by_holdout = (find_best_start is not None)\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(enumerate(data_loader), desc=f\"Epoch #{e}/{self._epoch}\")\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n\n '''\n loss_dict = self.policy.learn(batch)\n pbar.set_postfix(**loss_dict)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n # Test validation loss\n if has_holdout:\n holdout_loss = self.validate(holdout_dataset)\n if stop_by_holdout and e >= find_best_start: # test holdout improvement\n if (best_holdout_loss - holdout_loss) / best_holdout_loss > improve_threshold:\n best_holdout_loss = holdout_loss\n best_policy_dict = deepcopy(self.policy.state_dict())\n epochs_since_upd = 0\n else:\n epochs_since_upd += 1\n\n if last_eval and e < self._epoch: # When last_eval is True, only evaluate on last epoch\n pass\n else:\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n ep_reward_max, ep_reward_min = np.max(eval_info[\"eval/episode_reward\"]), np.min(eval_info[\"eval/episode_reward\"])\n ep_length_mean, ep_length_std = np.mean(eval_info[\"eval/episode_length\"]), np.std(eval_info[\"eval/episode_length\"])\n\n if not hasattr(self.eval_env, \"get_normalized_score\"): # gymnasium_env does not have normalized score\n last_10_performance.append(ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward_std\", ep_reward_std) \n else: \n norm_ep_rew_mean = self.eval_env.get_normalized_score(ep_reward_mean) * 100\n norm_ep_rew_std 
= self.eval_env.get_normalized_score(ep_reward_std) * 100\n norm_ep_rew_max = self.eval_env.get_normalized_score(ep_reward_max) * 100\n norm_ep_rew_min = self.eval_env.get_normalized_score(ep_reward_min) * 100\n last_10_performance.append(norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward\", norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward_std\", norm_ep_rew_std)\n self.logger.logkv(\"eval/normalized_episode_reward_max\", norm_ep_rew_max)\n self.logger.logkv(\"eval/normalized_episode_reward_min\", norm_ep_rew_min)\n self.logger.logkv(\"eval/episode_length\", ep_length_mean)\n self.logger.logkv(\"eval/episode_length_std\", ep_length_std)\n\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n\n if stop_by_holdout and epochs_since_upd >= 5: # Stop, evaluate for the last time\n self.policy.load_state_dict(best_policy_dict)\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n self.logger.log(f\"Final evaluation: Mean {ep_reward_mean}, std {ep_reward_std}\\n\")\n break\n \n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy_final.pth\"))\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}\n\n def _evaluate(self, eval_episodes: int = -1) -> Dict[str, List[float]]:\n '''\n Always set desired rtg to 0\n '''\n # Pointmaze obs has different format, needs to be treated differently\n if eval_episodes == -1:\n real_eval_episodes = self._eval_episodes\n else:\n real_eval_episodes = eval_episodes\n is_gymnasium_env = self.is_gymnasium_env\n\n self.eval_env.reset(seed=self.env_seed) # Fix seed\n \n self.policy.eval()\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n eval_ep_info_buffer = []\n num_episodes = 0\n episode_reward, episode_length = 0, 0\n\n if not self.has_terminal: # don't use terminal signal, terminate when reach horizon\n while num_episodes < real_eval_episodes:\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n for timestep in range(self.horizon): # One epoch\n action = self.policy.select_action(obs.reshape(1, -1), rtg)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, info = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n rtg = rtg - reward\n episode_length += 1\n\n obs = next_obs\n if self.binary_return:\n episode_reward = 1 if episode_reward > 0 else 0 # Clip to 1\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n else:\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n while num_episodes < self._eval_episodes:\n action = self.policy.select_action(obs.reshape(1, -1), rtg)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = 
self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if terminal: # Episode finishes\n if self.binary_return:\n episode_reward = 1 if episode_reward > 0 else 0 # Clip to 1\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n \n return {\n \"eval/episode_reward\": [ep_info[\"episode_reward\"] for ep_info in eval_ep_info_buffer],\n \"eval/episode_length\": [ep_info[\"episode_length\"] for ep_info in eval_ep_info_buffer]\n }\n \n @ torch.no_grad()\n def validate(self, holdout_dataset: torch.utils.data.Dataset) -> Optional[float]:\n data_loader = DataLoader(\n holdout_dataset,\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n )\n self.policy.eval()\n\n pbar = tqdm(enumerate(data_loader), total=len(data_loader))\n losses = []\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n '''\n loss_dict = self.policy.validate(batch)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n\n if \"holdout_loss\" in loss_dict:\n loss = loss_dict[\"holdout_loss\"]\n losses.append(loss)\n\n if len(losses) > 0:\n return(sum(losses) / len(losses))\n else:\n return None" }, { "identifier": "DiffusionPolicyTrainer", "path": "offlinerlkit/policy_trainer/diffusion_policy_trainer.py", "snippet": "class DiffusionPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n offline_dataset: Dict[str, np.ndarray],\n logger: Logger,\n seed,\n epoch: int = 25,\n batch_size: int = 256,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n num_workers = 1,\n has_terminal = False\n ) -> None:\n '''\n offline_ratio = 0: rollout only, 1: offline only\n '''\n self.policy = policy\n self.horizon = horizon\n self.offline_dataset = offline_dataset\n self.logger = logger\n\n self._epoch = epoch\n self._batch_size = batch_size\n self.lr_scheduler = lr_scheduler\n self.num_workers = num_workers\n self.env_seed = seed\n self.has_terminal = has_terminal\n\n def train(self) -> Dict[str, float]:\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n\n data_loader = DataLoader(\n DictDataset(self.offline_dataset),\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n ) \n\n # train loop\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(enumerate(data_loader), desc=f\"Epoch #{e}/{self._epoch}\")\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n\n '''\n loss_dict = self.policy.learn(batch)\n pbar.set_postfix(**loss_dict)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n \n # save checkpoint\n torch.save(self.policy.state_dict(), os.path.join(self.logger.checkpoint_dir, \"policy.pth\"))\n\n 
self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy.pth\"))\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}" }, { "identifier": "none_or_str", "path": "offlinerlkit/utils/none_or_str.py", "snippet": "def none_or_str(value):\n if value == 'None':\n return None\n return value" }, { "identifier": "SimpleDiffusionPolicy", "path": "offlinerlkit/policy/diffusion/simple_diffusion.py", "snippet": "class SimpleDiffusionPolicy(ConditionalDiffusionModel):\n '''\n Note: When loading DiffusionPolicy, also need to load scaler manually\n '''\n def __init__(\n self,\n obs_shape,\n act_shape,\n feature_dim,\n num_training_steps,\n num_diffusion_steps,\n device,\n **kwargs,\n ):\n super().__init__(\n input_dim=np.prod(act_shape),\n cond_shape_dict={\"obs\": obs_shape, \"feat\": (feature_dim,)},\n num_training_steps=num_training_steps,\n num_diffusion_steps=num_diffusion_steps,\n clip_sample=True,\n device=device,\n **kwargs,\n )\n\n def learn(self, batch: Dict):\n '''\n Update one batch\n '''\n obss = batch['observations'].type(torch.float32).to(self.device)\n actions = batch['actions'].type(torch.float32).to(self.device)\n rtgs = batch['rtgs']\n rtgs = rtgs.reshape(rtgs.shape[0], -1).type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n\n return super().learn(actions, {\"obs\": obss, \"feat\": rtgs}, weights)\n\n def validate(self, batch: Dict):\n '''\n Update one batch\n '''\n obss = batch['observations'].type(torch.float32).to(self.device)\n actions = batch['actions'].type(torch.float32).to(self.device)\n rtgs = batch['rtgs']\n rtgs = rtgs.reshape(rtgs.shape[0], -1).type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n\n return super().validate(actions, {\"obs\": obss, \"feat\": rtgs}, weights)\n\n def select_action(self, obs, feat):\n # print(f\"DiffusionPolicy: select action with obs shape {obs.shape}, feat(rtg) shape {feat.shape}\")\n obs = torch.as_tensor(obs, dtype = torch.float32, device = self.device)\n feat = torch.as_tensor(feat, dtype = torch.float32, device = self.device)\n\n with torch.no_grad():\n action = super().sample({\"obs\": obs, \"feat\": feat})\n # print(action)\n return action.cpu().numpy()\n\n def train(self) -> None:\n self.noise_pred_net.train()\n self.cond_encoders.train()\n\n def eval(self) -> None:\n self.noise_pred_net.eval()\n self.cond_encoders.eval()" }, { "identifier": "AutoregressivePolicy", "path": "offlinerlkit/policy/rcsl/rcsl_autoregressive.py", "snippet": "class AutoregressivePolicy(nn.Module):\n def __init__(self, obs_dim, act_dim, hidden_dims, lr, device):\n super().__init__()\n self.obs_dim = obs_dim\n self.act_dim = act_dim\n\n # Input is obs + act + one-hot for the predicted dimension\n # Output is the mean and standard deviation of the predicted dimension\n input_dim = obs_dim + 1 + act_dim + act_dim # also depend on return\n all_dims = [input_dim] + hidden_dims + [2]\n self.model = nn.ModuleList()\n for in_dim, out_dim in zip(all_dims[:-1], all_dims[1:]):\n self.model.append(nn.Linear(in_dim, out_dim))\n self.model.append(nn.LeakyReLU())\n\n self.rcsl_optim = torch.optim.Adam(self.model.parameters(), lr=lr)\n self.device = device\n self.register_parameter(\n \"max_logstd\",\n 
nn.Parameter(torch.ones(1) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logstd\",\n nn.Parameter(torch.ones(1) * -10, requires_grad=True)\n )\n self.to(self.device)\n\n def forward(self, obs, rtg, deterministic: bool = False):\n batch_size = obs.size(0)\n rtg = rtg.reshape(batch_size, 1)\n\n # Initialize action to zeros\n act = torch.zeros((batch_size, self.act_dim), device=obs.device)\n\n # One-hot encoding for all dimensions\n one_hot_all = torch.eye(self.act_dim, device=obs.device)\n\n # Predict each dimension autoregressively\n for i in range(self.act_dim):\n one_hot = one_hot_all[i][None, :].repeat(batch_size, 1)\n x = torch.cat([obs, rtg, act, one_hot], dim=1)\n for layer in self.model:\n x = layer(x)\n mean, logstd = torch.chunk(x, 2, dim=-1)\n logstd = soft_clamp(logstd, self.min_logstd, self.max_logstd)\n\n # logstd might be too small\n if deterministic:\n next_dim = mean\n else:\n assert logstd.exp() != float('nan'), f\"{logstd}\"\n if logstd.exp() == 0:\n next_dim = mean\n else:\n dist = Normal(mean, logstd.exp())\n next_dim = dist.sample()\n act = torch.cat([act[:, :i], next_dim, act[:, i + 1 :]], dim=1)\n\n return act\n\n def select_action(self, obs: np.ndarray, rtg: np.ndarray, deterministic: bool = False) -> np.ndarray:\n with torch.no_grad():\n obs = torch.tensor(obs, dtype=torch.float32).to(self.device)\n rtg = torch.as_tensor(rtg).type(torch.float32).to(self.device)\n action = self.forward(obs, rtg, deterministic)\n return action.cpu().numpy()\n\n def fit(self, obs, rtg, act, weights = None):\n batch_size = obs.size(0)\n\n # Generate all the one-hot vectors, expand by repeat\n one_hot_all = torch.eye(self.act_dim, device=obs.device)\n one_hot_full = one_hot_all.repeat_interleave(batch_size, dim=0)\n\n # Repeat act by act_dim times and mask by one-hot encoding\n mask = (\n torch.tril(torch.ones((self.act_dim, self.act_dim), device=obs.device))\n - one_hot_all\n ) # lower trig - diag\n mask_full = mask.repeat_interleave(batch_size, dim=0)\n act_full = act.repeat(self.act_dim, 1) # (batch*act_dim, act_dim)\n act_masked = act_full * mask_full\n\n # Repeat obs by act_dim times\n rtg = rtg.reshape(batch_size, 1)\n obs_rtg = torch.cat([obs, rtg], dim = 1)\n obs_rtg_full = obs_rtg.repeat(self.act_dim, 1)\n\n # Concatenate everything to get input\n input_full = torch.cat([obs_rtg_full, act_masked, one_hot_full], dim=1)\n\n # Use the one-hot vector as boolean mask to get target\n target = act_full[one_hot_full.bool()].unsqueeze(1)\n\n # Forward through model and compute loss\n x = input_full\n for layer in self.model:\n x = layer(x)\n mean, logstd = torch.chunk(x, 2, dim=-1)\n logstd = soft_clamp(logstd, self.min_logstd, self.max_logstd)\n if any(torch.isnan(mean)):\n torch.save(self.model.state_dict(), \"model_debug.pth\")\n torch.save(input_full, \"input_debug.pth\")\n raise Exception(f\"Mean is nan, input_full {input_full.detach().cpu().numpy()}\")\n dist = Normal(mean, logstd.exp())\n loss = -dist.log_prob(target)\n if weights is None:\n loss = loss.mean()\n else:\n loss = loss.reshape(loss.shape[0], -1) # (batch * act_dim, 1)\n weights = weights.reshape(weights.shape[0], -1) # (batch, 1)\n weights = weights.repeat(self.act_dim, 1) # (batch * act_dim, 1)\n loss = torch.sum(loss * weights) / (torch.sum(weights) * loss.shape[-1])\n return loss\n \n def learn(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n obss = obss.type(torch.float32).to(self.device)\n actions = 
actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n loss = self.fit(obss, rtgs, actions,weights)\n\n self.rcsl_optim.zero_grad()\n loss.backward()\n self.rcsl_optim.step()\n\n result = {\n \"loss\": loss.item(),\n }\n \n return result\n\n def validate(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n obss = obss.type(torch.float32).to(self.device)\n actions = actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n with torch.no_grad():\n loss = self.fit(obss, rtgs, actions, weights)\n return {\n \"holdout_loss\": loss.item()\n }" } ]
import numpy as np
import torch
import roboverse
import argparse
import os
import random
import pickle
import datetime
from copy import deepcopy
from typing import Dict, Tuple
from collections import defaultdict
from offlinerlkit.modules import EnsembleDynamicsModel
from offlinerlkit.dynamics import EnsembleDynamics
from offlinerlkit.utils.termination_fns import get_termination_fn
from offlinerlkit.utils.scaler import StandardScaler
from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
from offlinerlkit.utils.logger import Logger, make_log_dirs
from offlinerlkit.policy_trainer import RcslPolicyTrainer, DiffusionPolicyTrainer
from offlinerlkit.utils.none_or_str import none_or_str
from offlinerlkit.policy import SimpleDiffusionPolicy, AutoregressivePolicy
14,115
'''
Recommended hyperparameters:
pickplace, horizon=40, behavior_epoch=30
doubledraweropen, horizon=50, behavior_epoch=40
doubledrawercloseopen, horizon=80, behavior_epoch=40
'''

def get_args():
    parser = argparse.ArgumentParser()
    # general
    parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpdyn")
    parser.add_argument("--task", type=str, default="pickplace", help="task name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--last_eval", action="store_false")
    # env config
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace")
    # transformer_autoregressive dynamics
    parser.add_argument("--dynamics_lr", type=float, default=1e-3)
    parser.add_argument("--dynamics_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--dynamics_weight_decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4])
    parser.add_argument("--n_ensemble", type=int, default=7)
    parser.add_argument("--n_elites", type=int, default=5)
    parser.add_argument("--load_dynamics_path", type=none_or_str, default=None)
    # Behavior policy (diffusion)
    parser.add_argument("--behavior_epoch", type=int, default=30)
    parser.add_argument("--num_diffusion_iters", type=int, default=5, help="Number of diffusion steps")
    parser.add_argument('--behavior_batch', type=int, default=256)
    parser.add_argument('--load_diffusion_path', type=none_or_str, default=None)
    parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy")
    parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy")
    # Rollout
    parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs")
    parser.add_argument('--rollout_epoch', type=int, default=200, help="Max number of epochs to rollout the policy")
    parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout")
    parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time")
    # RCSL policy (mlp)
    parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--rcsl_lr", type=float, default=1e-3)
    parser.add_argument("--rcsl_batch", type=int, default=256)
    parser.add_argument("--rcsl_epoch", type=int, default=100)
    parser.add_argument("--eval_episodes", type=int, default=100)
    parser.add_argument("--holdout_ratio", type=float, default=0.2)

    return parser.parse_args()

def rollout_simple(
    init_obss: np.ndarray,
    dynamics,
'''
Recommended hyperparameters:
pickplace, horizon=40, behavior_epoch=30
doubledraweropen, horizon=50, behavior_epoch=40
doubledrawercloseopen, horizon=80, behavior_epoch=40
'''

def get_args():
    parser = argparse.ArgumentParser()
    # general
    parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpdyn")
    parser.add_argument("--task", type=str, default="pickplace", help="task name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu")
    parser.add_argument("--last_eval", action="store_false")
    # env config
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace")
    # transformer_autoregressive dynamics
    parser.add_argument("--dynamics_lr", type=float, default=1e-3)
    parser.add_argument("--dynamics_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--dynamics_weight_decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4])
    parser.add_argument("--n_ensemble", type=int, default=7)
    parser.add_argument("--n_elites", type=int, default=5)
    parser.add_argument("--load_dynamics_path", type=none_or_str, default=None)
    # Behavior policy (diffusion)
    parser.add_argument("--behavior_epoch", type=int, default=30)
    parser.add_argument("--num_diffusion_iters", type=int, default=5, help="Number of diffusion steps")
    parser.add_argument('--behavior_batch', type=int, default=256)
    parser.add_argument('--load_diffusion_path', type=none_or_str, default=None)
    parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy")
    parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy")
    # Rollout
    parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs")
    parser.add_argument('--rollout_epoch', type=int, default=200, help="Max number of epochs to rollout the policy")
    parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout")
    parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time")
    # RCSL policy (mlp)
    parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200])
    parser.add_argument("--rcsl_lr", type=float, default=1e-3)
    parser.add_argument("--rcsl_batch", type=int, default=256)
    parser.add_argument("--rcsl_epoch", type=int, default=100)
    parser.add_argument("--eval_episodes", type=int, default=100)
    parser.add_argument("--holdout_ratio", type=float, default=0.2)

    return parser.parse_args()

def rollout_simple(
    init_obss: np.ndarray,
    dynamics,
rollout_policy: SimpleDiffusionPolicy,
13
2023-10-11 08:36:06+00:00
16k
wilhelmagren/finq
finq/portfolio.py
[ { "identifier": "Asset", "path": "finq/asset.py", "snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str = \"Close\",\n pre_compute: bool = True,\n ):\n \"\"\" \"\"\"\n\n self._data = data\n self._name = name\n self._market = market\n self._index_name = index_name\n self._price_type = price_type\n self._pre_compute = pre_compute\n self._metrics = {}\n\n if pre_compute:\n log.info(\"pre-computing some common metrics...\")\n self.compute_common_metrics()\n log.info(\"OK!\")\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"\n Compare self with the other object. If ``other`` is of instance class\n ``Asset`` then compare their hashes. Otherwise ``False``.\n\n Parameters\n ----------\n other : Any\n The other object to compare equality against.\n\n Returns\n -------\n bool\n Whether or not they objects are equal.\n\n \"\"\"\n if isinstance(other, self.__class__):\n return hash(self) == hash(other)\n return False\n\n def __hash__(self) -> int:\n \"\"\"\n Compute a hash from the following attributes of the ``Asset`` object:\n (`_name`, `_market_`, `_index_name`, `_price_type`).\n\n NOTE: the ``Asset`` object is mutable, thus, the hash functionality\n can have unknown side effects... Use responsibly.\n\n Returns\n -------\n int\n The computed hash value.\n\n \"\"\"\n return hash(\n (\n len(self._data),\n self._data.mean(),\n self._data.std(),\n self._name,\n self._market,\n self._index_name,\n self._price_type,\n )\n )\n\n def __str__(self) -> str:\n \"\"\" \"\"\"\n\n format = f\"<{self.__class__.__name__} called `{self._name}`\"\n if self._market:\n format += f\" on {self._market}\"\n if self._index_name:\n format += f\" in {self._index_name}\"\n\n format += f\" (price type: {self._price_type})\"\n format += f\"\\n-- num samples:\\t\\t\\t{self._data.shape[0]}\"\n\n drm = self._metrics.get(\"daily_returns_mean\", None)\n if drm:\n format += f\"\\n-- daily returns mean:\\t\\t{drm:.5f}\"\n\n yrm = self._metrics.get(\"yearly_returns_mean\", None)\n if yrm:\n format += f\"\\n-- yearly returns mean:\\t\\t{yrm:.5f}\"\n\n yv = self._metrics.get(\"yearly_volatility\", None)\n if yv:\n format += f\"\\n-- yearly volatility:\\t\\t{yv:.5f}\"\n\n skew = self._metrics.get(\"skewness\", None)\n if skew:\n format += f\"\\n-- unbiased skewness:\\t\\t{self._metrics['skewness']:.5f}\"\n\n format += f\"\\nobject located at {hex(id(self))}>\"\n\n return format\n\n def compute_common_metrics(self):\n \"\"\" \"\"\"\n self._metrics[\"daily_returns\"] = self.period_returns(period=1)\n self._metrics[\"daily_returns_mean\"] = self.period_returns_mean(period=1)\n self._metrics[\"yearly_returns_mean\"] = self.period_returns_mean(period=252)\n self._metrics[\"yearly_volatility\"] = self.volatility(period=1, trading_days=252)\n self._metrics[\"skewness\"] = self.skewness()\n\n def period_returns(self, period: int = 1) -> pd.Series:\n \"\"\" \"\"\"\n return self._data.pct_change(periods=period)\n\n def period_returns_mean(self, period: int = 1) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).mean(axis=0)\n\n def volatility(\n self, period: int = 1, trading_days: int = 252\n ) -> np.typing.DTypeLike:\n \"\"\" \"\"\"\n return self.period_returns(period=period).std() * np.sqrt(trading_days)\n\n def skewness(self) -> np.float32:\n \"\"\"\n Computes the skewness of the saved data. 
Uses the ``Adjusted Fisher-Pearson\n standardized moment coefficient`` formula without bias [1, 2]. Skewness is a\n measure of the asymmetry of the probability distribution for a real-valued\n random variable around its mean.\n\n Returns\n -------\n np.float32\n The skewness measure for the saved historical price data.\n\n References\n ----------\n [1] Skewness calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html\n [2] Moment calculation on scipy.\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.moment.html\n\n \"\"\"\n return self._data.skew().astype(np.float32)\n\n @property\n def data(self) -> pd.Series:\n \"\"\"\n Return the saved data by accessing it as a property of the ``Asset`` object.\n\n Returns\n -------\n pd.Series\n A ``pd.Series`` copy of the saved data.\n\n \"\"\"\n return self._data\n\n @data.setter\n def data(self, data: pd.Series):\n \"\"\"\n Set the value of the data attribute for the ``Asset`` object.\n\n Parameters\n ----------\n data : pd.Series\n The new ``pd.Series`` to set as data attribute for the object.\n\n \"\"\"\n self._data = data\n\n @property\n def name(self) -> str:\n \"\"\"\n Get the name property of the ``Asset`` object.\n\n Returns\n -------\n str\n The name of the ``Asset``.\n\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name: str):\n \"\"\"\n Set the value of the name property for the ``Asset`` object.\n\n Parameters\n ----------\n name : str\n The new ``str`` to set as name attribute for the object.\n\n \"\"\"\n self._name = name\n\n def as_numpy(self, dtype: np.typing.DTypeLike = np.float32) -> np.ndarray:\n \"\"\"\n Return the saved data as an numpy array. It will have the shape (n_samples, ).\n\n Parameters\n ----------\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the ``pd.Series`` data.\n\n \"\"\"\n return self._data.to_numpy().astype(dtype)" }, { "identifier": "Dataset", "path": "finq/datasets/dataset.py", "snippet": "class Dataset(object):\n \"\"\"\n A collection of ticker symbols and their historical price data. Fetches information\n and prices from Yahoo! Finance and optionally saves them to a local path for later\n use. Supports fixing missing values by interpolating ``NaN`` and verifying the\n integrity of the fetched data.\n\n Parameters\n ----------\n names : list | None\n The names of the financial assets to create a dataset with.\n symbols : list | None\n The ticker symbols corresponding to the names of the financial assets.\n market : str\n The name of the market to fetch the historical price data from.\n Defaults to ``OMX``.\n index_name : str | None\n The name of the financial index to get ticker symbols and names from.\n proxy : str | None\n The name of the proxy url to use for REST requests.\n cache_name: Path | str\n The name of the path to the file which stores the cache.\n Defaults to ``/home/.finq/http_cache``.\n n_requests : int\n The max number of requests to perform per ``t_interval``. 
Defaults to ``5``.\n t_interval : int\n The time interval (in seconds) to use with the ``CachedRateLimiter``.\n Defaults to ``1``.\n save : bool\n Wether or not to save the fetched data to a local file path.\n save_path : Path | str\n The local file path to potentially save any fetched data to.\n Defaults to ``.data/dataset/``.\n dataset_name : str\n The name of the ``Dataset`` class instance.\n separator : str\n The csv separator to use when loading and saving any ``pd.DataFrame``.\n Defaults to ``;``.\n\n \"\"\"\n\n def __init__(\n self,\n names: Optional[List[str]] = None,\n symbols: Optional[List[str]] = None,\n *,\n market: str = \"OMX\",\n index_name: Optional[str] = None,\n proxy: Optional[str] = None,\n cache_name: Union[Path, str] = default_finq_cache_path(),\n n_requests: int = 5,\n t_interval: int = 1,\n save: bool = False,\n save_path: Union[Path, str] = default_finq_save_path(),\n dataset_name: str = \"dataset\",\n separator: str = \";\",\n filter_symbols: Callable = lambda s: s,\n ) -> Optional[InvalidCombinationOfArgumentsError]:\n \"\"\" \"\"\"\n\n log.info(\n \"creating cached rate-limited session with \"\n f\"{n_requests} requests per {t_interval} seconds\"\n )\n\n # We combine a cache with rate-limiting to avoid triggering\n # Yahoo! Finance's rate-limiter that can otherwise corrupt data.\n # We specify a maximum number of requests N per X seconds.\n session = CachedRateLimiter(\n cache_name=cache_name,\n limiter=Limiter(\n RequestRate(\n n_requests,\n Duration.SECOND * t_interval,\n ),\n ),\n )\n\n if proxy:\n session.proxies.update(\n {\n \"https\": proxy,\n }\n )\n\n self._proxy = proxy\n self._session = session\n self._n_requests = n_requests\n self._t_interval = t_interval\n\n if (not names or not symbols) and isinstance(index_name, str):\n if market == \"OMX\":\n\n def filter_symbols(s):\n return s.replace(\" \", \"-\") + \".ST\"\n\n names, symbols = fetch_names_and_symbols(\n index_name,\n market=market,\n session=session,\n filter_symbols=filter_symbols,\n )\n\n if not names or not symbols:\n raise InvalidCombinationOfArgumentsError(\n \"You did not pass in a list of names and symbols, and if you \"\n \"passed in an index name to fetch, the request failed since \"\n f\"`{names=}` and `{symbols=}`. 
Did you pass in a valid index name?\"\n )\n\n if not (len(names) == len(symbols)):\n raise InvalidCombinationOfArgumentsError(\n \"Number of names does not match the number of ticker symbols, \"\n f\"{len(names)} != {len(symbols)}.\\n{names=}\\n{symbols=}\"\n )\n\n self._data = None\n self._info = None\n\n self._names = names\n self._symbols = symbols\n self._market = market\n self._index_name = index_name\n\n self._save = save\n self._save_path = Path(save_path) / dataset_name\n self._dataset_name = dataset_name\n self._separator = separator\n\n def __getitem__(self, key: str) -> Optional[pd.DataFrame]:\n \"\"\"\n Get the ``pd.DataFrame`` from the locally stored dictionary which maps ticker\n symbols to their corresponding historical price data.\n\n Parameters\n ----------\n key : str\n The dictionary key to get data for.\n\n Returns\n -------\n pd.DataFrame\n The data that is associated with the provided ticker key.\n\n \"\"\"\n return self._data.get(key, None)\n\n def __len__(self) -> int:\n \"\"\"\n Get the number of names in the dataset.\n\n Returns\n -------\n int\n The number of names.\n\n \"\"\"\n return len(self._symbols)\n\n @staticmethod\n def _save_data(data: pd.DataFrame, path: Union[Path, str], separator: str):\n \"\"\"\n Save the historical price data for a ticker to a local csv file.\n\n Parameters\n ----------\n data : pd.DataFrame\n The ``pd.DataFrame`` to save as a csv file.\n path : Path | str\n The local file name to save the csv to.\n separator : str\n The csv separator to use when saving the data. Defaults to ``;``.\n\n \"\"\"\n data.to_csv(\n path,\n sep=separator,\n header=True,\n )\n\n @staticmethod\n def _save_info(info: dict, path: Union[Path, str]):\n \"\"\"\n Save the ticker information dictionary to a local file as a ``json`` object.\n\n Parameters\n ----------\n info : dict\n The ticker information dictionary to save as a ``json`` file.\n path : Path | str\n The local file name to save the dictionary to.\n\n \"\"\"\n with open(path, \"w\") as f:\n json.dump(info, f)\n\n @staticmethod\n def _load_data(path: Union[Path, str], separator: str) -> pd.DataFrame:\n \"\"\"\n Create a new ``pd.DataFrame`` from data that is stored locally as a ``csv``.\n\n Parameters\n ----------\n path : Path | str\n The local file path to read the csv from.\n separator : str\n The separator to use for parsing the csv.\n\n Returns\n -------\n pd.DataFrame\n The data that was stored in the csv.\n\n \"\"\"\n return pd.read_csv(path, sep=separator, index_col=\"Date\")\n\n @staticmethod\n def _load_info(path: Union[Path, str]) -> dict:\n \"\"\"\n Parameters\n ----------\n path : Path | str\n The local file path to read the json object from.\n\n Returns\n -------\n dict\n A dictionary containing the information for the ticker.\n\n \"\"\"\n with open(path, \"r\") as f:\n return json.load(f)\n\n @staticmethod\n def _extract_dates_from_data(data: pd.DataFrame) -> Tuple[List, Dict]:\n \"\"\"\n Extract the ``Date`` column from a ``pd.DataFrame`` and produce a sorted list of\n unique dates for the ticker.\n\n Parameters\n ----------\n data : pd.DataFrame\n The data to extract ``Date`` column from.\n\n Returns\n -------\n tuple\n A list of the unique dates (sorted in ascending order) and a dictionary\n containing all ticker dates as key: ``str`` and value: ``list``.\n\n \"\"\"\n dates = {}\n all_dates = []\n\n for ticker, df in data.items():\n dates[ticker] = df.index.to_list()\n all_dates.extend(dates[ticker])\n\n unique_dates = sorted(list(set(all_dates)), reverse=False)\n\n return 
(unique_dates, dates)\n\n def _save_tickers_data(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers data to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_data(\n self._data[ticker],\n self._save_path / \"data\" / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n log.info(\"OK!\")\n\n def _save_tickers_info(self):\n \"\"\" \"\"\"\n\n log.info(f\"saving fetched tickers info to {self._save_path}...\")\n\n for ticker in self._symbols:\n self._save_info(\n self._info[ticker],\n self._save_path / \"info\" / f\"{ticker}.json\",\n )\n\n log.info(\"OK!\")\n\n def _save_data_and_info(self):\n \"\"\"\n Saves the info and data objects to a local file path.\n\n \"\"\"\n\n self._save_tickers_data()\n self._save_tickers_info()\n\n def _fetch_tickers_data(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\" \"\"\"\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} data from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n data[ticker] = yf_ticker.history(\n period=period,\n proxy=self._proxy,\n )[\n cols\n ].tz_localize(None)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def _fetch_tickers_info(self):\n \"\"\" \"\"\"\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fetching ticker {ticker} info from Yahoo! Finance\")\n\n yf_ticker = yf.Ticker(ticker, session=self._session)\n info[ticker] = yf_ticker.get_info(proxy=self._proxy)\n\n self._info = info\n\n def _fetch_tickers_data_and_info(\n self,\n period: str,\n cols: List[str],\n ):\n \"\"\"\n Use the `yfinance` library to fetch historical ticker data for the specified time\n period. The performance of the REST requests is highly dependent on three things:\n the config of your `CachedRateLimiter`, the amount of tickers you want to fetch,\n and the multi-threading support of your CPU.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from.\n cols : list\n The columns of the fetched ticker data to collect.\n\n \"\"\"\n\n self._fetch_tickers_data(period, cols)\n self._fetch_tickers_info()\n\n def load_local_data_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n\n path = Path(self._save_path)\n data_path = path / \"data\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not data_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {data_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? 
To do that run `dataset.fetch_data(..)`.\"\n )\n\n data = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n data[ticker] = self._load_data(\n data_path / f\"{ticker}.csv\",\n separator=self._separator,\n )\n\n if not isinstance(data[ticker].index, pd.DatetimeIndex):\n data[ticker].index = pd.to_datetime(data[ticker].index)\n\n all_dates, dates = self._extract_dates_from_data(data)\n\n self._data = data\n self._dates = dates\n self._all_dates = all_dates\n\n def load_local_info_files(self) -> Optional[DirectoryNotFoundError]:\n \"\"\" \"\"\"\n path = Path(self._save_path)\n info_path = path / \"info\"\n\n if not path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {path} does not exist. Perhaps you haven't yet \"\n \"tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n if not info_path.is_dir():\n raise DirectoryNotFoundError(\n f\"The local save path {info_path} does not exist. Perhaps you haven't \"\n \"yet tried fetching any data? To do that run `dataset.fetch_data(..)`.\"\n )\n\n info = {}\n\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Loading ticker {ticker} data from local path {path}\")\n\n info[ticker] = self._load_info(\n info_path / f\"{ticker}.json\",\n )\n\n self._info = info\n\n def load_local_files(self):\n \"\"\"\n Load the locally saved info and data files. The info is read from file as a\n ``json`` and the data is read from ``csv`` as a ``pd.DataFrame``.\n\n Raises\n ------\n DirectoryNotFoundError\n When either of the paths to the saved ``info`` and ``data`` is not a directory.\n\n \"\"\"\n\n self.load_local_data_files()\n self.load_local_info_files()\n\n def fetch_data(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\"\n Fetch the historical ticker data for the specified time period. If there exists\n locally saved files for all tickers, will try and load them instead of fetching\n from Yahoo! Finance. Saves the fetched files if ``save=True`` was specified in\n the class constructor.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``).\n cols : list\n The columns of the fetched ticker data to collect. 
Defaults to\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n Dataset\n The initialized instance of ``self`` with ticker data loaded or fetched.\n\n \"\"\"\n\n if all_tickers_data_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local data files for {self.__class__.__name__}, \"\n \"attempting local load of data files...\"\n )\n\n try:\n self.load_local_data_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local data files, attempting new fetch...\")\n\n self._fetch_tickers_data(period, cols)\n\n if self._save:\n setup_finq_save_data_path(self._save_path)\n self._save_tickers_data()\n\n return self\n\n def fetch_info(\n self,\n ) -> Dataset:\n \"\"\" \"\"\"\n\n if all_tickers_info_saved(self._save_path, self._symbols):\n log.info(\n f\"found existing local info files for {self.__class__.__name__}, \"\n \"attempting local load of info files...\"\n )\n\n try:\n self.load_local_info_files()\n log.info(\"OK!\")\n return self\n\n except DirectoryNotFoundError:\n log.warning(\"failed to load local info files, attempting new fetch...\")\n\n self._fetch_tickers_info()\n\n if self._save:\n setup_finq_save_info_path(self._save_path)\n\n return self\n\n def fetch_data_and_info(\n self,\n period: str,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n ) -> Dataset:\n \"\"\" \"\"\"\n self = self.fetch_data(period, cols=cols)\n self = self.fetch_info()\n return self\n\n def fix_missing_data(\n self,\n *,\n cols: List[str] = [\"Open\", \"High\", \"Low\", \"Close\"],\n resave: bool = True,\n ) -> Dataset:\n \"\"\"\n Compares each tickers dates in their corresponding ``pd.DataFrame`` and compares\n to the known set of dates collected. If there are any missing values, will add\n the missing dates to the dataframe and then use ``df.interpolate()`` to fix them.\n Default interpolation strategy is ``linear``.\n\n Parameters\n ----------\n cols : list\n The columns of the ``pd.DataFrame`` to consider when looking for missing data\n to interpolate. 
Defaults to (``Open``, ``High``, ``Low``, ``Close``).\n resave : bool\n Whether or not to resave the data to local path after fixing missing values.\n Defaults to ``True`` but will onlyesave if there existed missing data.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n \"\"\"\n\n log.info(\"attempting to fix any missing data...\")\n\n n_missing_data = 0\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Fixing ticker {ticker} potential missing values\")\n\n df = self._data[ticker]\n diff = set(self._all_dates) - set(self._dates[ticker])\n\n if diff:\n n_missing_data += 1\n\n df_missed = pd.DataFrame(index=list(diff))\n df_missed.index.name = \"Date\"\n\n df_fixed = pd.concat((df, df_missed)).sort_index(inplace=False)\n df_fixed[cols] = df_fixed[cols].interpolate()\n\n if df_fixed[df_fixed.isnull().any(axis=1)].index.values.size:\n log.error(\n f\"failed to interpolate missing prices for ticker {ticker}!\"\n )\n\n self._data[ticker] = df_fixed\n self._dates[ticker] = self._all_dates\n\n if n_missing_data and resave:\n log.info(f\"fixed {n_missing_data} tickers with missing data\")\n if self._save:\n log.info(f\"saving fixed data to {self._save_path}...\")\n self._save_tickers_data()\n\n log.info(\"OK!\")\n return self\n\n def verify_data(self) -> Union[ValueError, Dataset]:\n \"\"\"\n Tries to verify that the stored data does not contain any missing values.\n This is performed by comparing the dates in each ticker ``pd.DataFrame``\n with the known set of all fetched dates.\n\n Returns\n -------\n Dataset\n The initialized instance of ``self``.\n\n Raises\n ------\n ValueError\n If there exists missing values in any stored ``pd.DataFrame``.\n\n \"\"\"\n\n log.info(\"verifying that stored data has no missing values...\")\n for ticker in (bar := tqdm(self._symbols)):\n bar.set_description(f\"Verifying ticker {ticker} data\")\n\n diff = set(self._all_dates) - set(self._dates[ticker])\n if diff:\n raise ValueError(\n f\"There is a difference in dates for symbol {ticker}, have you \"\n \"tried fixing missing values prior to verifying? To do that, run \"\n \"dataset.fix_missing_data() with your initialized Dataset class.\"\n )\n\n log.info(\"OK!\")\n return self\n\n def run(self, period: str = \"1y\") -> Dataset:\n \"\"\"\n Call the three core methods for the ``Dataset`` class which fetches data,\n tries to fix missing values, and lastly verifies that there is no missing data.\n\n Parameters\n ----------\n period : str\n The time period to try and fetch data from. Valid values are (``1d``,\n ``5d``, ``1mo``, ``3mo``, ``6mo``, ``1y``, ``2y``, ``5y``, ``10y``,\n ``ytd``, ``max``). 
Defaults to ``1y``.\n\n Returns\n -------\n Dataset\n The intialized instance of ``self``.\n\n \"\"\"\n return self.fetch_data(period).fix_missing_data().verify_data()\n\n def visualize_ticker(\n self,\n ticker: str,\n **kwargs: Dict[str, Any],\n ):\n \"\"\" \"\"\"\n\n if kwargs.get(\"title\", None) is None:\n kwargs[\"title\"] = f\"{ticker} historical OHLC prices [{self._market}]\"\n\n mpf.plot(\n self._data[ticker],\n **kwargs,\n )\n\n def visualize(\n self,\n *,\n title: str = \"Historical stock data\",\n xlabel: str = \"Dates\",\n ylabel: str = \"Closing price [$]\",\n ticks_rotation: int = 70,\n legend_loc: str = \"best\",\n log_scale: bool = False,\n save_path: Optional[str] = None,\n price_type: str = \"Close\",\n show: bool = True,\n block: bool = True,\n ):\n \"\"\"\n Plot the historical ticker price data over time.\n\n Parameters\n ----------\n title : str\n The header title to set on the generated plot.\n xlabel : str\n The label to use for the x-axis.\n ylabel : str\n The label to use for the y-axis.\n ticks_rotation : int\n The amount of degrees to rotate the x-axis ticks with. Defaults to ``70``.\n legend_loc : str\n The location of the legend. Some possible values are (``best``, ``center``,\n ``upper left``, ``upper right``, ``lower left``, ``lower right``).\n Defaults to ``best``.\n log_scale : bool\n ``True`` if the historical data should be log scaled, otherwise ``False``.\n save_path : str | None\n The local file to save the generated plot to. Does not save the plot if\n the argument is ``None``.\n price_type : str\n The price type of the historical data to plot. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n show : bool\n ``True`` if the generated plot should be shown on the screen, otherwise\n ``False``. Defaults to ``True``.\n block : bool\n Whether to wait for all figures to be closed before returning. When ``False``\n the figure windows will be displayed and returned immediately. Defaults to\n ``True``.\n\n \"\"\"\n\n for ticker, data in self._data.items():\n plt.plot(\n np.log(data[price_type]) if log_scale else data[price_type],\n label=ticker,\n )\n\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xticks(rotation=ticks_rotation)\n plt.legend(loc=legend_loc)\n\n if save_path:\n log.info(f\"saving plot to path {save_path}\")\n plt.savefig(save_path)\n log.info(\"OK!\")\n\n if show:\n plt.show(block=block)\n plt.close()\n\n def get_tickers(self) -> List[str]:\n \"\"\"\n Return the saved list of ticker symbols.\n\n Returns\n -------\n list\n A list of ``str`` ticker symbols.\n\n \"\"\"\n return self._symbols\n\n def get_data(self) -> Dict[str, pd.DataFrame]:\n \"\"\"\n Return the saved dictionary which maps ticker symbols to their\n corresponding historical data with the following columns:\n (``Date``, ``Open``, ``High``, ``Low``, ``Close``).\n\n Returns\n -------\n dict\n A dictionary with key: ``str`` and value: ``pd.DataFrame``.\n\n \"\"\"\n return self._data\n\n def as_assets(self, price_type: str = \"Close\") -> Dict[str, Asset]:\n \"\"\"\n Create a list of Assets for each ticker and specified price type.\n\n Parameters\n ----------\n price_type : str\n The price type data to create an ``Asset`` object with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). 
Defaults to ``Close``.\n\n Returns\n -------\n dict\n A dictionary of newly created ``Asset`` objects with ticker symbols as keys.\n\n \"\"\"\n return {\n ticker: Asset(\n self._data[ticker][price_type],\n self._names[i],\n market=self._market,\n index_name=self._index_name,\n price_type=price_type,\n pre_compute=False,\n )\n for i, ticker in enumerate(self._symbols)\n }\n\n def as_df(self, price_type: str = \"Close\") -> pd.DataFrame:\n \"\"\"\n Create an aggregated ``pd.DataFrame`` for the specified price type.\n It will have the shape (n_samples, n_tickers).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``pd.DataFrame`` object with. Has to\n be one of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n\n Returns\n -------\n pd.DataFrame\n A new ``pd.DataFrame`` with ticker names as columns.\n\n \"\"\"\n\n return pd.DataFrame(\n {t: d[price_type] for t, d in zip(self._symbols, self._data.values())},\n index=self._all_dates,\n )\n\n def as_numpy(\n self,\n price_type: str = \"Close\",\n *,\n dtype: np.typing.DTypeLike = np.float32,\n ) -> np.ndarray:\n \"\"\"\n Extract the specified price type from stored data as np.ndarray.\n It will have the shape (n_tickers, n_samples).\n\n Parameters\n ----------\n price_type : str\n The price type data to create the ``np.ndarray`` with. Has to be one\n of (``Open``, ``High``, ``Low``, ``Close``). Defaults to ``Close``.\n dtype : np.typing.DTypeLike\n The data type to create the new ``np.ndarray`` as.\n Defaults to ``np.float32``.\n\n Returns\n -------\n np.ndarray\n A new ``np.ndarray`` from the specified price type and dtype.\n\n \"\"\"\n return np.array(\n [d[price_type].to_numpy().astype(dtype) for d in self._data.values()]\n )" }, { "identifier": "FinqError", "path": "finq/exceptions.py", "snippet": "class FinqError(Exception):\n \"\"\" \"\"\"" }, { "identifier": "InvalidCombinationOfArgumentsError", "path": "finq/exceptions.py", "snippet": "class InvalidCombinationOfArgumentsError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "InvalidPortfolioWeightsError", "path": "finq/exceptions.py", "snippet": "class InvalidPortfolioWeightsError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "ObjectiveFunctionError", "path": "finq/exceptions.py", "snippet": "class ObjectiveFunctionError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "PortfolioNotYetOptimizedError", "path": "finq/exceptions.py", "snippet": "class PortfolioNotYetOptimizedError(FinqError):\n \"\"\" \"\"\"\n\n pass" }, { "identifier": "period_returns", "path": "finq/formulas.py", "snippet": "def period_returns(x: np.ndarray, period: int = 1) -> np.ndarray:\n \"\"\" \"\"\"\n\n return (x[:, period:] / x[:, :-period]) - 1" }, { "identifier": "sharpe_ratio", "path": "finq/formulas.py", "snippet": "def sharpe_ratio(\n r: Union[float, np.ndarray],\n v: Union[float, np.ndarray],\n rfr: float,\n) -> Union[float, np.ndarray]:\n \"\"\" \"\"\"\n\n return (r - rfr) / v" }, { "identifier": "weighted_returns", "path": "finq/formulas.py", "snippet": "def weighted_returns(w: np.ndarray, r: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, r)" }, { "identifier": "weighted_variance", "path": "finq/formulas.py", "snippet": "def weighted_variance(w: np.ndarray, cov: np.ndarray) -> np.ndarray:\n \"\"\" \"\"\"\n\n return np.dot(w, np.dot(cov, w.T))" } ]
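The context list above closes with finq's four formula helpers (period_returns, sharpe_ratio, weighted_returns, weighted_variance). As a quick orientation before the Portfolio code in the fields below, here is a minimal NumPy sketch of how those formulas compose into portfolio analytics. The synthetic price matrix and the equal weights are illustrative choices of mine; the 5e-3 risk-free rate simply mirrors the Portfolio constructor default shown further down.

import numpy as np

# formulas copied from the finq/formulas.py snippets in the context list above
def period_returns(x, period=1):
    return (x[:, period:] / x[:, :-period]) - 1

def weighted_returns(w, r):
    return np.dot(w, r)

def weighted_variance(w, cov):
    return np.dot(w, np.dot(cov, w.T))

def sharpe_ratio(r, v, rfr):
    return (r - rfr) / v

# synthetic prices: 3 assets, 252 trading days (illustrative only)
rng = np.random.default_rng(0)
prices = np.cumprod(1.0 + rng.normal(0.0, 0.01, size=(3, 252)), axis=1)

w = np.full(3, 1.0 / 3.0)                        # equal weights for the example
r = period_returns(prices, period=1)             # daily simple returns
mu = r.mean(axis=1)                              # per-asset mean daily return
cov = np.cov(r, rowvar=True)                     # daily covariance matrix
expected = weighted_returns(w, mu)               # portfolio expected return
volatility = np.sqrt(weighted_variance(w, cov))  # portfolio volatility
print(sharpe_ratio(expected, volatility, rfr=5e-3))

These are the same quantities the daily_* and @check_valid_weights methods of the Portfolio class compute in the cropped_code and all_code fields that follow.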
import logging import pandas as pd import numpy as np import scipy.optimize as scipyopt import matplotlib.pyplot as plt from functools import wraps from tqdm import tqdm from finq.asset import Asset from finq.datasets import Dataset from finq.exceptions import ( FinqError, InvalidCombinationOfArgumentsError, InvalidPortfolioWeightsError, ObjectiveFunctionError, PortfolioNotYetOptimizedError, ) from finq.formulas import ( period_returns, sharpe_ratio, weighted_returns, weighted_variance, ) from typing import ( Any, Callable, List, Dict, Tuple, Union, Optional, )
10,853
return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights
""" MIT License Copyright (c) 2023 Wilhelm Ågren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. File created: 2023-10-20 Last updated: 2023-11-10 """ log = logging.getLogger(__name__) class Portfolio(object): """ """ # For a full list of `scipy` optimization methods and references, see the link below. # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html _supported_optimization_methods = ( "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ) _weight_initializations = { "lognormal": np.random.lognormal, "normal": np.random.normal, "uniform": np.random.uniform, } def __init__( self, data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame], *, weights: Optional[np.ndarray] = None, names: Optional[Union[Dict[str, str], List[str]]] = None, symbols: Optional[Union[Dict[str, str], List[str]]] = None, confidence_level: float = 0.95, risk_free_rate: float = 5e-3, n_trading_days: int = 252, objective_function: Optional[Callable] = None, objective_function_args: Tuple[Any, ...] = (), objective_bounds: Optional[List[Tuple[int, ...]]] = None, objective_constraints: Optional[Tuple[Dict, ...]] = None, ): """ """ if isinstance(data, Dataset): assets = data.as_assets() data = list(assets.values()) symbols = list(assets.keys()) if not isinstance(data, list): if names is None and symbols is None and not isinstance(data, pd.DataFrame): raise InvalidCombinationOfArgumentsError( "You need to provide the names and ticker symbols of each asset that you " "want to include in your portfolio if the data you provided is neither a " "`list` of `Asset` objects or a `pd.DataFrame`. You can also try " "providing only one of the arguments `names` and `symbols`, but then as " "a dictionary of the form `key=name` `value=symbol`." 
) if isinstance(data, list): symbols = [a.name for a in data] data = np.array([a.data for a in data]) if isinstance(data, pd.DataFrame): symbols = data.columns data = data.to_numpy().T if isinstance(names, dict): symbols = list(names.values()) names = list(names.keys()) if isinstance(symbols, dict): names = list(symbols.keys()) symbols = list(symbols.values()) self._data = data self._weights = weights self._names = names self._symbols = symbols self._confidence_level = confidence_level self._risk_free_rate = risk_free_rate self._n_trading_days = n_trading_days self._random_portfolios = None self._objective_function = objective_function self._objective_function_args = objective_function_args self._objective_bounds = objective_bounds self._objective_constraints = objective_constraints def weights_are_normalized(self) -> bool: """ """ return np.allclose(self._weights.sum(), 1.0, rtol=1e-6) def initialize_random_weights( self, distribution: Union[str, Callable], *args: Tuple[Any, ...], **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) weights = distribution(*args, **kwargs) self._weights = weights / weights.sum() def check_valid_weights(func) -> Callable: """ """ @wraps(func) def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]: """ """ if self._weights is None: raise PortfolioNotYetOptimizedError( "Portfolio weights are `None`. Perhaps you have not yet optimized it? " ) if not self.weights_are_normalized(): raise InvalidPortfolioWeightsError( "Your portfolio weights are not normalized. Make sure to normalize them " "(they sum to one) before calculating any analytical quantities. 
" ) return func(self, *args, **kwargs) return _check_valid_weights def daily_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=1) def yearly_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=self._n_trading_days) def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights
def sharpe_ratio(self) -> float:
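The next_line value above gives only the signature of the gold completion; its body is not part of this row. Purely as a hedged guess at what a Sharpe-ratio method over these pieces would compute, here is a standalone sketch combining the expected_returns/volatility logic from cropped_code with the sharpe_ratio formula from the context. The function name, argument shapes, and the default risk-free rate are my assumptions, not the dataset's gold snippet.

import numpy as np

def portfolio_sharpe_ratio(weights, daily_returns, risk_free_rate=5e-3):
    # assumed shapes: weights (n_assets,), daily_returns (n_assets, n_samples)
    mu = np.dot(weights, daily_returns.mean(axis=1))                              # expected return
    var = np.dot(weights, np.dot(np.cov(daily_returns, rowvar=True), weights.T))  # weighted variance
    return (mu - risk_free_rate) / np.sqrt(var)                                   # (r - rfr) / v

r = np.random.default_rng(1).normal(0.0, 0.01, size=(3, 100))
print(portfolio_sharpe_ratio(np.full(3, 1.0 / 3.0), r))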
8
2023-10-09 19:02:54+00:00
16k
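That closes the finq row (next_line, gold_snippet_index 8, created_at, level 16k). Presumably the task these fields encode is: given cropped_code plus the retrieved context snippets, predict next_line, with gold_snippet_index plausibly pointing at the context entry needed for the completion (here, the sharpe_ratio formula). A tiny sketch of how such a row might be scored; the field names follow the schema at the top of this dump, while the whitespace-insensitive matching rule is my own illustrative choice.

def exact_match(prediction: str, gold: str) -> bool:
    # compare a predicted next line with the gold one, ignoring surrounding whitespace
    return prediction.strip() == gold.strip()

row = {
    "next_line": "def sharpe_ratio(self) -> float:",
    "gold_snippet_index": 8,
}
prediction = "def sharpe_ratio(self) -> float:"   # stand-in for a model output
print(exact_match(prediction, row["next_line"]))  # True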
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sampling_helpers.py", "snippet": "def get_model(cfg_path=\"configs/latent-diffusion/cin256-v2.yaml\", ckpt_path=\"models/ldm/cin256-v2/model.ckpt\"):\n config = OmegaConf.load(cfg_path)\n model = load_model_from_config(config, ckpt_path)\n return model" }, { "identifier": "_unmap_img", "path": "sampling_helpers.py", "snippet": "def _unmap_img(x, from_image_net_dist=False):\n \"\"\"\n from 0 to 1 to -1 to 1\n \"\"\"\n\n return 2. * x - 1" }, { "identifier": "generate_samples", "path": "sampling_helpers.py", "snippet": "def generate_samples(\n model, \n sampler, \n target_y, \n ddim_steps, \n scale, \n init_image=None, \n t_enc=None,\n init_latent=None, \n ccdddim=False, \n ddim_eta=0., \n latent_t_0=True, \n prompts: list = None,\n seed: int = 0\n):\n torch.cuda.empty_cache()\n \n all_samples = []\n all_probs = []\n all_videos = []\n all_masks = []\n all_cgs = []\n\n with torch.no_grad():\n with model.ema_scope():\n tic = time.time()\n print(f\"rendering target classes '{target_y}' in {len(sampler.ddim_timesteps)} or {ddim_steps} steps and using s={scale:.2f}.\")\n batch_size = target_y.shape[0]\n if \"class_label\" == model.cond_stage_key: # class-conditional\n uc = model.get_learned_conditioning({model.cond_stage_key: torch.tensor(batch_size * [1000]).to(model.device)})\n c = model.get_learned_conditioning({model.cond_stage_key: target_y.to(model.device)})\n elif \"txt\" == model.cond_stage_key: # text-conditional\n uc = model.get_learned_conditioning(batch_size * [\"\"])\n if prompts is None:\n raise ValueError(\"Prompts are not defined!\")\n c = model.get_learned_conditioning(prompts)\n else:\n raise NotImplementedError\n \n if init_latent is not None:\n if seed!=-1:\n noises_per_batch = []\n for b in range(batch_size):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n noises_per_batch.append(torch.randn_like(init_latent[b]))\n noise = torch.stack(noises_per_batch, dim=0)\n else:\n noise = None\n z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * (batch_size)).to(\n init_latent.device), noise=noise) if not latent_t_0 else init_latent\n\n if seed!=-1:\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # decode it\n if ccdddim:\n out = sampler.decode(\n z_enc, \n c, \n t_enc, \n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc, \n y=target_y.to(model.device), \n latent_t_0=latent_t_0,\n )\n samples = out[\"x_dec\"]\n prob = out[\"prob\"]\n vid = out[\"video\"]\n mask = out[\"mask\"]\n cg = out[\"concensus_regions\"]\n\n else:\n samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=scale,\n unconditional_conditioning=uc)\n\n x_samples = model.decode_first_stage(samples)\n x_samples_ddim = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n cat_samples = x_samples_ddim #torch.cat([init_image[:1], x_samples_ddim], dim=0)\n else:\n\n samples_ddim, _ = sampler.sample(S=ddim_steps,\n conditioning=c,\n batch_size=batch_size,\n shape=[3, 64, 64],\n verbose=False,\n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc,\n eta=ddim_eta)\n\n x_samples_ddim = model.decode_first_stage(samples_ddim)\n x_samples_ddim = 
torch.clamp((x_samples_ddim + 1.0) / 2.0,\n min=0.0, max=1.0)\n cat_samples = x_samples_ddim\n\n all_samples.append(cat_samples)\n all_probs.append(prob) if ccdddim and prob is not None else None\n all_videos.append(vid) if ccdddim and vid is not None else None\n all_masks.append(mask) if ccdddim and mask is not None else None\n all_cgs.append(cg) if ccdddim and cg is not None else None\n tac = time.time()\n\n out = {}\n out[\"samples\"] = all_samples\n out[\"probs\"] = all_probs if len(all_probs) > 0 else None\n out[\"videos\"] = all_videos if len(all_videos) > 0 else None\n out[\"masks\"] = all_masks if len(all_masks) > 0 else None\n out[\"cgs\"] = all_cgs if len(all_cgs) > 0 else None\n \n return out" }, { "identifier": "load_model_hf", "path": "sampling_helpers.py", "snippet": "def load_model_hf(repo_id, filename, dir, ckpt_config_filename, device='cpu'):\n cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)\n\n args = SLConfig.fromfile(cache_config_file)\n args.device = device\n model = build_model(args)\n\n cache_file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=dir)\n checkpoint = torch.load(cache_file, map_location='cpu')\n log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)\n print(\"Model loaded from {} \\n => {}\".format(cache_file, log))\n _ = model.eval()\n return model.to(device)" }, { "identifier": "CCMDDIMSampler", "path": "ldm/models/diffusion/cc_ddim.py", "snippet": "class CCMDDIMSampler(object):\n def __init__(self, model, classifier, model_type=\"latent\", schedule=\"linear\", guidance=\"free\", lp_custom=False,\n deg_cone_projection=10., denoise_dist_input=True, classifier_lambda=1, dist_lambda=0.15,\n enforce_same_norms=True, seg_model=None, detect_model=None, masked_guidance=False,\n backprop_diffusion=True, log_backprop_gradients: bool = False, mask_alpha = 5., cone_projection_type= 'default', self_recurrence=0, classifier_wrapper: bool = True, record_intermediate_results:bool=False, verbose:bool=True,**kwargs):\n\n super().__init__()\n self.model_type = model_type\n self.lp_custom = lp_custom\n self.images = []\n self.probs = []\n self.classifier_lambda = classifier_lambda\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.classifier = classifier\n self.guidance = guidance\n self.backprop_diffusion = backprop_diffusion\n self.log_backprop_gradients = log_backprop_gradients\n # self.projected_counterfactuals = projected_counterfactuals\n self.deg_cone_projection = deg_cone_projection\n self.cone_projection_type = cone_projection_type\n self.denoise_dist_input = denoise_dist_input\n self.dist_lambda = dist_lambda\n self.enforce_same_norms = enforce_same_norms\n self.seg_model = seg_model\n self.masked_guidance = masked_guidance\n self.mask_alpha = mask_alpha\n self.self_recurrence = self_recurrence\n self.classifier_wrapper = classifier_wrapper\n self.record_intermediate_results = record_intermediate_results\n self.verbose = verbose\n\n self.init_images = None\n self.init_labels = None \n self.mask = None\n self.concensus_regions = []\n \n self.detect_model = detect_model\n self.classification_criterion = torch.nn.CrossEntropyLoss()\n self.binary_classification_criterion = torch.nn.BCEWithLogitsLoss()\n \n self.dino_pipeline = False\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n self.distance_criterion = DinoLoss(dino=torch.hub.load('facebookresearch/dino:main', 'dino_vitb16').eval(), 
loss_identifier=self.lp_custom.split(\"_\")[-1])\n self.dino_init_features = None\n self.dino_pipeline = True\n elif isinstance(self.lp_custom, int):\n if self.lp_custom == 1:\n self.distance_criterion = torch.nn.L1Loss(reduction='sum')\n elif self.lp_custom == 2:\n self.distance_criterion = torch.nn.MSELoss(reduction='sum')\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n def get_classifier_dist(self, x, t=None):\n \"\"\"\n Create a distribution over the classifier output space\n Args:\n x: input image for which to create the distribution over the classifier output space range [-1, 1]\n\n Returns:\n dist: torch distribution over the classifier output space\n\n \"\"\"\n x = tf.center_crop(x, 224)\n x = normalize(_map_img(x))\n logit = self.classifier(x) # (TODO) add option for t here\n dist = torchd.independent.Independent(OneHotDist(logit, validate_args = False), 0) # 0 here is the batch dimension, so event_shape is (num_classes, )\n return dist\n\n def get_classifier_logits(self, x, t=None):\n \"\"\"\n Returns classifier logits\n Args:\n x: input image for which to create the prediction\n\n Returns:\n logits: logits of output layer of target model\n\n \"\"\"\n x = _map_img(x)\n if not self.classifier_wrapper: # only works for ImageNet!\n x = tf.center_crop(x, 224)\n x = normalize(x)\n return self.classifier(x)\n\n def get_dino_features(self, x, device):\n x = normalize(_map_img(tf.center_crop(x, output_size=224)))\n return self.distance_criterion.dino(x.to(device))\n\n def get_mask_clip_seg(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n if self.mask is not None:\n return self.mask\n\n prompts = []\n\n for l in self.init_labels:\n prompts.append(re.sub(r'\\b(\\w)', lambda m: m.group(1).upper(), i2h[l]))\n\n with torch.no_grad():\n img_to_seg = F.interpolate(normalize(self.init_images), size=(352, 352), mode='bilinear',\n align_corners=False).to(self.init_images.device)\n preds = self.seg_model(img_to_seg, prompts)[0]\n preds = F.interpolate(preds, size=self.init_images.shape[-2:], mode='bilinear', align_corners=False)\n preds = torch.sigmoid(preds) # torch.softmax(preds.view(preds.shape[0], -1), dim=1).view(*preds.shape)\n # penalty = 1-preds\n preds = (preds - preds.min()) / (preds.max() - preds.min())\n preds = torch.sigmoid(self.mask_alpha*2*(preds-0.5))\n self.mask = preds.to(self.init_images.device)\n return self.mask\n\n def get_mask(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n\n if self.mask is not None:\n return self.mask\n\n with torch.no_grad():\n print(\"input range\", self.init_images.min(), self.init_images.max())\n image_int8 = (self.init_images[0].permute(1, 2, 0).cpu().numpy() * 255.).astype(np.uint8)\n # detected_boxes = detect(image, text_prompt=i2h[label], model=groundingdino_model, image_source=image_image)\n detected_boxes = detect(normalize(self.init_images[0]).squeeze(),\n text_prompt=i2h[self.init_labels[0]].split(',')[0],\n model=self.detect_model) # , image_source=image_int8)\n segmented_frame_masks = segment(image_int8, self.seg_model, boxes=detected_boxes)\n preds = torch.any(segmented_frame_masks, dim=0)\n preds = preds.unsqueeze(0).repeat(self.init_images.shape[0], *(1,) * len(preds.shape))\n # print(\"preds range after first seg \", preds.min(), preds.max())\n self.mask = 
preds.to(self.init_images.device)\n\n return self.mask\n\n def get_output(self, x, t, c, index, unconditional_conditioning, use_original_steps=True, quantize_denoised=True,\n return_decoded=False, return_pred_latent_x0=False):\n b, device = x.shape[0], x.device\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n with torch.enable_grad() if self.backprop_diffusion else torch.no_grad():\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n\n if return_decoded:\n # getting the original denoised image\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n # current prediction for x_0\n # get the original image with range [0, 1] if it is in latent space\n pred_latent_x0 = (x - sqrt_one_minus_at * e_t_uncond) / a_t.sqrt() # e_t - > e_t_uncond\n if quantize_denoised:\n pred_latent_x0, _, *_ = self.model.first_stage_model.quantize(pred_latent_x0)\n\n pred_x0 = self.model.differentiable_decode_first_stage(\n pred_latent_x0) # if self.model_type == \"latent\" else pred_latent_x0\n # pred_x0 = torch.clamp((pred_x0 + 1.0) / 2.0, min=0.0, max=1.0)\n \n if return_pred_latent_x0:\n return e_t_uncond, e_t, pred_x0, pred_latent_x0\n else:\n return e_t_uncond, e_t, pred_x0\n else:\n return e_t_uncond, e_t\n\n def conditional_score(self, x, t, c, index, use_original_steps, quantize_denoised, unconditional_guidance_scale=1.,\n unconditional_conditioning=None, y=None):\n \"\"\"\n\n Args:\n x: input image\n t: time step\n c: conditioning\n index: index for the schedule\n use_original_steps: whether to use the original steps\n quantize_denoised: whether to quantize the denoised image\n unconditional_guidance_scale: scale for the unconditional guidance\n unconditional_conditioning: unconditional conditioning\n y: target class\n\n\n Returns:\n e_t: score after conditioning\n\n \"\"\"\n b, *_, device = *x.shape, x.device\n x = x.detach() # .requires_grad_()\n # x.requires_grad = True\n prob_best_class = None\n mask_guidance = None\n\n ## check if gradient tracking is on for x\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n if self.guidance == \"free\":\n e_t_uncond, e_t, pred_x0 = self.get_output(x, t, c, index, unconditional_conditioning, use_original_steps,\n quantize_denoised, return_decoded=True)\n\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n score_out = torch.zeros_like(x)\n\n with torch.enable_grad():\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n\n with torch.no_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom: # retain_graph causes cuda oom issues for dino distance regularizer...\n with torch.enable_grad():\n 
pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.dino_init_features.to(x.device).detach())\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=False)[0]\n elif self.lp_custom:\n with torch.enable_grad():\n pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.init_images.to(x.device))\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=True)[0]\n \n if self.classifier_lambda != 0:\n with torch.enable_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n pred_logits = self.get_classifier_logits(pred_x0)\n if len(pred_logits.shape) == 2: # multi-class\n log_probs = torch.nn.functional.log_softmax(pred_logits, dim=-1)\n log_probs = log_probs[range(log_probs.size(0)), y.view(-1)]\n prob_best_class = torch.exp(log_probs).detach()\n else: # binary\n loss = self.binary_classification_criterion(pred_logits, y)\n loss *= -1 # minimize this\n log_probs = loss\n prob_best_class = pred_logits.sigmoid().detach()\n\n if self.log_backprop_gradients: pred_latent_x0.retain_grad()\n\n if self.dino_pipeline:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=False)[0]\n else:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier2 = torch.autograd.grad(log_probs[0].sum(), x_noise, retain_graph=False)[0]\n\n if self.log_backprop_gradients:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_t_sqrt = a_t.sqrt()\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n grad_pred_latent_x0 = pred_latent_x0.grad.data\n grad_unet_wrt_zt = (grad_classifier*a_t_sqrt/grad_pred_latent_x0 - 1)*(-1/sqrt_one_minus_at)\n\n cossim = torch.nn.CosineSimilarity()\n cossim_wpre = cossim(grad_classifier.view(2, -1), grad_pred_latent_x0.view(2, -1))\n \n print(torch.norm(grad_classifier, dim=(2,3)), torch.norm(grad_pred_latent_x0, dim=(2,3)), torch.norm(grad_unet_wrt_zt, dim=(2,3)))\n print(cossim_wpre)\n\n # assert e_t_uncond.requires_grad == True and e_t.requires_grad == True, \"e_t_uncond and e_t should require gradients\"\n\n # if self.guidance == \"projected\":\n implicit_classifier_score = (e_t - e_t_uncond) # .detach()\n # check gradient tracking on implicit_classifier_score\n assert implicit_classifier_score.requires_grad == False, \"implicit_classifier_score requires grad\"\n\n if self.lp_custom or self.classifier_lambda != 0:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n\n if self.classifier_lambda != 0:\n classifier_score = -1 * grad_classifier * (1 - a_t).sqrt()\n assert classifier_score.requires_grad == False, 
\"classifier_score requires grad\"\n # project the gradient of the classifier on the implicit classifier\n\n\n projection_fn = cone_project if self.cone_projection_type == \"default\" else cone_project_chuncked\n projection_fn = cone_project_chuncked_zero if \"zero\" in self.cone_projection_type else projection_fn\n \n \n proj_out = projection_fn(implicit_classifier_score.view(x.shape[0], -1),\n classifier_score.view(x.shape[0], -1),\n self.deg_cone_projection,\n orig_shp=implicit_classifier_score.shape) \\\n if self.guidance == \"projected\" else classifier_score\n \n classifier_score = proj_out if self.cone_projection_type == \"default\" else proj_out[0].view_as(classifier_score)\n concensus_region = proj_out[1].unsqueeze(1) if self.cone_projection_type == \"binning\" else None\n #print(classifier_score.shape, concensus_region.shape)\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(classifier_score,\n implicit_classifier_score) # e_t_uncond (AWAREE!!)\n classifier_score = self.classifier_lambda * score_\n\n else:\n classifier_score *= self.classifier_lambda\n\n score_out += classifier_score\n\n # distance gradients\n if self.lp_custom:\n\n lp_score = -1 * lp_grad * (1 - a_t).sqrt()\n\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(lp_score,\n implicit_classifier_score)\n lp_score = self.dist_lambda * score_\n\n else:\n\n lp_score *= self.dist_lambda\n\n score_out -= lp_score\n\n e_t = e_t_uncond + unconditional_guidance_scale * score_out # (1 - a_t).sqrt() * grad_out\n\n \n if self.record_intermediate_results:\n # adding images to create a gif\n pred_x0_copy = pred_x0.clone().detach()\n img = torch.clamp(_map_img(pred_x0_copy), min=0.0, max=1.0)\n #img = torch.permute(img, (1, 2, 0, 3)).reshape((img.shape[1], img.shape[2], -1))\n\n self.images.append(img.detach().cpu())\n if self.classifier_lambda != 0 and self.cone_projection_type == \"binning\":\n self.concensus_regions.append(concensus_region.detach().cpu())\n \n if prob_best_class is not None:\n self.probs.append(prob_best_class.detach().cpu())\n\n return e_t\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n #pass\n # TODO: this is a hack to make it work on CPU\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)\n #print(\"DDIM timesteps: \", self.ddim_timesteps, \"with length: \", len(self.ddim_timesteps))\n #print all input parameters\n #print(\"DDIM parameters: \", self.ddim_timesteps, ddim_discretize, ddim_eta)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta, verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, ):\n\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 
'pred_x0': [img]}\n time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, y=None):\n b, *_, device = *x.shape, x.device\n\n e_t = self.conditional_score(x=x, c=c, t=t, index=index, use_original_steps=use_original_steps,\n quantize_denoised=quantize_denoised,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas).to(x0.device)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas.to(x0.device)\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, y=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, latent_t_0=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n if self.masked_guidance:\n print(\"### Getting the mask ###\")\n mask = self.get_mask()\n mask = F.interpolate(mask.to(torch.uint8), size=x_latent.shape[-2:])\n # mask = self.get_mask()\n # mask = F.interpolate(mask, size=x_latent.shape[-2:], mode='bilinear', align_corners=True)\n # mask = (mask - mask.min()) / (mask.max() - mask.min())\n # mask[mask < 0.5] = 0.\n # mask[mask >= 0.5] = 1.\n\n if self.verbose:\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n else:\n iterator = range(time_range)\n\n # if latent_t_0:\n # x_orig = x_latent\n # x_dec = self.stochastic_encode(x_latent.clone(),\n # torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n # else:\n x_dec = x_latent if not latent_t_0 else self.stochastic_encode(x_latent.clone(), torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n for i, step in enumerate(iterator):\n tic = time.time()\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n\n if self.masked_guidance and latent_t_0:\n #print(\"blending with original image\")\n img_orig = self.model.q_sample(x_latent.clone(), ts)\n x_dec = img_orig * (1. 
- mask) + (mask) * x_dec\n\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n x_dec = x_dec.detach()\n for j in range(self.self_recurrence):\n print(\"self recurrence\")\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale = 1)\n\n #workaround for long running time\n elapsed_time = time.time() - tic\n if elapsed_time > 6:\n print(f\"Iteration time {elapsed_time} exceeded limit 6 secs, terminating program...\")\n print(\"x_dec device: \", x_dec.device)\n sys.exit(1) # Terminate the program with exit code 1 (indicating an error) \n \n out = {}\n out['x_dec'] = x_dec\n out['video'] = torch.stack(self.images, dim=1) if len(self.images) != 0 else None\n out[\"mask\"] = self.mask.to(torch.float32) if self.mask is not None else None\n # print(f\"Video shape: {out['video'].shape}\")\n #out['prob'] = self.probs[-1].item() if len(self.probs) != 0 else None\n out['prob'] = self.probs[-1].detach().cpu().numpy() if len(self.probs) != 0 else None\n out['concensus_regions'] = torch.stack(self.concensus_regions, dim=1) if len(self.concensus_regions) != 0 else None\n #print(out['concensus_regions'].shape, (out[\"concensus_regions\"]>200).to(torch.float32).mean())\n self.images = []\n self.probs = []\n \n self.concensus_regions = []\n self.mask = None\n\n return out" }, { "identifier": "name_map", "path": "data/imagenet_classnames.py", "snippet": "" }, { "identifier": "DecisionDensenetModel", "path": "utils/DecisionDensenetModel.py", "snippet": "class DecisionDensenetModel(nn.Module):\n\n def __init__(self, num_classes=40, pretrained=False, query_label=-1):\n super().__init__()\n self.feat_extract = DenseNet121(pretrained=pretrained)\n self.classifier = nn.Linear(self.feat_extract.output_size, num_classes)\n self.query_label = query_label\n\n def forward(self, x, before_sigmoid=True):\n\n x = self.feat_extract(x)\n x = self.classifier(x)\n if not before_sigmoid:\n x = torch.sigmoid(x)\n return x[:, self.query_label]" }, { "identifier": "Normalizer", "path": "utils/preprocessor.py", "snippet": "class Normalizer(torch.nn.Module):\n '''\n normalizing module. Useful for computing the gradient\n to a x image (x in [0, 1]) when using a classifier with\n different normalization inputs (i.e. 
f((x - mu) / sigma))\n '''\n def __init__(self, classifier,\n mu=[0.485, 0.456, 0.406],\n sigma=[0.229, 0.224, 0.225]):\n super().__init__()\n self.classifier = classifier\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "CropAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class CropAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224, mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n # x = F.center_crop(x, self.crop_size)\n x = self.center_crop(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "ResizeAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class ResizeAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, resolution: tuple=(224, 224), mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.resolution = resolution\n self.resize = torchvision.transforms.Resize(resolution)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.resize(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "GenericPreprocessing", "path": "utils/preprocessor.py", "snippet": "class GenericPreprocessing(torch.nn.Module):\n def __init__(self, classifier, preprocessor) -> None:\n super().__init__()\n self.classifier = classifier\n self.preprocessor = preprocessor\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.preprocessor(x)\n return self.classifier(x)" }, { "identifier": "Crop", "path": "utils/preprocessor.py", "snippet": "class Crop(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.center_crop(x)\n return self.classifier(x)" }, { "identifier": "VisionLanguageWrapper", "path": "utils/vision_language_wrapper.py", "snippet": "class VisionLanguageWrapper(nn.Module):\n def __init__(self, model, tokenizer, prompts) -> None:\n super().__init__()\n self.model = model\n self.tokenizer = tokenizer\n self.prompts = prompts\n\n device = next(self.model.parameters()).device\n\n text = tokenizer(prompts)\n with torch.no_grad():\n self.text_features = model.encode_text(text.to(device))\n self.text_features = self.text_features / self.text_features.norm(dim=-1, keepdim=True)\n\n def forward(self, x):\n image_features = self.model.encode_image(x)\n image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n logits = 100.0 * image_features @ self.text_features.T\n return logits" }, { "identifier": "MadryNet", "path": "utils/madry_net.py", "snippet": "def MadryNet(ckpt, device):\n norm = \"l2\"\n model = load_model(\n modelname=\"Engstrom2019Robustness\", norm=norm, device=device\n )\n 
state_dict = torch.load(ckpt, map_location=\"cpu\")\n model.model.load_state_dict(state_dict, strict=True)\n return model" }, { "identifier": "LinearClassifier", "path": "utils/dino_linear.py", "snippet": "class LinearClassifier(nn.Module):\n \"\"\"Linear layer to train on top of frozen features\"\"\"\n def __init__(self, dim, num_labels=1000):\n super(LinearClassifier, self).__init__()\n self.num_labels = num_labels\n self.linear = nn.Linear(dim, num_labels)\n self.linear.weight.data.normal_(mean=0.0, std=0.01)\n self.linear.bias.data.zero_()\n\n def forward(self, x):\n # flatten\n x = x.view(x.size(0), -1)\n\n # linear layer\n return self.linear(x)" }, { "identifier": "DINOLinear", "path": "utils/dino_linear.py", "snippet": "class DINOLinear(nn.Module):\n def __init__(self, dino, linear_classifier) -> None:\n super().__init__()\n self.dino = dino\n self.linear = linear_classifier\n \n def forward(self, x):\n x = self.dino(x)\n return self.linear(x)" } ]
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
11,609
torch.backends.cuda.matmul.allow_tf32 = True
# torch.backends.cudnn.benchmark = True
try:
    import open_clip
except:
    print("Install OpenClip via: pip install open_clip_torch")


def set_seed(seed: int = 0):
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.cuda.manual_seed_all(seed)


def blockPrint():
    sys.stdout = open(os.devnull, 'w')


def get_classifier(cfg, device):
    if "ImageNet" in cfg.data._target_:
        classifier_name = cfg.classifier_model.name
        if classifier_name == "robust_resnet50":
            classifier_model = MadryNet(cfg.classifier_model.ckpt, device)
            if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper:
torch.backends.cuda.matmul.allow_tf32 = True
# torch.backends.cudnn.benchmark = True
try:
    import open_clip
except:
    print("Install OpenClip via: pip install open_clip_torch")


def set_seed(seed: int = 0):
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.cuda.manual_seed_all(seed)


def blockPrint():
    sys.stdout = open(os.devnull, 'w')


def get_classifier(cfg, device):
    if "ImageNet" in cfg.data._target_:
        classifier_name = cfg.classifier_model.name
        if classifier_name == "robust_resnet50":
            classifier_model = MadryNet(cfg.classifier_model.ckpt, device)
            if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper:
classifier_model = Crop(classifier_model)
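For reference, the `Crop(classifier_model)` completion above relies on the `Crop` wrapper shown in this record's `utils/preprocessor.py` snippet, which center-crops inputs in [0, 1] before calling the wrapped classifier. A minimal, self-contained sketch; the `resnet18` stand-in is illustrative only, since the record itself wraps a robust ResNet-50 loaded via `MadryNet`:

```python
import torch
import torchvision

class Crop(torch.nn.Module):
    # Same structure as the Crop wrapper in this record's utils/preprocessor.py snippet.
    def __init__(self, classifier, crop_size: int = 224) -> None:
        super().__init__()
        self.classifier = classifier
        self.center_crop = torchvision.transforms.CenterCrop(crop_size)

    def forward(self, x):
        # assumes x in [0, 1]
        return self.classifier(self.center_crop(x))

# Stand-in classifier for illustration; the record wraps a robust ResNet-50 via MadryNet instead.
classifier_model = torchvision.models.resnet18(weights=None)
classifier_model = Crop(classifier_model)
print(classifier_model(torch.rand(1, 3, 256, 256)).shape)  # torch.Size([1, 1000])
```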
12
2023-10-10 09:40:10+00:00
16k
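As an illustrative aside on the record above: its `decode` loop re-noises the original latent with `q_sample` and composites it with the edited latent outside the mask. Below is a minimal NumPy sketch of that blend under the standard DDPM forward-noising formula; the schedule, shapes, and random tensors are toy stand-ins, not the repo's actual values:

```python
import numpy as np

rng = np.random.default_rng(0)
x0 = rng.standard_normal((1, 4, 8, 8)).astype(np.float32)     # original latent (toy shape)
x_dec = rng.standard_normal((1, 4, 8, 8)).astype(np.float32)  # partially denoised latent
mask = (rng.random((1, 1, 8, 8)) > 0.5).astype(np.float32)    # 1 = region being edited

# Toy DDPM schedule; x_t = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps (cf. stochastic_encode above).
alphas_cumprod = np.cumprod(1.0 - np.linspace(1e-4, 2e-2, 1000))
t = 500
eps = rng.standard_normal(x0.shape).astype(np.float32)
img_orig = np.sqrt(alphas_cumprod[t]) * x0 + np.sqrt(1.0 - alphas_cumprod[t]) * eps

# Masked-guidance blend from the decode loop: keep the re-noised original outside the mask.
x_dec = img_orig * (1.0 - mask) + mask * x_dec
print(x_dec.shape)  # (1, 4, 8, 8)
```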
cpuimage/minSDXLTF
stable_diffusion_xl/stable_diffusion_xl.py
[ { "identifier": "SimpleTokenizer", "path": "stable_diffusion_xl/clip_tokenizer.py", "snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/CLIP/blob/main/clip/bpe_simple_vocab_16e6.txt.gz?raw=true\", # noqa: E501\n file_hash=\"924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a\", # noqa: E501\n )\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split(\"\\n\")\n merges = merges[1: 49152 - 256 - 2 + 1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v + \"</w>\" for v in vocab]\n for merge in merges:\n vocab.append(\"\".join(merge))\n vocab.extend([\"<|startoftext|>\", \"<|endoftext|>\"])\n self.vocab = vocab\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n\n self.special_tokens = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.cache = {\n \"<|startoftext|>\": \"<|startoftext|>\",\n \"<|endoftext|>\": \"<|endoftext|>\",\n }\n self.pat = self._create_pat()\n\n def _create_encoder(self, vocab):\n return dict(zip(vocab, range(len(vocab))))\n\n def _create_decoder(self, encoder):\n return {v: k for k, v in encoder.items()}\n\n def _create_pat(self):\n return re.compile(\n \"|\".join([re.escape(key) for key in self.special_tokens.keys()])\n + r\"\"\"|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n re.IGNORECASE,\n )\n\n @property\n def end_of_text(self):\n return self.encoder[\"<|endoftext|>\"]\n\n @property\n def start_of_text(self):\n return self.encoder[\"<|startoftext|>\"]\n\n def add_tokens(self, tokens):\n if isinstance(tokens, str):\n tokens = [tokens]\n tokens_added = 0\n for token in tokens:\n if token in self.vocab:\n continue\n tokens_added += 1\n self.vocab.append(token)\n self.special_tokens[token] = token\n self.cache[token] = token\n self.encoder = self._create_encoder(self.vocab)\n self.decoder = self._create_decoder(self.encoder)\n self.pat = self._create_pat()\n return tokens_added\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + (token[-1] + \"</w>\",)\n pairs = get_pairs(word)\n\n if not pairs:\n return token + \"</w>\"\n\n while True:\n bigram = min(\n pairs, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if (word[i] == first\n and i < len(word) - 1\n and word[i + 1] == second):\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = \" \".join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = \"\".join(self.byte_encoder[b] for b in token.encode(\"utf-8\"))\n bpe_tokens.extend(\n self.encoder[bpe_token]\n for bpe_token in self.bpe(token).split(\" \")\n )\n return 
[self.start_of_text] + bpe_tokens + [self.end_of_text]\n\n def decode(self, tokens):\n text = \"\".join([self.decoder[token] for token in tokens])\n text = (\n bytearray([self.byte_decoder[c] for c in text])\n .decode(\"utf-8\", errors=\"replace\")\n .replace(\"</w>\", \" \")\n )\n return text" }, { "identifier": "DiffusionXLModel", "path": "stable_diffusion_xl/diffusion_model.py", "snippet": "class DiffusionXLModel(tf.keras.Model):\n @staticmethod\n def push_block(hidden_states, res_stack):\n res_stack.append(hidden_states)\n return res_stack\n\n @staticmethod\n def pop_block(hidden_states, res_stack):\n res_hidden_states = res_stack.pop()\n hidden_states = tf.concat([hidden_states, res_hidden_states], axis=-1)\n return hidden_states, res_stack\n\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None, lora_dict=None):\n sample = tf.keras.layers.Input((img_height // 8, img_width // 8, 4))\n timestep = tf.keras.layers.Input(())\n text_emb = tf.keras.layers.Input((None, 2048))\n text_embeds = tf.keras.layers.Input((1280,))\n time_ids = tf.keras.layers.Input((6,))\n # 1. time\n t_emb = Timesteps(320, name=\"time_proj\")(timestep)\n t_emb = tf.reshape(t_emb, (-1, 320))\n t_emb = Linear(1280, name=\"time_embedding.linear_1\")(tf.cast(t_emb, sample.dtype))\n t_emb = tf.keras.layers.Activation(\"swish\")(t_emb)\n t_emb = Linear(1280, name=\"time_embedding.linear_2\")(t_emb)\n time_embeds = Timesteps(256, name=\"add_time_proj\")(time_ids)\n time_embeds = tf.reshape(time_embeds, (-1, 1536)) # 6*256 = 1536\n add_embeds = tf.concat([text_embeds, time_embeds], axis=-1)\n add_embeds = tf.cast(add_embeds, sample.dtype)\n add_embeds = Linear(1280, name=\"add_embedding.linear_1\")(add_embeds)\n add_embeds = tf.keras.layers.Activation(\"swish\")(add_embeds)\n add_embeds = Linear(1280, name=\"add_embedding.linear_2\")(add_embeds)\n time_emb = tf.keras.layers.Activation(\"swish\")(t_emb + add_embeds)\n # 2. pre-process\n hidden_states = tf.keras.layers.Conv2D(320, kernel_size=3, strides=1, name=\"conv_in\")(\n tf.keras.layers.ZeroPadding2D(1)(sample))\n res_stack = [hidden_states]\n # 3. 
blocks\n # DownBlock2D\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.0\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"down_blocks.0.resnets.1\")((hidden_states, time_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(320, name=\"down_blocks.0.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"down_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"down_blocks.1.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = DownSampler(640, name=\"down_blocks.1.downsamplers.0\")(hidden_states)\n res_stack = self.push_block(hidden_states, res_stack)\n # CrossAttnDownBlock2D\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.0\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"down_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"down_blocks.2.attentions.1\")((hidden_states, text_emb))\n res_stack = self.push_block(hidden_states, res_stack)\n # UNetMidBlock2DCrossAttn\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"mid_block.attentions.0\")((hidden_states, text_emb))\n hidden_states = ResnetBlock(1280, name=\"mid_block.resnets.1\")((hidden_states, time_emb))\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(1280, name=\"up_blocks.0.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(20, 64, 1280, 10, name=\"up_blocks.0.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(1280, name=\"up_blocks.0.upsamplers.0\")(hidden_states)\n # CrossAttnUpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.0\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.0\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.1\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, 
name=\"up_blocks.1.attentions.1\")((hidden_states, text_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(640, name=\"up_blocks.1.resnets.2\")((hidden_states, time_emb))\n hidden_states = AttentionBlock(10, 64, 640, 2, name=\"up_blocks.1.attentions.2\")((hidden_states, text_emb))\n hidden_states = UpSampler(640, name=\"up_blocks.1.upsamplers.0\")(hidden_states)\n # UpBlock2D\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.0\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.1\")((hidden_states, time_emb))\n hidden_states, res_stack = self.pop_block(hidden_states, res_stack)\n hidden_states = ResnetBlock(320, name=\"up_blocks.2.resnets.2\")((hidden_states, time_emb))\n hidden_states = GroupNormalization(32, epsilon=1e-05, center=True, scale=True,\n name=\"conv_norm_out\")(\n hidden_states)\n hidden_states = tf.keras.layers.Activation(\"swish\")(hidden_states)\n output = tf.keras.layers.Conv2D(4, kernel_size=3, strides=1, name=\"conv_out\")(\n tf.keras.layers.ZeroPadding2D(1)(hidden_states))\n super().__init__([sample, timestep, text_emb, time_ids, text_embeds], output, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"diffusion_model\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=UNET_KEY_MAPPING,\n lora_dict=lora_dict)" }, { "identifier": "ImageDecoder", "path": "stable_diffusion_xl/image_decoder.py", "snippet": "class ImageDecoder(tf.keras.Sequential):\n def __init__(self, img_height=1024, img_width=1024, name=None, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((img_height // 8, img_width // 8, 4)),\n tf.keras.layers.Rescaling(1.0 / 0.13025),\n tf.keras.layers.Conv2D(4, 1, strides=1),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(512, 3, strides=1),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n UpSampler(512),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n UpSampler(256),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(3, 3, strides=1),\n ],\n name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"decoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n 
load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)" }, { "identifier": "ImageEncoder", "path": "stable_diffusion_xl/image_encoder.py", "snippet": "class ImageEncoder(tf.keras.Sequential):\n \"\"\"ImageEncoder is the VAE Encoder for StableDiffusionXL.\"\"\"\n\n def __init__(self, ckpt_path=None):\n super().__init__(\n [\n tf.keras.layers.Input((None, None, 3)),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(128, 3, strides=1),\n VaeResnetBlock(128),\n VaeResnetBlock(128),\n DownSampler(128, padding=((0, 1), (0, 1))),\n VaeResnetBlock(256),\n VaeResnetBlock(256),\n DownSampler(256, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n DownSampler(512, padding=((0, 1), (0, 1))),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeResnetBlock(512),\n VaeAttentionBlock(512),\n VaeResnetBlock(512),\n GroupNormalization(epsilon=1e-5),\n tf.keras.layers.Activation(\"swish\"),\n tf.keras.layers.ZeroPadding2D(padding=1),\n tf.keras.layers.Conv2D(8, 3, strides=1),\n tf.keras.layers.Conv2D(8, 1, strides=1),\n tf.keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=2, axis=-1)[0] * 0.13025),\n ])\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae_1_0/diffusion_pytorch_model.fp16.safetensors\"\n ckpt_mapping = CKPT_MAPPING[\"encoder\"]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, key_mapping=VAE_KEY_MAPPING)" }, { "identifier": "get_weighted_text_embeddings", "path": "stable_diffusion_xl/long_prompt_weighting.py", "snippet": "def get_weighted_text_embeddings(\n tokenizer,\n text_encoder,\n prompt: Union[str, List[str]],\n max_embeddings_multiples: Optional[int] = 4,\n no_boseos_middle: Optional[bool] = False,\n skip_parsing: Optional[bool] = False,\n skip_weighting: Optional[bool] = False,\n model_max_length=77,\n pad_token_id=49407,\n text_encoder_pool=None,\n):\n r\"\"\"\n Prompts can be assigned with local weights using brackets. For example,\n prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',\n and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.\n\n Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.\n\n Args:\n tokenizer : provide access to the tokenizer\n text_encoder : provide access to the text encoder.\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n max_embeddings_multiples (`int`, *optional*, defaults to `1`):\n The max multiple length of prompt embeddings compared to the max output length of text encoder.\n no_boseos_middle (`bool`, *optional*, defaults to `False`):\n If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and\n ending token in each of the chunk in the middle.\n skip_parsing (`bool`, *optional*, defaults to `False`):\n Skip the parsing of brackets.\n skip_weighting (`bool`, *optional*, defaults to `False`):\n Skip the weighting. 
When the parsing is skipped, it is forced True.\n \"\"\"\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n if isinstance(prompt, str):\n prompt = [prompt]\n\n if not skip_parsing:\n prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)\n else:\n prompt_tokens = [\n token[1:-1]\n for token in tokenizer.encode(prompt)[:max_length]\n ]\n prompt_weights = [[1.0] * len(token) for token in prompt_tokens]\n\n # round up the longest length of tokens to a multiple of (model_max_length - 2)\n max_length = max([len(token) for token in prompt_tokens])\n\n max_embeddings_multiples = min(\n max_embeddings_multiples,\n (max_length - 1) // (model_max_length - 2) + 1,\n )\n max_embeddings_multiples = max(1, max_embeddings_multiples)\n max_length = (model_max_length - 2) * max_embeddings_multiples + 2\n\n # pad the length of tokens and weights\n bos = tokenizer.start_of_text\n eos = tokenizer.end_of_text\n pad = pad_token_id\n prompt_tokens, prompt_weights = pad_tokens_and_weights(\n prompt_tokens,\n prompt_weights,\n max_length,\n bos,\n eos,\n pad,\n no_boseos_middle=no_boseos_middle,\n chunk_length=model_max_length,\n )\n prompt_tokens = np.array(prompt_tokens, dtype=np.int32)\n # get the embeddings\n if pad_token_id != 0:\n text_embeddings_pool = None\n text_embeddings = get_unweighted_text_embeddings_openai(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n )\n else:\n text_embeddings, text_embeddings_pool = get_unweighted_text_embeddings_laion(\n text_encoder,\n prompt_tokens,\n model_max_length,\n no_boseos_middle=no_boseos_middle,\n text_encoder_pool=text_encoder_pool,\n )\n prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)\n if (not skip_parsing) and (not skip_weighting):\n previous_mean = text_embeddings.mean(axis=(-2, -1))\n text_embeddings *= prompt_weights[:, :, None]\n text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]\n return text_embeddings, text_embeddings_pool" }, { "identifier": "Scheduler", "path": "stable_diffusion_xl/scheduler.py", "snippet": "class Scheduler(object):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n active_lcm (`bool`, defaults true):\n apply lcm or not.\n original_inference_steps (`int`, *optional*, defaults to 50):\n The default number of inference steps used to generate a linearly-spaced timestep schedule, from which we\n will ultimately take `num_inference_steps` evenly spaced timesteps to form the final timestep schedule.\n timestep_scaling (`float`, defaults to 10.0):\n The factor the timesteps will be multiplied by when calculating the consistency model boundary conditions\n `c_skip` and `c_out`. 
Increasing this will decrease the approximation error (although the approximation\n error at the default of `10.0` is already pretty small).\n \"\"\"\n\n def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,\n original_inference_steps: int = 50, timestep_scaling: float = 10.0, active_lcm=True):\n self.active_lcm = active_lcm\n self.num_train_timesteps = num_train_timesteps\n self.original_inference_steps = original_inference_steps\n self.timestep_scaling = timestep_scaling\n # this schedule is very specific to the latent diffusion model.\n self.alphas_cumprod = np.cumprod(\n 1. - np.square(np.linspace(np.sqrt(beta_start), np.sqrt(beta_end), num_train_timesteps)), axis=0)\n self.signal_rates = np.sqrt(self.alphas_cumprod)\n self.noise_rates = np.sqrt(1. - self.alphas_cumprod)\n self.final_alpha_cumprod = 1.0\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n # setable values\n self.num_inference_steps = None\n self.timesteps = np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int32)\n self._step_index = None\n\n # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._init_step_index\n def _init_step_index(self, timestep):\n index_candidates = np.nonzero(self.timesteps == timestep)\n # The sigma index that is taken for the **very** first `step`\n # is always the second index (or the last index if there is only 1)\n # This way we can ensure we don't accidentally skip a sigma in\n # case we start in the middle of the denoising schedule (e.g. for image-to-image)\n if len(index_candidates) > 1:\n step_index = index_candidates[1]\n else:\n step_index = index_candidates[0]\n self._step_index = step_index\n\n @property\n def step_index(self):\n return self._step_index\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: Optional[int] = None,\n strength: int = 1.0):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n original_inference_steps (`int`, *optional*):\n The original number of inference steps, which will be used to generate a linearly-spaced timestep\n schedule (which is different from the standard `diffusers` implementation). We will then take\n `num_inference_steps` timesteps from this schedule, evenly spaced in terms of indices, and use that as\n our final timestep schedule. 
If not set, this will default to the `original_inference_steps` attribute.\n \"\"\"\n\n if num_inference_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n self.num_inference_steps = num_inference_steps\n if self.active_lcm:\n original_steps = (\n original_inference_steps if original_inference_steps is not None else self.original_inference_steps)\n\n if original_steps > self.num_train_timesteps:\n raise ValueError(\n f\"`original_steps`: {original_steps} cannot be larger than `self.config_train_timesteps`:\"\n f\" {self.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.num_train_timesteps} timesteps.\")\n if num_inference_steps > original_steps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `original_inference_steps`:\"\n f\" {original_steps} because the final timestep schedule will be a subset of the\"\n f\" `original_inference_steps`-sized initial timestep schedule.\")\n # LCM Timesteps Setting\n # Currently, only linear spacing is supported.\n c = self.num_train_timesteps // original_steps\n # LCM Training Steps Schedule\n lcm_origin_timesteps = np.asarray(list(range(1, int(original_steps * strength) + 1))) * c - 1\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps]\n else:\n timesteps = np.linspace(0, 1000 - 1, num_inference_steps, dtype=np.int32)[::-1]\n self.timesteps = timesteps.copy().astype(np.int32)\n self._step_index = None\n\n def get_scalings_for_boundary_condition_discrete(self, timestep, sigma_data=0.5):\n scaled_timestep = timestep * self.timestep_scaling\n c_skip = sigma_data ** 2 / (scaled_timestep ** 2 + sigma_data ** 2)\n c_out = scaled_timestep / (scaled_timestep ** 2 + sigma_data ** 2) ** 0.5\n return c_skip, c_out\n\n def step(self, latent: np.ndarray, timestep: int, latent_prev: np.ndarray):\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n latent (`np.ndarray`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n latent_prev (`np.ndarray`):\n A current instance of a sample created by the diffusion process.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\")\n\n if self.step_index is None:\n self._init_step_index(timestep)\n # 1. get previous step value\n prev_step_index = self.step_index + 1\n if prev_step_index < len(self.timesteps):\n prev_timestep = self.timesteps[prev_step_index]\n else:\n prev_timestep = timestep\n next_signal_rates = self.signal_rates[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n next_noise_rates = self.noise_rates[prev_timestep]\n signal_rates = self.signal_rates[timestep]\n noise_rates = self.noise_rates[timestep]\n # 2. Compute the predicted original sample x_0 based on the model parameterization\n pred_x0 = (latent_prev - noise_rates * latent) / signal_rates\n # 3. 
Denoise model output using boundary conditions\n if self.active_lcm:\n # 4. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n denoised = c_out * pred_x0 + c_skip * latent_prev\n # 5. Sample and inject noise z ~ N(0, I) for MultiStep Inference\n # Noise is not used on the final timestep of the timestep schedule.\n # This also means that noise is not used for one-step sampling.\n if self.step_index != self.num_inference_steps - 1:\n noise = np.random.randn(*latent.shape).astype(np.float32)\n latent = next_signal_rates * denoised + next_noise_rates * noise\n else:\n latent = denoised\n else:\n if self.step_index != self.num_inference_steps - 1:\n latent = next_signal_rates * pred_x0 + next_noise_rates * latent\n else:\n latent = pred_x0\n # upon completion increase step index by one\n self._step_index += 1\n return latent\n\n def __len__(self):\n return self.num_train_timesteps" }, { "identifier": "TextEncoderLaion", "path": "stable_diffusion_xl/text_encoder_laion.py", "snippet": "class TextEncoderLaion(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=1280, vocab_size=49408, num_heads=20, num_layers=32, name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(out[-1])\n super().__init__([tokens, positions], [out[-2], embedded], name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n ckpt_mapping.append(('text_model.final_layer_norm.weight', 
None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n # ckpt_mapping.append(('text_projection.weight', (1, 0)))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" }, { "identifier": "TextEncoderLaionProj", "path": "stable_diffusion_xl/text_encoder_laion.py", "snippet": "class TextEncoderLaionProj(tf.keras.Model):\n def __init__(self, embed_dim=1280, name=None, ckpt_path=None, lora_dict=None):\n embedded = tf.keras.layers.Input(shape=(embed_dim,), dtype=\"float32\", name=\"embedded\")\n proje_out = tf.keras.layers.Dense(1280, name=\"text_projection\", use_bias=False)(embedded)\n super().__init__(embedded, proje_out, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.fp16.safetensors\"\n ckpt_mapping = [('text_projection.weight', (1, 0))]\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" }, { "identifier": "TextEncoderOpenAi", "path": "stable_diffusion_xl/text_encoder_openai.py", "snippet": "class TextEncoderOpenAi(tf.keras.Model):\n def __init__(self, max_length=77, embed_dim=768, vocab_size=49408, num_heads=12, num_layers=12, clip_skip=-2,\n final_layer_norm=False,\n name=None,\n ckpt_path=None, lora_dict=None):\n tokens = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"tokens\")\n positions = tf.keras.layers.Input(shape=(max_length,), dtype=\"int32\", name=\"positions\")\n clip_emb = CLIPEmbedding(vocab_size, embed_dim, max_length, name=\"embeddings\")([tokens, positions])\n x = clip_emb\n out = []\n for idx in range(num_layers):\n x = CLIPEncoderLayer(embed_dim, num_heads, activation=quick_gelu,\n name=\"text_model.encoder.layers.{}\".format(idx))(x)\n out.append(x)\n embedded = out[clip_skip]\n if final_layer_norm:\n embedded = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"text_model.final_layer_norm\")(embedded)\n super().__init__([tokens, positions], embedded, name=name)\n origin = \"https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.fp16.safetensors\"\n ckpt_mapping = [('text_model.embeddings.token_embedding.weight', None),\n ('text_model.embeddings.position_embedding.weight', None)]\n for idx in range(0, num_layers + clip_skip + 1):\n layers_name = 'text_model.encoder.layers.{}'.format(idx)\n ckpt_mapping.append(('{}.layer_norm1.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.q_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.q_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.k_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.k_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.v_proj.weight'.format(layers_name), (1, 0)))\n 
ckpt_mapping.append(('{}.self_attn.v_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.self_attn.out_proj.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.self_attn.out_proj.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.weight'.format(layers_name), None))\n ckpt_mapping.append(('{}.layer_norm2.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc1.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc1.bias'.format(layers_name), None))\n ckpt_mapping.append(('{}.mlp.fc2.weight'.format(layers_name), (1, 0)))\n ckpt_mapping.append(('{}.mlp.fc2.bias'.format(layers_name), None))\n if final_layer_norm:\n ckpt_mapping.append(('text_model.final_layer_norm.weight', None))\n ckpt_mapping.append(('text_model.final_layer_norm.bias', None))\n if ckpt_path is not None:\n if os.path.exists(ckpt_path):\n load_weights_from_file(self, ckpt_path, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)\n return\n else:\n origin = ckpt_path\n model_weights_fpath = tf.keras.utils.get_file(origin=origin)\n if os.path.exists(model_weights_fpath):\n load_weights_from_file(self, model_weights_fpath, ckpt_mapping=ckpt_mapping, lora_dict=lora_dict)" } ]
import numpy as np import tensorflow as tf from PIL import Image from scipy.ndimage import correlate1d from .clip_tokenizer import SimpleTokenizer from .diffusion_model import DiffusionXLModel from .image_decoder import ImageDecoder from .image_encoder import ImageEncoder from .long_prompt_weighting import get_weighted_text_embeddings from .scheduler import Scheduler from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj from .text_encoder_openai import TextEncoderOpenAi
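As an illustrative aside on the `Scheduler` snippet above: in LCM mode it predicts x0 from the noise estimate and then applies the consistency-model boundary conditions `c_skip`/`c_out`. The following is a minimal NumPy sketch of one such update, with random tensors standing in for real latents and UNet outputs:

```python
import numpy as np

def boundary_scalings(timestep, timestep_scaling=10.0, sigma_data=0.5):
    # c_skip / c_out exactly as in the Scheduler snippet's
    # get_scalings_for_boundary_condition_discrete.
    s = timestep * timestep_scaling
    c_skip = sigma_data ** 2 / (s ** 2 + sigma_data ** 2)
    c_out = s / (s ** 2 + sigma_data ** 2) ** 0.5
    return c_skip, c_out

# Schedule as in the Scheduler snippet.
beta_start, beta_end, T = 0.00085, 0.012, 1000
alphas_cumprod = np.cumprod(1.0 - np.square(np.linspace(np.sqrt(beta_start), np.sqrt(beta_end), T)))
signal_rates, noise_rates = np.sqrt(alphas_cumprod), np.sqrt(1.0 - alphas_cumprod)

# One LCM-style update with random stand-ins for the latent and the UNet's noise prediction.
rng = np.random.default_rng(0)
t = 499
latent_prev = rng.standard_normal((1, 8, 8, 4)).astype(np.float32)
noise_pred = rng.standard_normal((1, 8, 8, 4)).astype(np.float32)

pred_x0 = (latent_prev - noise_rates[t] * noise_pred) / signal_rates[t]
c_skip, c_out = boundary_scalings(t)
denoised = c_out * pred_x0 + c_skip * latent_prev
print(denoised.shape)  # (1, 8, 8, 4)
```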
12,844
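One more illustrative aside: the `generate_image` loop later in this record combines classifier-free guidance (`uncond + scale * (text - uncond)`) with the optional guidance rescale of arXiv:2305.08891. A minimal NumPy sketch, with random tensors standing in for the two UNet predictions:

```python
import numpy as np

def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05):
    # Pull the guided prediction's std back toward the text-conditioned one
    # (arXiv:2305.08891, Sec. 3.4), mirroring rescale_noise_cfg in this record.
    axes = tuple(range(1, noise_pred_text.ndim))
    std_text = np.std(noise_pred_text, axis=axes, keepdims=True)
    std_cfg = np.std(noise_cfg, axis=axes, keepdims=True) + epsilon
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_cfg

rng = np.random.default_rng(0)
uncond = rng.standard_normal((2, 16, 16, 4)).astype(np.float32)  # unconditional UNet output (stand-in)
text = rng.standard_normal((2, 16, 16, 4)).astype(np.float32)    # text-conditioned UNet output (stand-in)

guided = uncond + 7.5 * (text - uncond)        # classifier-free guidance
guided = rescale_noise_cfg(guided, text, 0.7)  # optional fix for overexposure
print(guided.shape)  # (2, 16, 16, 4)
```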
def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_initial_diffusion_noise(self, batch_size, seed): if seed is not None: try: seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. 
""" if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None:
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusionXL.""" MAX_PROMPT_LENGTH = 77 class StableDiffusionXLBase: """Base class for stable diffusion xl model.""" def __init__(self, img_height=1024, img_width=1024, jit_compile=False, active_lcm=False): self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder_laion = None self._text_encoder_laion_proj = None self._text_encoder_openai = None self._diffusion_model = None self._image_decoder = None self._tokenizer = None self.jit_compile = jit_compile self.active_lcm = active_lcm self.scheduler = Scheduler(active_lcm=active_lcm) def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def image_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def inpaint( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, inpaint_mask=None, mask_blur_strength=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, inpaint_mask=inpaint_mask, mask_blur_strength=mask_blur_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, 
guidance_rescale=guidance_rescale, callback=callback) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. starting context) context_openai, _ = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_openai, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=49407) context_laion, add_text_embeds = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_laion, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=0, text_encoder_pool=self.text_encoder_laion_proj) return np.concatenate([context_openai, context_laion], axis=-1), add_text_embeds def gaussian_blur(self, image, radius=3, h_axis=1, v_axis=2): def build_filter1d(kernel_size): if kernel_size == 1: filter1d = [1] else: triangle = [[1, 1]] for i in range(1, kernel_size - 1): cur_row = [1] prev_row = triangle[i - 1] for j in range(len(prev_row) - 1): cur_row.append(prev_row[j] + prev_row[j + 1]) cur_row.append(1) triangle.append(cur_row) filter1d = triangle[-1] filter1d = np.reshape(filter1d, (kernel_size,)) return filter1d / np.sum(filter1d) weights = build_filter1d(radius) # Apply filter horizontally blurred_image = correlate1d(image, weights, axis=h_axis, output=None, mode="reflect", cval=0.0, origin=0) # Apply filter vertically blurred_image = correlate1d(blurred_image, weights, axis=v_axis, output=None, mode="reflect", cval=0.0, origin=0) return blurred_image @staticmethod def resize(image_array, new_h=None, new_w=None): h, w, c = image_array.shape if new_h == h and new_w == w: return image_array h_bounds = 0, h - 1 w_bounds = 0, w - 1 y = np.expand_dims(np.linspace(h_bounds[0], h_bounds[1], new_h), axis=-1) x = np.expand_dims(np.linspace(w_bounds[0], w_bounds[1], new_w), axis=0) # Calculate the floor and ceiling values of x and y x_floor = np.floor(x).astype(int) x_ceil = np.ceil(x).astype(int) y_floor = np.floor(y).astype(int) y_ceil = np.ceil(y).astype(int) # Clip the values to stay within the image bounds x_floor = np.clip(x_floor, w_bounds[0], w_bounds[1]) x_ceil = np.clip(x_ceil, w_bounds[0], w_bounds[1]) y_floor = np.clip(y_floor, h_bounds[0], h_bounds[1]) y_ceil = np.clip(y_ceil, h_bounds[0], h_bounds[1]) # Calculate the fractional part of x and y dx = x - x_floor dy = y - y_floor # Get the values of the four neighboring pixels dx = np.expand_dims(dx, axis=-1) dy = np.expand_dims(dy, axis=-1) q11 = image_array[y_floor, x_floor, :] q21 = image_array[y_floor, x_ceil, :] q12 = image_array[y_ceil, x_floor, :] q22 = image_array[y_ceil, x_ceil, :] # Perform bilinear interpolation top_interp = q11 * (1.0 - dx) + q21 * dx bottom_interp = q12 * (1.0 - dx) + q22 * dx interpolated = top_interp * (1.0 - dy) + bottom_interp * dy return interpolated def preprocessed_image(self, x): if type(x) is str: x = np.array(Image.open(x).convert("RGB")) else: x = np.asarray(x) image_array = self.resize(x, self.img_height, self.img_width) image_array = np.array(image_array, dtype=np.float32) / 
255.0 input_image_array = image_array[None, ..., :3] input_image_tensor = input_image_array * 2.0 - 1.0 return input_image_array, input_image_tensor def preprocessed_mask(self, x, blur_radius=5): if type(x) is str: x = np.array(Image.open(x).convert("L")) else: x = np.asarray(x) if len(x.shape) == 2: x = np.expand_dims(x, axis=-1) mask_array = self.resize(x, self.img_height, self.img_width) if mask_array.shape[-1] != 1: mask_array = np.mean(mask_array, axis=-1, keepdims=True) input_mask_array = np.array(mask_array, dtype=np.float32) / 255.0 if blur_radius is not None: input_mask_array = self.gaussian_blur(input_mask_array, radius=blur_radius, h_axis=0, v_axis=1) latent_mask_tensor = self.resize(input_mask_array, self.img_width // 8, self.img_height // 8) return np.expand_dims(input_mask_array, axis=0), np.expand_dims(latent_mask_tensor, axis=0) def rescale_noise_cfg(self, noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891). See Section 3.4 """ std_text = np.std(noise_pred_text, axis=tuple(range(1, len(noise_pred_text.shape))), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, len(noise_cfg.shape))), keepdims=True) + epsilon # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg return noise_cfg def generate_image( self, encoded_text, add_text_embeds, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, inpaint_mask=None, mask_blur_strength=None, reference_image=None, reference_image_strength=0.8, callback=None, original_size=None, crops_coords_top_left=(0, 0), guidance_rescale=0.0, target_size=None): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. 
Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL batch_size = 8 model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." ) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. - input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_initial_diffusion_noise(self, batch_size, seed): if seed is not None: try: seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. 
Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overriden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None:
self._text_encoder_laion = TextEncoderLaion(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder2_ckpt)
6
2023-10-14 18:40:16+00:00
16k
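Editor's note on the record above: its `generate_image` loop combines the unconditional and text-conditioned noise predictions with classifier-free guidance and then applies `rescale_noise_cfg` (Section 3.4 of arXiv:2305.08891). The snippet below is a minimal, self-contained NumPy sketch of that guidance-plus-rescale step, kept separate from the dataset record; the array shapes and the `guidance_scale`/`guidance_rescale` values are illustrative assumptions, not values taken from the record.

```python
# Illustrative sketch (not part of the dataset record): classifier-free guidance
# with the std-based rescale used by `rescale_noise_cfg` above. Shapes and the
# example guidance values are assumptions chosen for the demo.
import numpy as np

def classifier_free_guidance(noise_uncond, noise_text, guidance_scale=7.5,
                             guidance_rescale=0.0, epsilon=1e-5):
    """Combine unconditional/text noise predictions; optionally rescale the
    result so its per-sample std matches the text-conditioned prediction."""
    noise_cfg = noise_uncond + guidance_scale * (noise_text - noise_uncond)
    if guidance_rescale > 0.0:
        axes = tuple(range(1, noise_text.ndim))
        std_text = np.std(noise_text, axis=axes, keepdims=True)
        std_cfg = np.std(noise_cfg, axis=axes, keepdims=True) + epsilon
        rescaled = noise_cfg * (std_text / std_cfg)          # fixes overexposure
        noise_cfg = guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_cfg
    return noise_cfg

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    uncond = rng.standard_normal((1, 128, 128, 4)).astype(np.float32)
    text = rng.standard_normal((1, 128, 128, 4)).astype(np.float32)
    out = classifier_free_guidance(uncond, text, guidance_scale=7.5, guidance_rescale=0.7)
    print(out.shape, float(out.std()))
```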
spla-tam/SplaTAM
scripts/iphone_demo.py
[ { "identifier": "relative_transformation", "path": "datasets/gradslam_datasets/geometryutils.py", "snippet": "def relative_transformation(\n trans_01: torch.Tensor, trans_02: torch.Tensor, orthogonal_rotations: bool = False\n) -> torch.Tensor:\n r\"\"\"Function that computes the relative homogenous transformation from a\n reference transformation :math:`T_1^{0} = \\begin{bmatrix} R_1 & t_1 \\\\\n \\mathbf{0} & 1 \\end{bmatrix}` to destination :math:`T_2^{0} =\n \\begin{bmatrix} R_2 & t_2 \\\\ \\mathbf{0} & 1 \\end{bmatrix}`.\n\n .. note:: Works with imperfect (non-orthogonal) rotation matrices as well.\n\n The relative transformation is computed as follows:\n\n .. math::\n\n T_1^{2} = (T_0^{1})^{-1} \\cdot T_0^{2}\n\n Arguments:\n trans_01 (torch.Tensor): reference transformation tensor of shape\n :math:`(N, 4, 4)` or :math:`(4, 4)`.\n trans_02 (torch.Tensor): destination transformation tensor of shape\n :math:`(N, 4, 4)` or :math:`(4, 4)`.\n orthogonal_rotations (bool): If True, will invert `trans_01` assuming `trans_01[:, :3, :3]` are\n orthogonal rotation matrices (more efficient). Default: False\n\n Shape:\n - Output: :math:`(N, 4, 4)` or :math:`(4, 4)`.\n\n Returns:\n torch.Tensor: the relative transformation between the transformations.\n\n Example::\n >>> trans_01 = torch.eye(4) # 4x4\n >>> trans_02 = torch.eye(4) # 4x4\n >>> trans_12 = gradslam.geometry.geometryutils.relative_transformation(trans_01, trans_02) # 4x4\n \"\"\"\n if not torch.is_tensor(trans_01):\n raise TypeError(\n \"Input trans_01 type is not a torch.Tensor. Got {}\".format(type(trans_01))\n )\n if not torch.is_tensor(trans_02):\n raise TypeError(\n \"Input trans_02 type is not a torch.Tensor. Got {}\".format(type(trans_02))\n )\n if not trans_01.dim() in (2, 3) and trans_01.shape[-2:] == (4, 4):\n raise ValueError(\n \"Input must be a of the shape Nx4x4 or 4x4.\"\n \" Got {}\".format(trans_01.shape)\n )\n if not trans_02.dim() in (2, 3) and trans_02.shape[-2:] == (4, 4):\n raise ValueError(\n \"Input must be a of the shape Nx4x4 or 4x4.\"\n \" Got {}\".format(trans_02.shape)\n )\n if not trans_01.dim() == trans_02.dim():\n raise ValueError(\n \"Input number of dims must match. Got {} and {}\".format(\n trans_01.dim(), trans_02.dim()\n )\n )\n trans_10: torch.Tensor = (\n inverse_transformation(trans_01)\n if orthogonal_rotations\n else torch.inverse(trans_01)\n )\n trans_12: torch.Tensor = compose_transformations(trans_10, trans_02)\n return trans_12" }, { "identifier": "seed_everything", "path": "utils/common_utils.py", "snippet": "def seed_everything(seed=42):\n \"\"\"\n Set the `seed` value for torch and numpy seeds. 
Also turns on\n deterministic execution for cudnn.\n \n Parameters:\n - seed: A hashable seed value\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n print(f\"Seed set to: {seed} (type: {type(seed)})\")" }, { "identifier": "save_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_params_ckpt(output_params, output_dir, time_idx):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "save_params", "path": "utils/common_utils.py", "snippet": "def save_params(output_params, output_dir):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "report_progress", "path": "utils/eval_helpers.py", "snippet": "def report_progress(params, data, i, progress_bar, iter_time_idx, sil_thres, every_i=1, qual_every_i=1, \n tracking=False, mapping=False, wandb_run=None, wandb_step=None, wandb_save_qual=False, online_time_idx=None,\n global_logging=True):\n if i % every_i == 0 or i == 1:\n if wandb_run is not None:\n if tracking:\n stage = \"Tracking\"\n elif mapping:\n stage = \"Mapping\"\n else:\n stage = \"Current Frame Optimization\"\n if not global_logging:\n stage = \"Per Iteration \" + stage\n\n if tracking:\n # Get list of gt poses\n gt_w2c_list = data['iter_gt_w2c_list']\n valid_gt_w2c_list = []\n \n # Get latest trajectory\n latest_est_w2c = data['w2c']\n latest_est_w2c_list = []\n latest_est_w2c_list.append(latest_est_w2c)\n valid_gt_w2c_list.append(gt_w2c_list[0])\n for idx in range(1, iter_time_idx+1):\n # Check if gt pose is not nan for this time step\n if torch.isnan(gt_w2c_list[idx]).sum() > 0:\n continue\n interm_cam_rot = F.normalize(params['cam_unnorm_rots'][..., idx].detach())\n interm_cam_trans = params['cam_trans'][..., idx].detach()\n intermrel_w2c = torch.eye(4).cuda().float()\n intermrel_w2c[:3, :3] = build_rotation(interm_cam_rot)\n intermrel_w2c[:3, 3] = interm_cam_trans\n latest_est_w2c = intermrel_w2c\n latest_est_w2c_list.append(latest_est_w2c)\n valid_gt_w2c_list.append(gt_w2c_list[idx])\n\n # Get latest gt pose\n gt_w2c_list = valid_gt_w2c_list\n iter_gt_w2c = gt_w2c_list[-1]\n # Get euclidean distance error between latest and gt pose\n iter_pt_error = torch.sqrt((latest_est_w2c[0,3] - iter_gt_w2c[0,3])**2 + (latest_est_w2c[1,3] - iter_gt_w2c[1,3])**2 + (latest_est_w2c[2,3] - iter_gt_w2c[2,3])**2)\n if iter_time_idx > 0:\n # Calculate relative pose error\n rel_gt_w2c = relative_transformation(gt_w2c_list[-2], gt_w2c_list[-1])\n rel_est_w2c = relative_transformation(latest_est_w2c_list[-2], latest_est_w2c_list[-1])\n rel_pt_error = torch.sqrt((rel_gt_w2c[0,3] - rel_est_w2c[0,3])**2 + (rel_gt_w2c[1,3] - rel_est_w2c[1,3])**2 + (rel_gt_w2c[2,3] - rel_est_w2c[2,3])**2)\n else:\n rel_pt_error = torch.zeros(1).float()\n \n # Calculate ATE RMSE\n ate_rmse = evaluate_ate(gt_w2c_list, latest_est_w2c_list)\n ate_rmse = np.round(ate_rmse, decimals=6)\n if 
wandb_run is not None:\n tracking_log = {f\"{stage}/Latest Pose Error\":iter_pt_error, \n f\"{stage}/Latest Relative Pose Error\":rel_pt_error,\n f\"{stage}/ATE RMSE\":ate_rmse}\n\n # Get current frame Gaussians\n transformed_pts = transform_to_frame(params, iter_time_idx, \n gaussians_grad=False,\n camera_grad=False)\n\n # Initialize Render Variables\n rendervar = transformed_params2rendervar(params, transformed_pts)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, data['w2c'], \n transformed_pts)\n depth_sil, _, _, = Renderer(raster_settings=data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n\n im, _, _, = Renderer(raster_settings=data['cam'])(**rendervar)\n if tracking:\n psnr = calc_psnr(im * presence_sil_mask, data['im'] * presence_sil_mask).mean()\n else:\n psnr = calc_psnr(im, data['im']).mean()\n\n if tracking:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n diff_depth_l1 = torch.abs((rastered_depth - data['depth']) * presence_sil_mask)\n diff_depth_l1 = diff_depth_l1 * valid_depth_mask\n depth_l1 = diff_depth_l1.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth'])) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n diff_depth_l1 = torch.abs((rastered_depth - data['depth']))\n diff_depth_l1 = diff_depth_l1 * valid_depth_mask\n depth_l1 = diff_depth_l1.sum() / valid_depth_mask.sum()\n\n if not (tracking or mapping):\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | PSNR: {psnr:.{7}} | Depth RMSE: {rmse:.{7}} | L1\": f\"{depth_l1:.{7}}\"})\n progress_bar.update(every_i)\n elif tracking:\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | Rel Pose Error: {rel_pt_error.item():.{7}} | Pose Error: {iter_pt_error.item():.{7}} | ATE RMSE\": f\"{ate_rmse.item():.{7}}\"})\n progress_bar.update(every_i)\n elif mapping:\n progress_bar.set_postfix({f\"Time-Step: {online_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | Depth RMSE: {rmse:.{7}} | L1\": f\"{depth_l1:.{7}}\"})\n progress_bar.update(every_i)\n \n if wandb_run is not None:\n wandb_log = {f\"{stage}/PSNR\": psnr,\n f\"{stage}/Depth RMSE\": rmse,\n f\"{stage}/Depth L1\": depth_l1,\n f\"{stage}/step\": wandb_step}\n if tracking:\n wandb_log = {**wandb_log, **tracking_log}\n wandb_run.log(wandb_log)\n \n if wandb_save_qual and (i % qual_every_i == 0 or i == 1):\n # Silhouette Mask\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n\n # Log plot to wandb\n if not mapping:\n fig_title = f\"Time-Step: {iter_time_idx} | Iter: {i} | Frame: {data['id']}\"\n else:\n fig_title = f\"Time-Step: {online_time_idx} | Iter: {i} | Frame: {data['id']}\"\n plot_rgbd_silhouette(data['im'], data['depth'], im, rastered_depth, presence_sil_mask, diff_depth_l1,\n psnr, depth_l1, fig_title, wandb_run=wandb_run, wandb_step=wandb_step, \n wandb_title=f\"{stage} Qual Viz\")" }, { "identifier": "keyframe_selection_overlap", "path": "utils/keyframe_selection.py", "snippet": "def keyframe_selection_overlap(gt_depth, w2c, intrinsics, keyframe_list, k, pixels=1600):\n \"\"\"\n Select overlapping keyframes to the current camera observation.\n\n Args:\n gt_depth (tensor): ground truth 
depth image of the current frame.\n w2c (tensor): world to camera matrix (4 x 4).\n keyframe_list (list): a list containing info for each keyframe.\n k (int): number of overlapping keyframes to select.\n pixels (int, optional): number of pixels to sparsely sample \n from the image of the current camera. Defaults to 1600.\n Returns:\n selected_keyframe_list (list): list of selected keyframe id.\n \"\"\"\n # Radomly Sample Pixel Indices from valid depth pixels\n width, height = gt_depth.shape[2], gt_depth.shape[1]\n valid_depth_indices = torch.where(gt_depth[0] > 0)\n valid_depth_indices = torch.stack(valid_depth_indices, dim=1)\n indices = torch.randint(valid_depth_indices.shape[0], (pixels,))\n sampled_indices = valid_depth_indices[indices]\n\n # Back Project the selected pixels to 3D Pointcloud\n pts = get_pointcloud(gt_depth, intrinsics, w2c, sampled_indices)\n\n list_keyframe = []\n for keyframeid, keyframe in enumerate(keyframe_list):\n # Get the estimated world2cam of the keyframe\n est_w2c = keyframe['est_w2c']\n # Transform the 3D pointcloud to the keyframe's camera space\n pts4 = torch.cat([pts, torch.ones_like(pts[:, :1])], dim=1)\n transformed_pts = (est_w2c @ pts4.T).T[:, :3]\n # Project the 3D pointcloud to the keyframe's image space\n points_2d = torch.matmul(intrinsics, transformed_pts.transpose(0, 1))\n points_2d = points_2d.transpose(0, 1)\n points_z = points_2d[:, 2:] + 1e-5\n points_2d = points_2d / points_z\n projected_pts = points_2d[:, :2]\n # Filter out the points that are outside the image\n edge = 20\n mask = (projected_pts[:, 0] < width-edge)*(projected_pts[:, 0] > edge) * \\\n (projected_pts[:, 1] < height-edge)*(projected_pts[:, 1] > edge)\n mask = mask & (points_z[:, 0] > 0)\n # Compute the percentage of points that are inside the image\n percent_inside = mask.sum()/projected_pts.shape[0]\n list_keyframe.append(\n {'id': keyframeid, 'percent_inside': percent_inside})\n\n # Sort the keyframes based on the percentage of points that are inside the image\n list_keyframe = sorted(\n list_keyframe, key=lambda i: i['percent_inside'], reverse=True)\n # Select the keyframes with percentage of points inside the image > 0\n selected_keyframe_list = [keyframe_dict['id']\n for keyframe_dict in list_keyframe if keyframe_dict['percent_inside'] > 0.0]\n selected_keyframe_list = list(np.random.permutation(\n np.array(selected_keyframe_list))[:k])\n\n return selected_keyframe_list" }, { "identifier": "setup_camera", "path": "utils/recon_helpers.py", "snippet": "def setup_camera(w, h, k, w2c, near=0.01, far=100):\n fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]\n w2c = torch.tensor(w2c).cuda().float()\n cam_center = torch.inverse(w2c)[:3, 3]\n w2c = w2c.unsqueeze(0).transpose(1, 2)\n opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],\n [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],\n [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],\n [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)\n full_proj = w2c.bmm(opengl_proj)\n cam = Camera(\n image_height=h,\n image_width=w,\n tanfovx=w / (2 * fx),\n tanfovy=h / (2 * fy),\n bg=torch.tensor([0, 0, 0], dtype=torch.float32, device=\"cuda\"),\n scale_modifier=1.0,\n viewmatrix=w2c,\n projmatrix=full_proj,\n sh_degree=0,\n campos=cam_center,\n prefiltered=False\n )\n return cam" }, { "identifier": "build_rotation", "path": "utils/slam_external.py", "snippet": "def build_rotation(q):\n norm = torch.sqrt(q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])\n q = q / 
norm[:, None]\n rot = torch.zeros((q.size(0), 3, 3), device='cuda')\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n rot[:, 0, 0] = 1 - 2 * (y * y + z * z)\n rot[:, 0, 1] = 2 * (x * y - r * z)\n rot[:, 0, 2] = 2 * (x * z + r * y)\n rot[:, 1, 0] = 2 * (x * y + r * z)\n rot[:, 1, 1] = 1 - 2 * (x * x + z * z)\n rot[:, 1, 2] = 2 * (y * z - r * x)\n rot[:, 2, 0] = 2 * (x * z - r * y)\n rot[:, 2, 1] = 2 * (y * z + r * x)\n rot[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return rot" }, { "identifier": "prune_gaussians", "path": "utils/slam_external.py", "snippet": "def prune_gaussians(params, variables, optimizer, iter, prune_dict):\n if iter <= prune_dict['stop_after']:\n if (iter >= prune_dict['start_after']) and (iter % prune_dict['prune_every'] == 0):\n if iter == prune_dict['stop_after']:\n remove_threshold = prune_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = prune_dict['removal_opacity_threshold']\n # Remove Gaussians with low opacity\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n # Remove Gaussians that are too big\n if iter >= prune_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n torch.cuda.empty_cache()\n \n # Reset Opacities for all Gaussians\n if iter > 0 and iter % prune_dict['reset_opacities_every'] == 0 and prune_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n \n return params, variables" }, { "identifier": "densify", "path": "utils/slam_external.py", "snippet": "def densify(params, variables, optimizer, iter, densify_dict):\n if iter <= densify_dict['stop_after']:\n variables = accumulate_mean2d_gradient(variables)\n grad_thresh = densify_dict['grad_thresh']\n if (iter >= densify_dict['start_after']) and (iter % densify_dict['densify_every'] == 0):\n grads = variables['means2D_gradient_accum'] / variables['denom']\n grads[grads.isnan()] = 0.0\n to_clone = torch.logical_and(grads >= grad_thresh, (\n torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))\n new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n padded_grad = torch.zeros(num_pts, device=\"cuda\")\n padded_grad[:grads.shape[0]] = grads\n to_split = torch.logical_and(padded_grad >= grad_thresh,\n torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[\n 'scene_radius'])\n n = densify_dict['num_to_split_into'] # number to split into\n new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n stds = torch.exp(params['log_scales'])[to_split].repeat(n, 3)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)\n new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)\n new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n 
variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\")\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\")\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\")\n to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device=\"cuda\")))\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n if iter == densify_dict['stop_after']:\n remove_threshold = densify_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = densify_dict['removal_opacity_threshold']\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n if iter >= densify_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n torch.cuda.empty_cache()\n\n # Reset Opacities for all Gaussians (This is not desired for mapping on only current frame)\n if iter > 0 and iter % densify_dict['reset_opacities_every'] == 0 and densify_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n\n return params, variables" }, { "identifier": "get_loss", "path": "scripts/splatam.py", "snippet": "def get_loss(params, curr_data, variables, iter_time_idx, loss_weights, use_sil_for_loss,\n sil_thres, use_l1,ignore_outlier_depth_loss, tracking=False, \n mapping=False, do_ba=False, plot_dir=None, visualize_tracking_loss=False, tracking_iteration=None):\n # Initialize Loss Dictionary\n losses = {}\n\n if tracking:\n # Get current frame Gaussians, where only the camera pose gets gradient\n transformed_pts = transform_to_frame(params, iter_time_idx, \n gaussians_grad=False,\n camera_grad=True)\n elif mapping:\n if do_ba:\n # Get current frame Gaussians, where both camera pose and Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=True)\n else:\n # Get current frame Gaussians, where only the Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=False)\n else:\n # Get current frame Gaussians, where only the Gaussians get gradient\n transformed_pts = transform_to_frame(params, iter_time_idx,\n gaussians_grad=True,\n camera_grad=False)\n\n # Initialize Render Variables\n rendervar = transformed_params2rendervar(params, transformed_pts)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'],\n transformed_pts)\n\n # RGB Rendering\n rendervar['means2D'].retain_grad()\n im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)\n variables['means2D'] = rendervar['means2D'] # Gradient only accum from colour render for densification\n\n # Depth & Silhouette Rendering\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n depth = depth_sil[0, :, :].unsqueeze(0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n depth_sq = depth_sil[2, :, :].unsqueeze(0)\n uncertainty = depth_sq - depth**2\n uncertainty = uncertainty.detach()\n\n # Mask with valid depth values (accounts for outlier depth values)\n nan_mask = (~torch.isnan(depth)) & (~torch.isnan(uncertainty))\n if ignore_outlier_depth_loss:\n depth_error = torch.abs(curr_data['depth'] - 
depth) * (curr_data['depth'] > 0)\n mask = (depth_error < 10*depth_error.median())\n mask = mask & (curr_data['depth'] > 0)\n else:\n mask = (curr_data['depth'] > 0)\n mask = mask & nan_mask\n # Mask with presence silhouette mask (accounts for empty space)\n if tracking and use_sil_for_loss:\n mask = mask & presence_sil_mask\n\n # Depth loss\n if use_l1:\n mask = mask.detach()\n if tracking:\n losses['depth'] = torch.abs(curr_data['depth'] - depth)[mask].sum()\n else:\n losses['depth'] = torch.abs(curr_data['depth'] - depth)[mask].mean()\n \n # RGB Loss\n if tracking and (use_sil_for_loss or ignore_outlier_depth_loss):\n color_mask = torch.tile(mask, (3, 1, 1))\n color_mask = color_mask.detach()\n losses['im'] = torch.abs(curr_data['im'] - im)[color_mask].sum()\n elif tracking:\n losses['im'] = torch.abs(curr_data['im'] - im).sum()\n else:\n losses['im'] = 0.8 * l1_loss_v1(im, curr_data['im']) + 0.2 * (1.0 - calc_ssim(im, curr_data['im']))\n\n # Visualize the Diff Images\n if tracking and visualize_tracking_loss:\n fig, ax = plt.subplots(2, 4, figsize=(12, 6))\n weighted_render_im = im * color_mask\n weighted_im = curr_data['im'] * color_mask\n weighted_render_depth = depth * mask\n weighted_depth = curr_data['depth'] * mask\n diff_rgb = torch.abs(weighted_render_im - weighted_im).mean(dim=0).detach().cpu()\n diff_depth = torch.abs(weighted_render_depth - weighted_depth).mean(dim=0).detach().cpu()\n viz_img = torch.clip(weighted_im.permute(1, 2, 0).detach().cpu(), 0, 1)\n ax[0, 0].imshow(viz_img)\n ax[0, 0].set_title(\"Weighted GT RGB\")\n viz_render_img = torch.clip(weighted_render_im.permute(1, 2, 0).detach().cpu(), 0, 1)\n ax[1, 0].imshow(viz_render_img)\n ax[1, 0].set_title(\"Weighted Rendered RGB\")\n ax[0, 1].imshow(weighted_depth[0].detach().cpu(), cmap=\"jet\", vmin=0, vmax=6)\n ax[0, 1].set_title(\"Weighted GT Depth\")\n ax[1, 1].imshow(weighted_render_depth[0].detach().cpu(), cmap=\"jet\", vmin=0, vmax=6)\n ax[1, 1].set_title(\"Weighted Rendered Depth\")\n ax[0, 2].imshow(diff_rgb, cmap=\"jet\", vmin=0, vmax=0.8)\n ax[0, 2].set_title(f\"Diff RGB, Loss: {torch.round(losses['im'])}\")\n ax[1, 2].imshow(diff_depth, cmap=\"jet\", vmin=0, vmax=0.8)\n ax[1, 2].set_title(f\"Diff Depth, Loss: {torch.round(losses['depth'])}\")\n ax[0, 3].imshow(presence_sil_mask.detach().cpu(), cmap=\"gray\")\n ax[0, 3].set_title(\"Silhouette Mask\")\n ax[1, 3].imshow(mask[0].detach().cpu(), cmap=\"gray\")\n ax[1, 3].set_title(\"Loss Mask\")\n # Turn off axis\n for i in range(2):\n for j in range(4):\n ax[i, j].axis('off')\n # Set Title\n fig.suptitle(f\"Tracking Iteration: {tracking_iteration}\", fontsize=16)\n # Figure Tight Layout\n fig.tight_layout()\n os.makedirs(plot_dir, exist_ok=True)\n plt.savefig(os.path.join(plot_dir, f\"tmp.png\"), bbox_inches='tight')\n plt.close()\n plot_img = cv2.imread(os.path.join(plot_dir, f\"tmp.png\"))\n cv2.imshow('Diff Images', plot_img)\n cv2.waitKey(1)\n ## Save Tracking Loss Viz\n # save_plot_dir = os.path.join(plot_dir, f\"tracking_%04d\" % iter_time_idx)\n # os.makedirs(save_plot_dir, exist_ok=True)\n # plt.savefig(os.path.join(save_plot_dir, f\"%04d.png\" % tracking_iteration), bbox_inches='tight')\n # plt.close()\n\n weighted_losses = {k: v * loss_weights[k] for k, v in losses.items()}\n loss = sum(weighted_losses.values())\n\n seen = radius > 0\n variables['max_2D_radius'][seen] = torch.max(radius[seen], variables['max_2D_radius'][seen])\n variables['seen'] = seen\n weighted_losses['loss'] = loss\n\n return loss, variables, weighted_losses" }, { "identifier": 
"initialize_optimizer", "path": "scripts/splatam.py", "snippet": "def initialize_optimizer(params, lrs_dict, tracking):\n lrs = lrs_dict\n param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()]\n if tracking:\n return torch.optim.Adam(param_groups)\n else:\n return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15)" }, { "identifier": "initialize_params", "path": "scripts/splatam.py", "snippet": "def initialize_params(init_pt_cld, num_frames, mean3_sq_dist):\n num_pts = init_pt_cld.shape[0]\n means3D = init_pt_cld[:, :3] # [num_gaussians, 3]\n unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3]\n logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device=\"cuda\")\n params = {\n 'means3D': means3D,\n 'rgb_colors': init_pt_cld[:, 3:6],\n 'unnorm_rotations': unnorm_rots,\n 'logit_opacities': logit_opacities,\n 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], (1, 1)),\n }\n\n # Initialize a single gaussian trajectory to model the camera poses relative to the first frame\n cam_rots = np.tile([1, 0, 0, 0], (1, 1))\n cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames))\n params['cam_unnorm_rots'] = cam_rots\n params['cam_trans'] = np.zeros((1, 3, num_frames))\n\n for k, v in params.items():\n # Check if value is already a torch tensor\n if not isinstance(v, torch.Tensor):\n params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True))\n else:\n params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True))\n\n variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float(),\n 'timestep': torch.zeros(params['means3D'].shape[0]).cuda().float()}\n\n return params, variables" }, { "identifier": "initialize_camera_pose", "path": "scripts/splatam.py", "snippet": "def initialize_camera_pose(params, curr_time_idx, forward_prop):\n with torch.no_grad():\n if curr_time_idx > 1 and forward_prop:\n # Initialize the camera pose for the current frame based on a constant velocity model\n # Rotation\n prev_rot1 = F.normalize(params['cam_unnorm_rots'][..., curr_time_idx-1].detach())\n prev_rot2 = F.normalize(params['cam_unnorm_rots'][..., curr_time_idx-2].detach())\n new_rot = F.normalize(prev_rot1 + (prev_rot1 - prev_rot2))\n params['cam_unnorm_rots'][..., curr_time_idx] = new_rot.detach()\n # Translation\n prev_tran1 = params['cam_trans'][..., curr_time_idx-1].detach()\n prev_tran2 = params['cam_trans'][..., curr_time_idx-2].detach()\n new_tran = prev_tran1 + (prev_tran1 - prev_tran2)\n params['cam_trans'][..., curr_time_idx] = new_tran.detach()\n else:\n # Initialize the camera pose for the current frame\n params['cam_unnorm_rots'][..., curr_time_idx] = params['cam_unnorm_rots'][..., curr_time_idx-1].detach()\n params['cam_trans'][..., curr_time_idx] = params['cam_trans'][..., curr_time_idx-1].detach()\n \n return params" }, { "identifier": "get_pointcloud", "path": "scripts/splatam.py", "snippet": "def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, \n mask=None, compute_mean_sq_dist=False, mean_sq_dist_method=\"projective\"):\n width, height = color.shape[2], color.shape[1]\n CX = intrinsics[0][2]\n CY = intrinsics[1][2]\n FX = intrinsics[0][0]\n FY = intrinsics[1][1]\n\n # Compute indices of pixels\n x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), \n 
torch.arange(height).cuda().float(),\n indexing='xy')\n xx = (x_grid - CX)/FX\n yy = (y_grid - CY)/FY\n xx = xx.reshape(-1)\n yy = yy.reshape(-1)\n depth_z = depth[0].reshape(-1)\n\n # Initialize point cloud\n pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1)\n if transform_pts:\n pix_ones = torch.ones(height * width, 1).cuda().float()\n pts4 = torch.cat((pts_cam, pix_ones), dim=1)\n c2w = torch.inverse(w2c)\n pts = (c2w @ pts4.T).T[:, :3]\n else:\n pts = pts_cam\n\n # Compute mean squared distance for initializing the scale of the Gaussians\n if compute_mean_sq_dist:\n if mean_sq_dist_method == \"projective\":\n # Projective Geometry (this is fast, farther -> larger radius)\n scale_gaussian = depth_z / ((FX + FY)/2)\n mean3_sq_dist = scale_gaussian**2\n else:\n raise ValueError(f\"Unknown mean_sq_dist_method {mean_sq_dist_method}\")\n \n # Colorize point cloud\n cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C)\n point_cld = torch.cat((pts, cols), -1)\n\n # Select points based on mask\n if mask is not None:\n point_cld = point_cld[mask]\n if compute_mean_sq_dist:\n mean3_sq_dist = mean3_sq_dist[mask]\n\n if compute_mean_sq_dist:\n return point_cld, mean3_sq_dist\n else:\n return point_cld" }, { "identifier": "add_new_gaussians", "path": "scripts/splatam.py", "snippet": "def add_new_gaussians(params, variables, curr_data, sil_thres, time_idx, mean_sq_dist_method):\n # Silhouette Rendering\n transformed_pts = transform_to_frame(params, time_idx, gaussians_grad=False, camera_grad=False)\n depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'],\n transformed_pts)\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n silhouette = depth_sil[1, :, :]\n non_presence_sil_mask = (silhouette < sil_thres)\n # Check for new foreground objects by using GT depth\n gt_depth = curr_data['depth'][0, :, :]\n render_depth = depth_sil[0, :, :]\n depth_error = torch.abs(gt_depth - render_depth) * (gt_depth > 0)\n non_presence_depth_mask = (render_depth > gt_depth) * (depth_error > 50*depth_error.median())\n # Determine non-presence mask\n non_presence_mask = non_presence_sil_mask | non_presence_depth_mask\n # Flatten mask\n non_presence_mask = non_presence_mask.reshape(-1)\n\n # Get the new frame Gaussians based on the Silhouette\n if torch.sum(non_presence_mask) > 0:\n # Get the new pointcloud in the world frame\n curr_cam_rot = torch.nn.functional.normalize(params['cam_unnorm_rots'][..., time_idx].detach())\n curr_cam_tran = params['cam_trans'][..., time_idx].detach()\n curr_w2c = torch.eye(4).cuda().float()\n curr_w2c[:3, :3] = build_rotation(curr_cam_rot)\n curr_w2c[:3, 3] = curr_cam_tran\n valid_depth_mask = (curr_data['depth'][0, :, :] > 0)\n non_presence_mask = non_presence_mask & valid_depth_mask.reshape(-1)\n new_pt_cld, mean3_sq_dist = get_pointcloud(curr_data['im'], curr_data['depth'], curr_data['intrinsics'], \n curr_w2c, mask=non_presence_mask, compute_mean_sq_dist=True,\n mean_sq_dist_method=mean_sq_dist_method)\n new_params = initialize_new_params(new_pt_cld, mean3_sq_dist)\n for k, v in new_params.items():\n params[k] = torch.nn.Parameter(torch.cat((params[k], v), dim=0).requires_grad_(True))\n num_pts = params['means3D'].shape[0]\n variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\").float()\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\").float()\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\").float()\n 
new_timestep = time_idx*torch.ones(new_pt_cld.shape[0],device=\"cuda\").float()\n variables['timestep'] = torch.cat((variables['timestep'],new_timestep),dim=0)\n\n return params, variables" } ]
import argparse import os import shutil import sys import time import json import cv2 import matplotlib.pyplot as plt import numpy as np import torch import torch.nn.functional as F import cyclonedds.idl as idl import cyclonedds.idl.annotations as annotate import cyclonedds.idl.types as types from pathlib import Path from importlib.machinery import SourceFileLoader from tqdm import tqdm from datasets.gradslam_datasets.geometryutils import relative_transformation from utils.common_utils import seed_everything, save_params_ckpt, save_params from utils.eval_helpers import report_progress from utils.keyframe_selection import keyframe_selection_overlap from utils.recon_helpers import setup_camera from utils.slam_external import build_rotation, prune_gaussians, densify from scripts.splatam import get_loss, initialize_optimizer, initialize_params, initialize_camera_pose, get_pointcloud, add_new_gaussians from diff_gaussian_rasterization import GaussianRasterizer as Renderer from dataclasses import dataclass from cyclonedds.domain import DomainParticipant, Domain from cyclonedds.core import Qos, Policy from cyclonedds.sub import DataReader from cyclonedds.topic import Topic from cyclonedds.util import duration
13627
# Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot 
params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette
""" Script to stream RGB-D data from the NeRFCapture iOS App & build a Gaussian Splat on the fly using SplaTAM. The CycloneDDS parts of this script are adapted from the Instant-NGP Repo: https://github.com/NVlabs/instant-ngp/blob/master/scripts/nerfcapture2nerf.py """ #!/usr/bin/env python3 _BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config", default="./configs/iphone/online_demo.py", type=str, help="Path to config file.") return parser.parse_args() # DDS # ================================================================================================== @dataclass @annotate.final @annotate.autoid("sequential") class SplatCaptureFrame(idl.IdlStruct, typename="SplatCaptureData.SplatCaptureFrame"): id: types.uint32 annotate.key("id") timestamp: types.float64 fl_x: types.float32 fl_y: types.float32 cx: types.float32 cy: types.float32 transform_matrix: types.array[types.float32, 16] width: types.uint32 height: types.uint32 image: types.sequence[types.uint8] has_depth: bool depth_width: types.uint32 depth_height: types.uint32 depth_scale: types.float32 depth_image: types.sequence[types.uint8] dds_config = """<?xml version="1.0" encoding="UTF-8" ?> \ <CycloneDDS xmlns="https://cdds.io/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://cdds.io/config https://raw.githubusercontent.com/eclipse-cyclonedds/cyclonedds/master/etc/cyclonedds.xsd"> \ <Domain id="any"> \ <Internal> \ <MinimumSocketReceiveBufferSize>10MB</MinimumSocketReceiveBufferSize> \ </Internal> \ <Tracing> \ <Verbosity>config</Verbosity> \ <OutputFile>stdout</OutputFile> \ </Tracing> \ </Domain> \ </CycloneDDS> \ """ # ================================================================================================== def dataset_capture_loop(reader: DataReader, save_path: Path, overwrite: bool, n_frames: int, depth_scale: float, config: dict): rgb_path = save_path.joinpath("rgb") if rgb_path.exists(): if overwrite: # Prompt user to confirm deletion if (input(f"warning! folder '{save_path}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y": sys.exit(1) shutil.rmtree(save_path) else: print(f"rgb_path {rgb_path} already exists. 
Please use overwrite=True in config if you want to overwrite.") sys.exit(1) print("Waiting for frames...") # Make directory images_dir = save_path.joinpath("rgb") manifest = { "fl_x": 0.0, "fl_y": 0.0, "cx": 0.0, "cy": 0.0, "w": 0.0, "h": 0.0, "frames": [] } total_frames = 0 # Total frames received time_idx = total_frames num_frames = n_frames # Total frames desired # Initialize list to keep track of Keyframes keyframe_list = [] keyframe_time_indices = [] # Init Variables to keep track of ARkit poses and runtimes gt_w2c_all_frames = [] tracking_iter_time_sum = 0 tracking_iter_time_count = 0 mapping_iter_time_sum = 0 mapping_iter_time_count = 0 tracking_frame_time_sum = 0 tracking_frame_time_count = 0 mapping_frame_time_sum = 0 mapping_frame_time_count = 0 P = torch.tensor( [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ] ).float() # Start DDS Loop while True: sample = reader.read_next() # Get frame from NeRFCapture if sample: print(f"{total_frames + 1}/{n_frames} frames received") if total_frames == 0: save_path.mkdir(parents=True, exist_ok=True) images_dir.mkdir(exist_ok=True) manifest["w"] = sample.width manifest["h"] = sample.height manifest["cx"] = sample.cx manifest["cy"] = sample.cy manifest["fl_x"] = sample.fl_x manifest["fl_y"] = sample.fl_y manifest["integer_depth_scale"] = float(depth_scale)/65535.0 if sample.has_depth: depth_dir = save_path.joinpath("depth") depth_dir.mkdir(exist_ok=True) # RGB image = np.asarray(sample.image, dtype=np.uint8).reshape((sample.height, sample.width, 3)) cv2.imwrite(str(images_dir.joinpath(f"{total_frames}.png")), cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. 
Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} 
tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), 
desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette
params, variables = add_new_gaussians(params, variables, densify_curr_data,
15
2023-11-30 20:26:47+00:00
16k
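Editor's note on the record above: the SplaTAM iPhone demo seeds each new camera pose with `initialize_camera_pose`, a constant-velocity motion model that extrapolates from the two previous poses before tracking optimization. Below is a minimal PyTorch sketch of that initialization on bare tensors rather than the SplaTAM `params` dict; the `(4, T)`/`(3, T)` layouts and the demo values are assumptions made for illustration and are not taken from the record.

```python
# Illustrative sketch (not part of the dataset record): constant-velocity pose
# initialization as done by `initialize_camera_pose` above, on plain tensors.
import torch
import torch.nn.functional as F

def constant_velocity_init(cam_rots, cam_trans, t):
    """Extrapolate the pose at time t from the poses at t-1 and t-2.

    cam_rots:  (4, T) unnormalized quaternions, one column per time step.
    cam_trans: (3, T) translations, one column per time step.
    """
    if t > 1:
        r1 = F.normalize(cam_rots[:, t - 1], dim=0)
        r2 = F.normalize(cam_rots[:, t - 2], dim=0)
        new_rot = F.normalize(r1 + (r1 - r2), dim=0)   # linear extrapolation, renormalized
        new_tran = cam_trans[:, t - 1] + (cam_trans[:, t - 1] - cam_trans[:, t - 2])
    else:
        new_rot = cam_rots[:, t - 1]                   # fall back to the previous pose
        new_tran = cam_trans[:, t - 1]
    cam_rots[:, t] = new_rot
    cam_trans[:, t] = new_tran
    return cam_rots, cam_trans

if __name__ == "__main__":
    T = 5
    rots = torch.zeros(4, T); rots[0] = 1.0            # identity quaternions
    trans = torch.zeros(3, T)
    trans[:, 1] = torch.tensor([0.0, 0.0, 0.1])        # camera moved 10 cm along z at t=1
    rots, trans = constant_velocity_init(rots, trans, t=2)
    print(trans[:, 2])                                  # expect ~[0, 0, 0.2]
```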
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forward(self, input, target, mask=None):\n input = extract_key(input, KEY_OUTPUT)\n \n if mask is not None:\n input_filtered = input[mask]\n target_filtered = target[mask]\n\n with amp.autocast(enabled=False): # amp causes NaNs in this loss function\n alpha = 1e-7\n g = torch.log(input_filtered + alpha) - torch.log(target_filtered + alpha)\n Dg = torch.var(g) + self.beta * torch.pow(torch.mean(g), 2)\n loss = 10 * torch.sqrt(Dg)\n\n if torch.isnan(loss):\n print(\"Nan SILog loss\")\n print(\"input:\", input.shape)\n print(\"target:\", target.shape)\n print(\"G\", torch.sum(torch.isnan(g)))\n print(\"Input min max\", torch.min(input), torch.max(input))\n print(\"Target min max\", torch.min(target), torch.max(target))\n print(\"Dg\", torch.isnan(Dg))\n print(\"loss\", torch.isnan(loss))\n\n return loss" }, { "identifier": "DistributionLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class DistributionLoss(nn.Module):\n def __init__(self, max_depth):\n super(DistributionLoss, self).__init__()\n self.name = 'DistributionLoss'\n self.max_depth = max_depth\n\n def forward(self, input, target, mask=None, dist='biLaplacian'):\n \n \n mu0 = input['mu0']\n mu1 = input['mu1']\n sigma0 = input['sigma0']\n sigma1 = input['sigma1']\n pi0 = input['pi0']\n pi1 = input['pi1']\n \n pred_mask = (pi0 / sigma0 > pi1 / sigma1).float()\n pred_depth = (mu0 * pred_mask + mu1 * (1. - pred_mask))\n pred_metric_depth = (1 - pred_depth) * self.max_depth\n\n\n if mask is not None:\n mu0 = mu0[mask]\n mu1 = mu1[mask]\n sigma0 = sigma0[mask]\n sigma1 = sigma1[mask]\n pi0 = pi0[mask]\n pi1 = pi1[mask]\n\n # real_input = real_depth[mask]\n \n real_input = mu0\n pred_metric_depth = pred_metric_depth[mask]\n record_target = target[mask]\n\n\n target_filtered = 1 - target[mask] / self.max_depth\n bi_loss = bimodal_loss(mu0, mu1, sigma0, sigma1, pi0, pi1, target_filtered, dist=dist).mean()\n # print(bi_loss) \n\n alpha = 1e-7\n beta = 0.15\n g = torch.log(real_input + alpha) - torch.log(record_target + alpha)\n Dg = torch.var(g) + beta * torch.pow(torch.mean(g), 2)\n sig_loss = 10 * torch.sqrt(Dg)\n # print(sig_loss)\n \n return bi_loss, sig_loss" }, { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forward(self, input, target, mask=None, interpolate=True, return_interpolated=False):\n hack_input = input\n\n input = extract_key(input, KEY_OUTPUT)\n if input.shape[-1] != target.shape[-1] and interpolate:\n input = nn.functional.interpolate(\n input, target.shape[-2:], mode='bilinear', align_corners=True)\n intr_input = input\n else:\n intr_input = input\n\n if target.ndim == 3:\n target = target.unsqueeze(1)\n\n if mask is not None:\n if mask.ndim == 3:\n mask = mask.unsqueeze(1)\n\n input = input[mask]\n target = target[mask]\n\n with amp.autocast(enabled=False): # amp causes NaNs in this loss function\n alpha = 1e-7\n g = torch.log(input + alpha) - torch.log(target + alpha)\n\n # n, c, h, w = g.shape\n # norm = 1/(h*w)\n # Dg = norm * torch.sum(g**2) - (0.85/(norm**2)) * (torch.sum(g))**2\n\n Dg = torch.var(g) + self.beta * 
torch.pow(torch.mean(g), 2)\n\n loss = 10 * torch.sqrt(Dg)\n\n if torch.isnan(loss):\n if input.numel() == 0:\n loss = torch.mean(hack_input) * 0\n if not return_interpolated:\n return loss\n return loss, intr_input\n \n print(\"Nan SILog loss\")\n print(\"input:\", input.shape)\n print(\"target:\", target.shape)\n print(\"G\", torch.sum(torch.isnan(g)))\n print(\"Input min max\", torch.min(input), torch.max(input))\n print(\"Target min max\", torch.min(target), torch.max(target))\n print(\"Dg\", torch.isnan(Dg))\n print(\"loss\", torch.isnan(loss))\n\n if not return_interpolated:\n return loss\n\n return loss, intr_input" }, { "identifier": "BudgetConstraint", "path": "zoedepth/trainers/loss.py", "snippet": "class BudgetConstraint(nn.Module):\n \"\"\"\n Given budget constraint to reduce expected inference FLOPs in the Dynamic Network.\n \"\"\"\n def __init__(self, loss_mu, flops_all, warm_up=True):\n super().__init__()\n self.loss_mu = loss_mu\n self.flops_all = flops_all\n self.warm_up = warm_up\n\n def forward(self, flops_expt, warm_up_rate=1.0):\n if self.warm_up:\n warm_up_rate = min(1.0, warm_up_rate)\n else:\n warm_up_rate = 1.0\n losses = warm_up_rate * ((flops_expt / self.flops_all - self.loss_mu)**2)\n return losses" }, { "identifier": "HistogramMatchingLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class HistogramMatchingLoss(nn.Module):\n def __init__(self, min_depth, max_depth, bins=512):\n super(HistogramMatchingLoss, self).__init__()\n self.name = 'HistogramMatchingLoss'\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.bins = bins\n\n def forward(self, input, target, mask, interpolate=True):\n if input.shape[-1] != mask.shape[-1] and interpolate:\n input = nn.functional.interpolate(\n input, mask.shape[-2:], mode='bilinear', align_corners=True)\n \n if target.shape[-1] != mask.shape[-1] and interpolate:\n target = nn.functional.interpolate(\n target, mask.shape[-2:], mode='bilinear', align_corners=True)\n\n input[~mask] = 0\n target[~mask] = 0\n\n\n pred_hist = torch.histc(input, bins=self.bins, min=self.min_depth, max=self.max_depth)\n gt_hist = torch.histc(target, bins=self.bins, min=self.min_depth, max=self.max_depth)\n\n pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # print(pred_hist.shape)\n # print(pred_hist)\n # _pred_hist = pred_hist.detach().cpu().numpy()\n # _gt_hist = gt_hist.detach().cpu().numpy()\n # plt.subplot(2, 1, 1)\n # plt.bar(range(len(_pred_hist)), _pred_hist)\n # plt.subplot(2, 1, 2)\n # plt.bar(range(len(_gt_hist)), _gt_hist)\n # plt.savefig('./debug_scale.png')\n\n # Compute cumulative histograms (CDF)\n cdf_pred = torch.cumsum(pred_hist, dim=0)\n cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # Compute Earth Mover's Distance (EMD) between the CDFs\n loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n # loss = torch.mean(torch.sqrt((pred_hist - gt_hist)**2))\n # loss = F.kl_div(torch.log(pred_hist + 1e-10), gt_hist, reduction='mean')\n \n return loss" }, { "identifier": "SSIM", "path": "zoedepth/trainers/loss.py", "snippet": "class SSIM(torch.nn.Module):\n def __init__(self, window_size = 11, size_average = True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2, mask, interpolate=True):\n if img1.shape[-1] != mask.shape[-1] and interpolate:\n img1 = nn.functional.interpolate(\n img1, mask.shape[-2:], mode='bilinear', 
align_corners=True)\n \n if img2.shape[-1] != mask.shape[-1] and interpolate:\n img2 = nn.functional.interpolate(\n img2, mask.shape[-2:], mode='bilinear', align_corners=True)\n\n img1[~mask] = 0\n img2[~mask] = 0\n\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n window = create_window(self.window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n \n self.window = window\n self.channel = channel\n\n\n loss = _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n return loss" }, { "identifier": "ConsistencyLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class ConsistencyLoss(nn.Module):\n def __init__(self, target, focus_flatten=False, wp=1) -> None:\n super().__init__()\n self.name = 'Consistency'\n self.target = target\n self.mode = 'no-resize'\n # self.mode = 'resize'\n self.focus_flatten = focus_flatten\n self.wp = wp\n\n def gradient_y(self, img):\n # gy = torch.cat([F.conv2d(img[:, i, :, :].unsqueeze(0), torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1) for i in range(img.shape[1])], 1)\n gy = F.conv2d(img, torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1)\n return gy\n\n def gradient_x(self, img):\n # gx = torch.cat([F.conv2d(img[:, i, :, :].unsqueeze(0), torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1) for i in range(img.shape[1])], 1)\n gx = F.conv2d(img, torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1)\n return gx\n\n def forward(self, depth_preds, shifts, mask, temp_features, pred_f=None):\n\n common_area_1_list = []\n common_area_2_list = []\n\n if self.focus_flatten:\n # only consider flatten place\n grad = kornia.filters.spatial_gradient(pred_f.detach())\n grad_x, grad_y = grad[:, :, 0, :, :], grad[:, :, 1, :, :]\n grad = torch.sqrt(grad_x ** 2 + grad_y ** 2)\n grad_ext = grad > 0.05 # over 5cm\n grad_ext = grad_ext.float()\n grad_blur = kornia.filters.gaussian_blur2d(grad_ext, (11, 11), (3, 3))\n grad_ext = grad_blur > 0 # over 5cm\n grad_ext = grad_blur == 0 \n mask = torch.logical_and(mask, grad_ext)\n\n\n if self.target == \"mix\":\n ## for feature\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(F.interpolate(mask.float(), (384, 512)).bool(), bs//2, dim=0)\n\n feat_ori_list = []\n feat_shift_list = []\n multi_level_mask = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n split_feat = torch.split(feature, bs//2, dim=0)\n\n _, _, h, w = split_feat[0].shape\n feat_ori_list.append(split_feat[0])\n feat_shift_list.append(split_feat[1])\n\n mask_ori_cur_scale = F.interpolate(split_mask[0].float(), (h, w)).bool()\n multi_level_mask.append(mask_ori_cur_scale)\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list, multi_level_mask)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n _, _, cur_scale_h, cur_scale_w = feat_ori_cur_level.shape\n scale_factor = int(384 / cur_scale_h)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level, shifts)): # iter bs (paired feat)\n c, _, _ = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = 
int(shift_bs[0] * (384/540) / scale_factor), int(shift_bs[1]* (512/960) / scale_factor)\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n consistency_loss_feat = consistency_loss\n\n \n common_area_1_list = []\n common_area_2_list = []\n\n ## for pred\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n for shift, depth_ori, depth_shift, mask_ori, mask_shift in zip(shifts, split_depth[0], split_depth[1], split_mask[0], split_mask[1]):\n shift_h, shift_w = shift[0], shift[1]\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, shift_h:, shift_w:]\n common_area_2 = depth_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:]\n # mask_debug = mask_shift[:, :-shift_h, :-shift_w]\n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = depth_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n # mask_debug = mask_shift[:, :-shift_h, abs(shift_w):]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = depth_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n # mask_debug = mask_shift[:, abs(shift_h):, abs(shift_w):]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = depth_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n # mask_debug = mask_shift[:, abs(shift_h):, :-shift_w]\n else:\n print(\"can you really reach here?\")\n \n common_area_1 = common_area_1[mask_common].flatten()\n common_area_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_1)\n common_area_2_list.append(common_area_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n # pred_hist = torch.histc(common_area_1, bins=512, min=0, max=80)\n # gt_hist = torch.histc(common_area_2, bins=512, min=0, 
max=80)\n\n # pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n # gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # # Compute cumulative histograms (CDF)\n # cdf_pred = torch.cumsum(pred_hist, dim=0)\n # cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # # Compute Earth Mover's Distance (EMD) between the CDFs\n # consistency_loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n consistency_loss = F.mse_loss(common_area_1, common_area_2) \n consistency_loss_pred = consistency_loss\n\n consistency_loss = consistency_loss_pred * self.wp + consistency_loss_feat\n return consistency_loss\n \n elif 'feat' in self.target:\n if self.mode == 'resize':\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n feat_ori_list = []\n feat_shift_list = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n if idx < 4:\n continue\n \n split_feat = torch.split(feature, bs//2, dim=0)\n f = F.interpolate(split_feat[0], (h, w), mode='bilinear', align_corners=True)\n feat_ori_list.append(f)\n f = F.interpolate(split_feat[1], (h, w), mode='bilinear', align_corners=True)\n feat_shift_list.append(f)\n\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, split_mask[0], shifts)): # iter bs (paired feat)\n c, h, w = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = shift_bs[0], shift_bs[1]\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n # common_area_masked_1 = common_area_1.flatten()\n # common_area_masked_2 = common_area_2.flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n\n return consistency_loss\n \n\n else:\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n mask = F.interpolate(mask.float(), (384, 512)).bool() # back to 384, 512\n split_mask = torch.split(mask, bs//2, dim=0)\n\n feat_ori_list = []\n feat_shift_list = []\n multi_level_mask = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n split_feat = torch.split(feature, bs//2, dim=0)\n\n _, _, 
h, w = split_feat[0].shape\n feat_ori_list.append(split_feat[0])\n feat_shift_list.append(split_feat[1])\n\n mask_ori_cur_scale = F.interpolate(split_mask[0].float(), (h, w)).bool()\n multi_level_mask.append(mask_ori_cur_scale)\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list, multi_level_mask)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n _, _, cur_scale_h, cur_scale_w = feat_ori_cur_level.shape\n scale_factor = int(384 / cur_scale_h)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level, shifts)): # iter bs (paired feat)\n c, _, _ = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = int(shift_bs[0] * (384/540) / scale_factor), int(shift_bs[1]* (512/960) / scale_factor)\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n return consistency_loss\n \n elif self.target == 'pred':\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n for shift, depth_ori, depth_shift, mask_ori, mask_shift in zip(shifts, split_depth[0], split_depth[1], split_mask[0], split_mask[1]):\n shift_h, shift_w = shift[0], shift[1]\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, shift_h:, shift_w:]\n common_area_2 = depth_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:]\n # mask_debug = mask_shift[:, :-shift_h, :-shift_w]\n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = depth_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n # mask_debug = mask_shift[:, :-shift_h, abs(shift_w):]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = depth_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n # mask_debug = mask_shift[:, abs(shift_h):, abs(shift_w):]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = depth_shift[:, 
abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n # mask_debug = mask_shift[:, abs(shift_h):, :-shift_w]\n else:\n print(\"can you really reach here?\")\n \n common_area_1 = common_area_1[mask_common].flatten()\n common_area_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_1)\n common_area_2_list.append(common_area_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n # pred_hist = torch.histc(common_area_1, bins=512, min=0, max=80)\n # gt_hist = torch.histc(common_area_2, bins=512, min=0, max=80)\n\n # pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n # gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # # Compute cumulative histograms (CDF)\n # cdf_pred = torch.cumsum(pred_hist, dim=0)\n # cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # # Compute Earth Mover's Distance (EMD) between the CDFs\n # consistency_loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n \n return consistency_loss\n \n else:\n raise NotImplementedError" }, { "identifier": "DATASETS_CONFIG", "path": "zoedepth/utils/config.py", "snippet": "DATASETS_CONFIG = {\n \"kitti\": {\n \"dataset\": \"kitti\",\n \"min_depth\": 0.001,\n \"max_depth\": 80,\n \"data_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file\": \"./train_test_inputs/kitti_eigen_train_files_with_gt.txt\",\n \"input_height\": 352,\n \"input_width\": 1216, # 704\n \"data_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file_eval\": \"./train_test_inputs/kitti_eigen_test_files_with_gt.txt\",\n\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": True,\n \"garg_crop\": True,\n \"eigen_crop\": False,\n \"use_right\": False\n },\n \"kitti_test\": {\n \"dataset\": \"kitti\",\n \"min_depth\": 0.001,\n \"max_depth\": 80,\n \"data_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file\": \"./train_test_inputs/kitti_eigen_train_files_with_gt.txt\",\n \"input_height\": 352,\n \"input_width\": 1216,\n \"data_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file_eval\": \"./train_test_inputs/kitti_eigen_test_files_with_gt.txt\",\n\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n\n \"do_random_rotate\": False,\n \"degree\": 1.0,\n \"do_kb_crop\": True,\n \"garg_crop\": True,\n \"eigen_crop\": False,\n \"use_right\": False\n },\n \"nyu\": {\n \"dataset\": \"nyu\",\n \"avoid_boundary\": False,\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 10,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"gt_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"filenames_file\": \"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder/nyu_train.txt\",\n \"input_height\": 480,\n \"input_width\": 640,\n \"data_path_eval\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"gt_path_eval\": 
os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"filenames_file_eval\": \"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder/nyu_test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n },\n \"u4k\": {\n \"dataset\": \"u4k\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 80,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/u4k\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/val.txt\",\n # \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"mid\": {\n \"dataset\": \"mid\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 10,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/middlebury\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/val.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"gta\": {\n \"dataset\": \"gta\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 80,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? 
will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/val.txt\",\n # \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"ibims\": {\n \"dataset\": \"ibims\",\n \"ibims_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/ibims/ibims1_core_raw/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"sunrgbd\": {\n \"dataset\": \"sunrgbd\",\n \"sunrgbd_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/SUNRGBD/test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 8,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diml_indoor\": {\n \"dataset\": \"diml_indoor\",\n \"diml_indoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diml_indoor_test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diml_outdoor\": {\n \"dataset\": \"diml_outdoor\",\n \"diml_outdoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diml_outdoor_test/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 2,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"diode_indoor\": {\n \"dataset\": \"diode_indoor\",\n \"diode_indoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diode_indoor/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diode_outdoor\": {\n \"dataset\": \"diode_outdoor\",\n \"diode_outdoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diode_outdoor/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"hypersim_test\": {\n \"dataset\": \"hypersim_test\",\n \"hypersim_test_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/hypersim_test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"vkitti\": {\n \"dataset\": \"vkitti\",\n \"vkitti_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/vkitti_test/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"vkitti2\": {\n \"dataset\": \"vkitti2\",\n \"vkitti2_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/vkitti2/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n 
\"max_depth\": 80,\n },\n \"ddad\": {\n \"dataset\": \"ddad\",\n \"ddad_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/ddad/ddad_val/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80,\n },\n}" }, { "identifier": "compute_metrics", "path": "zoedepth/utils/misc.py", "snippet": "def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, disp_gt_edges=None, pred_depths=None, **kwargs):\n \"\"\"Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.\n \"\"\"\n if 'config' in kwargs:\n config = kwargs['config']\n garg_crop = config.garg_crop\n eigen_crop = config.eigen_crop\n min_depth_eval = config.min_depth_eval\n max_depth_eval = config.max_depth_eval\n\n if gt.shape[-2:] != pred.shape[-2:] and interpolate:\n pred = nn.functional.interpolate(\n pred.unsqueeze(dim=0).unsqueeze(dim=0), gt.shape[-2:], mode='bilinear', align_corners=True).squeeze()\n\n pred = pred.squeeze().cpu().numpy()\n pred[pred < min_depth_eval] = min_depth_eval\n pred[pred > max_depth_eval] = max_depth_eval\n pred[np.isinf(pred)] = max_depth_eval\n pred[np.isnan(pred)] = min_depth_eval\n\n gt_depth = gt.squeeze().cpu().numpy()\n valid_mask = np.logical_and(\n gt_depth > min_depth_eval, gt_depth < max_depth_eval)\n\n eval_mask = np.ones(valid_mask.shape)\n if garg_crop or eigen_crop:\n gt_height, gt_width = gt_depth.shape\n eval_mask = np.zeros(valid_mask.shape)\n\n if garg_crop:\n eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),\n int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1\n\n elif eigen_crop:\n # print(\"-\"*10, \" EIGEN CROP \", \"-\"*10)\n if dataset == 'kitti':\n eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),\n int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1\n else:\n # assert gt_depth.shape == (480, 640), \"Error: Eigen crop is currently only valid for (480, 640) images\"\n eval_mask[45:471, 41:601] = 1\n else:\n eval_mask = np.ones(valid_mask.shape)\n valid_mask = np.logical_and(valid_mask, eval_mask)\n\n # if dataset == 'nyu':\n # # pred = scale_shift_linear(torch.tensor(pred_depths), torch.tensor(pred), torch.tensor(valid_mask), fuse=False).numpy()\n # pred = scale_shift_linear(torch.tensor(gt), torch.tensor(pred), torch.tensor(valid_mask), fuse=False).numpy()\n \n metrics = compute_errors(gt_depth[valid_mask], pred[valid_mask])\n\n mask = valid_mask.squeeze() # squeeze\n gt = gt_depth\n pred = pred\n see_depth = 0\n if disp_gt_edges is None:\n print(\"Maybe we need edge maps from origin disp!\")\n edges = get_boundaries(gt, th=0.08, dilation=0)\n else:\n edges = disp_gt_edges\n \n mask = np.logical_and(mask, edges)\n import matplotlib.pyplot as plt\n if mask.sum() > 0:\n see_depth = soft_edge_error(pred, gt)[mask].mean()\n metrics['see'] = see_depth\n \n return metrics" }, { "identifier": "get_black_border", "path": "zoedepth/data/preprocess.py", "snippet": "def get_black_border(rgb_image, **kwargs) -> CropParams:\n \"\"\"Crops the black border of the RGB.\n\n Args:\n rgb: RGB image, shape (H, W, 3).\n\n Returns:\n Crop parameters.\n \"\"\"\n\n return get_border_params(rgb_image, value=0, **kwargs)" }, { "identifier": "BaseTrainer", "path": "zoedepth/trainers/base_trainer.py", "snippet": "def is_rank_zero(args):\n def __init__(self, config, 
model, train_loader, test_loader=None, device=None):\n def resize_to_target(self, prediction, target):\n def load_ckpt(self, checkpoint_dir=\"./checkpoints\", ckpt_type=\"best\"):\n def init_optimizer(self):\n def init_scheduler(self):\n def train_on_batch(self, batch, train_step):\n def validate_on_batch(self, batch, val_step):\n def raise_if_nan(self, losses):\n def iters_per_epoch(self):\n def total_iters(self):\n def should_early_stop(self):\n def train(self):\n def stringify_losses(L): return \"; \".join(map(\n def validate(self):\n def save_checkpoint(self, filename):\n def log_images(self, rgb: Dict[str, list] = {}, depth: Dict[str, list] = {}, scalar_field: Dict[str, list] = {}, prefix=\"\", scalar_cmap=\"turbo_r\", min_depth=None, max_depth=None):\n def log_line_plot(self, data):\n def log_bar_plot(self, title, labels, values):\nclass BaseTrainer:" }, { "identifier": "generatemask", "path": "zoedepth/utils/misc.py", "snippet": "def generatemask(size, k_size=-1, sigma=-1, h_factor=0.03, w_factor=0.02):\n # Generates a Guassian mask\n mask = np.zeros(size, dtype=np.float32)\n if sigma == -1:\n sigma = int(size[0]/16)\n if k_size == -1:\n k_size = int(2 * np.ceil(2 * int(size[0]/16)) + 1)\n # mask[int(0.02*size[0]):size[0] - int(0.02*size[0]), int(0.015*size[1]): size[1] - int(0.015*size[1])] = 1\n mask[int(h_factor*size[0]):size[0] - int(h_factor*size[0]), int(w_factor*size[1]): size[1] - int(w_factor*size[1])] = 1\n mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)\n mask = (mask - mask.min()) / (mask.max() - mask.min())\n mask = mask.astype(np.float32)\n return mask" } ]
import os
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import numpy as np
import wandb
import uuid
import torch.distributed as dist
import copy
import torch.optim as optim
import matplotlib.pyplot as plt
from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss
from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss
from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss
from zoedepth.utils.config import DATASETS_CONFIG
from zoedepth.utils.misc import compute_metrics
from zoedepth.data.preprocess import get_black_border
from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
from datetime import datetime as dt
from zoedepth.utils.misc import generatemask
13,062
# MIT License

# Copyright (c) 2022 Intelligent Systems Lab Org

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# File author: Zhenyu Li

# This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat


class Trainer(BaseTrainer):
    def __init__(self, config, model, train_loader, test_loader=None, device=None):
        self.addf = config.get("addf", False)
        self.lazy_epoch = -1
        self.boostingdepth = config.get("boostingdepth", False)
        super().__init__(config, model, train_loader, test_loader=test_loader, device=device)
        self.device = device
        self.silog_loss = SILogLoss(beta=config.get("beta", 0.15))
        self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15))
        print("sigloss's beta is set to {}".format(config.get("beta", 0.15)))
        self.scaler = amp.GradScaler(enabled=self.config.use_amp)
        self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth)
        self.sampled_training = config.get("sampled_training", False)
        self.sec_stage = config.get("sec_stage", False)
        self.multi_consistency = config.get("multi_consistency", False)
        self.use_blur = config.get("use_blur", False)
        self.dynamic = config.get("dynamic", False)
        if self.dynamic:
            self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0)
            self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True)
        self.use_scale_loss = config.get("use_scale_loss", False)
        if self.use_scale_loss:
            if config.get("scale_type", "ssim"):
                self.scale_loss = SSIM(window_size=config.get("window_size", int(11)))
            else:
# MIT License

# Copyright (c) 2022 Intelligent Systems Lab Org

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# File author: Zhenyu Li

# This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat


class Trainer(BaseTrainer):
    def __init__(self, config, model, train_loader, test_loader=None, device=None):
        self.addf = config.get("addf", False)
        self.lazy_epoch = -1
        self.boostingdepth = config.get("boostingdepth", False)
        super().__init__(config, model, train_loader, test_loader=test_loader, device=device)
        self.device = device
        self.silog_loss = SILogLoss(beta=config.get("beta", 0.15))
        self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15))
        print("sigloss's beta is set to {}".format(config.get("beta", 0.15)))
        self.scaler = amp.GradScaler(enabled=self.config.use_amp)
        self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth)
        self.sampled_training = config.get("sampled_training", False)
        self.sec_stage = config.get("sec_stage", False)
        self.multi_consistency = config.get("multi_consistency", False)
        self.use_blur = config.get("use_blur", False)
        self.dynamic = config.get("dynamic", False)
        if self.dynamic:
            self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0)
            self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True)
        self.use_scale_loss = config.get("use_scale_loss", False)
        if self.use_scale_loss:
            if config.get("scale_type", "ssim"):
                self.scale_loss = SSIM(window_size=config.get("window_size", int(11)))
            else:
self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth)
4
2023-12-04 08:43:15+00:00
16k
baaivision/GeoDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
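The retrieved context above includes several small diffusion utilities (a make_beta_schedule-style linear schedule, extract_into_tensor, noise_like). The following is a minimal, self-contained sketch of how such utilities compose into the closed-form forward process x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps; the helper definitions are re-stated locally for illustration under a linear-schedule assumption and are not imported from the repository.

import torch

# Local re-statements for illustration only (assumption: they mirror the snippets above,
# they are not imported from extern.ldm_zero123).
def linear_betas(n_timestep, linear_start=1e-4, linear_end=2e-2):
    # the "linear" schedule squares a linspace over sqrt(beta)
    return torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2

def extract_into_tensor(a, t, x_shape):
    # pick the schedule entry for each sample's timestep and reshape for broadcasting
    b, *_ = t.shape
    return a.gather(-1, t).reshape(b, *((1,) * (len(x_shape) - 1)))

betas = linear_betas(1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0).float()

x0 = torch.randn(4, 3, 8, 8)          # toy batch standing in for latents
t = torch.randint(0, 1000, (4,))      # one timestep per sample
noise = torch.randn_like(x0)

# closed-form forward process: x_t = sqrt(a_bar_t) * x0 + sqrt(1 - a_bar_t) * eps
x_t = (extract_into_tensor(alphas_cumprod.sqrt(), t, x0.shape) * x0
       + extract_into_tensor((1.0 - alphas_cumprod).sqrt(), t, x0.shape) * noise)
print(x_t.shape)                       # torch.Size([4, 3, 8, 8])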
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
11,975
key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc
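The unconditional conditioning assembled in the code above (a repeated null cross-attention embedding plus an all-zero concat latent) is what classifier-free guidance extrapolates away from at sampling time, mirroring the e_t combination in the DDIM sampler snippet earlier in the context. Below is a toy sketch of that combination step under a stand-in denoiser; the function and variable names are illustrative and not part of the repository's API.

import torch

def guided_eps(apply_model, x, t, cond, uncond, scale):
    # run the denoiser on the unconditional and conditional batches in one pass,
    # then extrapolate: eps = eps_uncond + scale * (eps_cond - eps_uncond)
    x_in = torch.cat([x, x])
    t_in = torch.cat([t, t])
    c_in = {k: [torch.cat([uncond[k][i], cond[k][i]]) for i in range(len(cond[k]))] for k in cond}
    eps_uncond, eps_cond = apply_model(x_in, t_in, c_in).chunk(2)
    return eps_uncond + scale * (eps_cond - eps_uncond)

toy_denoiser = lambda x, t, c: torch.randn_like(x)   # stand-in for the real apply_model call
x = torch.randn(2, 4, 32, 32)
t = torch.full((2,), 500, dtype=torch.long)
cond = {"c_crossattn": [torch.randn(2, 1, 768)], "c_concat": [torch.randn(2, 4, 32, 32)]}
uncond = {"c_crossattn": [torch.zeros(2, 1, 768)], "c_concat": [torch.zeros(2, 4, 32, 32)]}
eps = guided_eps(toy_denoiser, x, t, cond, uncond, scale=3.0)
print(eps.shape)                                     # torch.Size([2, 4, 32, 32])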
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc
elif isimage(xc):
15
2023-12-01 01:59:42+00:00
16k
lucidrains/meshgpt-pytorch
meshgpt_pytorch/trainer.py
[ { "identifier": "custom_collate", "path": "meshgpt_pytorch/data.py", "snippet": "def custom_collate(data, pad_id = -1):\n is_dict = isinstance(first(data), dict)\n\n if is_dict:\n keys = first(data).keys()\n data = [d.values() for d in data]\n\n output = []\n\n for datum in zip(*data):\n if is_tensor(first(datum)):\n datum = pad_sequence(datum, batch_first = True, padding_value = pad_id)\n else:\n datum = list(datum)\n\n output.append(datum)\n\n output = tuple(output)\n\n if is_dict:\n output = dict(zip(keys, output))\n\n return output" }, { "identifier": "__version__", "path": "meshgpt_pytorch/version.py", "snippet": "" }, { "identifier": "MeshAutoencoder", "path": "meshgpt_pytorch/meshgpt_pytorch.py", "snippet": "class MeshAutoencoder(Module):\n @beartype\n def __init__(\n self,\n num_discrete_coors = 128,\n coor_continuous_range: Tuple[float, float] = (-1., 1.),\n dim_coor_embed = 64,\n num_discrete_area = 128,\n dim_area_embed = 16,\n num_discrete_normals = 128,\n dim_normal_embed = 64,\n num_discrete_angle = 128,\n dim_angle_embed = 16,\n encoder_dims_through_depth: Tuple[int, ...] = (\n 64, 128, 256, 256, 576\n ),\n init_decoder_conv_kernel = 7,\n decoder_dims_through_depth: Tuple[int, ...] = (\n 128, 128, 128, 128,\n 192, 192, 192, 192,\n 256, 256, 256, 256, 256, 256,\n 384, 384, 384\n ),\n dim_codebook = 192,\n num_quantizers = 2, # or 'D' in the paper\n codebook_size = 16384, # they use 16k, shared codebook between layers\n use_residual_lfq = True, # whether to use the latest lookup-free quantization\n rq_kwargs: dict = dict(\n quantize_dropout = True,\n quantize_dropout_cutoff_index = 1,\n quantize_dropout_multiple_of = 1,\n ),\n rvq_kwargs: dict = dict(\n kmeans_init = True,\n threshold_ema_dead_code = 2,\n ),\n rlfq_kwargs: dict = dict(\n frac_per_sample_entropy = 1.\n ),\n rvq_stochastic_sample_codes = True,\n sageconv_kwargs: dict = dict(\n normalize = True,\n project = True\n ),\n commit_loss_weight = 0.1,\n bin_smooth_blur_sigma = 0.4, # they blur the one hot discretized coordinate positions\n attn_encoder_depth = 0,\n attn_decoder_depth = 0,\n local_attn_kwargs: dict = dict(\n dim_head = 32,\n heads = 8\n ),\n local_attn_window_size = 64,\n linear_attn_kwargs: dict = dict(\n dim_head = 8,\n heads = 16\n ),\n use_linear_attn = True,\n pad_id = -1,\n flash_attn = True,\n sageconv_dropout = 0.,\n attn_dropout = 0.,\n ff_dropout = 0.,\n resnet_dropout = 0,\n checkpoint_quantizer = False\n ):\n super().__init__()\n\n # main face coordinate embedding\n\n self.num_discrete_coors = num_discrete_coors\n self.coor_continuous_range = coor_continuous_range\n\n self.discretize_face_coords = partial(discretize, num_discrete = num_discrete_coors, continuous_range = coor_continuous_range)\n self.coor_embed = nn.Embedding(num_discrete_coors, dim_coor_embed)\n\n # derived feature embedding\n\n self.discretize_angle = partial(discretize, num_discrete = num_discrete_angle, continuous_range = (0., pi))\n self.angle_embed = nn.Embedding(num_discrete_angle, dim_angle_embed)\n\n lo, hi = coor_continuous_range\n self.discretize_area = partial(discretize, num_discrete = num_discrete_area, continuous_range = (0., (hi - lo) ** 2))\n self.area_embed = nn.Embedding(num_discrete_area, dim_area_embed)\n\n self.discretize_normals = partial(discretize, num_discrete = num_discrete_normals, continuous_range = coor_continuous_range)\n self.normal_embed = nn.Embedding(num_discrete_normals, dim_normal_embed)\n\n # attention related\n\n attn_kwargs = dict(\n causal = False,\n prenorm = True,\n dropout = 
attn_dropout,\n window_size = local_attn_window_size,\n )\n\n # initial dimension\n\n init_dim = dim_coor_embed * 9 + dim_angle_embed * 3 + dim_normal_embed * 3 + dim_area_embed\n\n # project into model dimension\n\n self.project_in = nn.Linear(init_dim, dim_codebook)\n\n # initial sage conv\n\n sageconv_kwargs = {**sageconv_kwargs, 'sageconv_dropout' : sageconv_dropout}\n\n init_encoder_dim, *encoder_dims_through_depth = encoder_dims_through_depth\n curr_dim = init_encoder_dim\n\n self.init_sage_conv = SAGEConv(dim_codebook, init_encoder_dim, **sageconv_kwargs)\n\n self.init_encoder_act_and_norm = nn.Sequential(\n nn.SiLU(),\n nn.LayerNorm(init_encoder_dim)\n )\n\n self.encoders = ModuleList([])\n\n for dim_layer in encoder_dims_through_depth:\n sage_conv = SAGEConv(\n curr_dim,\n dim_layer,\n **sageconv_kwargs\n )\n\n self.encoders.append(sage_conv)\n curr_dim = dim_layer\n\n self.encoder_attn_blocks = ModuleList([])\n\n for _ in range(attn_encoder_depth):\n self.encoder_attn_blocks.append(nn.ModuleList([\n TaylorSeriesLinearAttn(curr_dim, prenorm = True, **linear_attn_kwargs) if use_linear_attn else None,\n LocalMHA(dim = curr_dim, **attn_kwargs, **local_attn_kwargs),\n nn.Sequential(RMSNorm(curr_dim), FeedForward(curr_dim, glu = True, dropout = ff_dropout))\n ]))\n\n # residual quantization\n\n self.codebook_size = codebook_size\n self.num_quantizers = num_quantizers\n\n self.project_dim_codebook = nn.Linear(curr_dim, dim_codebook * 3)\n\n if use_residual_lfq:\n self.quantizer = ResidualLFQ(\n dim = dim_codebook,\n num_quantizers = num_quantizers,\n codebook_size = codebook_size,\n commitment_loss_weight = 1.,\n **rlfq_kwargs,\n **rq_kwargs\n )\n else:\n self.quantizer = ResidualVQ(\n dim = dim_codebook,\n num_quantizers = num_quantizers,\n codebook_size = codebook_size,\n shared_codebook = True,\n commitment_weight = 1.,\n stochastic_sample_codes = rvq_stochastic_sample_codes,\n **rvq_kwargs,\n **rq_kwargs\n )\n\n self.checkpoint_quantizer = checkpoint_quantizer # whether to memory checkpoint the quantizer\n\n self.pad_id = pad_id # for variable lengthed faces, padding quantized ids will be set to this value\n\n # decoder\n\n decoder_input_dim = dim_codebook * 3\n\n self.decoder_attn_blocks = ModuleList([])\n\n for _ in range(attn_decoder_depth):\n self.decoder_attn_blocks.append(nn.ModuleList([\n TaylorSeriesLinearAttn(decoder_input_dim, prenorm = True, **linear_attn_kwargs) if use_linear_attn else None,\n LocalMHA(dim = decoder_input_dim, **attn_kwargs, **local_attn_kwargs),\n nn.Sequential(RMSNorm(decoder_input_dim), FeedForward(decoder_input_dim, glu = True, dropout = ff_dropout))\n ]))\n\n init_decoder_dim, *decoder_dims_through_depth = decoder_dims_through_depth\n curr_dim = init_decoder_dim\n\n assert is_odd(init_decoder_conv_kernel)\n\n self.init_decoder_conv = nn.Sequential(\n nn.Conv1d(dim_codebook * 3, init_decoder_dim, kernel_size = init_decoder_conv_kernel, padding = init_decoder_conv_kernel // 2),\n nn.SiLU(),\n Rearrange('b c n -> b n c'),\n nn.LayerNorm(init_decoder_dim),\n Rearrange('b n c -> b c n')\n )\n\n self.decoders = ModuleList([])\n\n for dim_layer in decoder_dims_through_depth:\n resnet_block = ResnetBlock(curr_dim, dim_layer, dropout = resnet_dropout)\n\n self.decoders.append(resnet_block)\n curr_dim = dim_layer\n\n self.to_coor_logits = nn.Sequential(\n nn.Linear(curr_dim, num_discrete_coors * 9),\n Rearrange('... (v c) -> ... 
v c', v = 9)\n )\n\n # loss related\n\n self.commit_loss_weight = commit_loss_weight\n self.bin_smooth_blur_sigma = bin_smooth_blur_sigma\n\n @beartype\n def encode(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, float],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: TensorType['b', 'e', 2, int],\n face_mask: TensorType['b', 'nf', bool],\n face_edges_mask: TensorType['b', 'e', bool],\n return_face_coordinates = False\n ):\n \"\"\"\n einops:\n b - batch\n nf - number of faces\n nv - number of vertices (3)\n c - coordinates (3)\n d - embed dim\n \"\"\"\n\n batch, num_vertices, num_coors, device = *vertices.shape, vertices.device\n _, num_faces, _ = faces.shape\n\n face_without_pad = faces.masked_fill(~rearrange(face_mask, 'b nf -> b nf 1'), 0)\n\n faces_vertices = repeat(face_without_pad, 'b nf nv -> b nf nv c', c = num_coors)\n vertices = repeat(vertices, 'b nv c -> b nf nv c', nf = num_faces)\n\n # continuous face coords\n\n face_coords = vertices.gather(-2, faces_vertices)\n\n # compute derived features and embed\n\n derived_features = get_derived_face_features(face_coords)\n\n discrete_angle = self.discretize_angle(derived_features['angles'])\n angle_embed = self.angle_embed(discrete_angle)\n\n discrete_area = self.discretize_area(derived_features['area'])\n area_embed = self.area_embed(discrete_area)\n\n discrete_normal = self.discretize_normals(derived_features['normals'])\n normal_embed = self.normal_embed(discrete_normal)\n\n # discretize vertices for face coordinate embedding\n\n discrete_face_coords = self.discretize_face_coords(face_coords)\n discrete_face_coords = rearrange(discrete_face_coords, 'b nf nv c -> b nf (nv c)') # 9 coordinates per face\n\n face_coor_embed = self.coor_embed(discrete_face_coords)\n face_coor_embed = rearrange(face_coor_embed, 'b nf c d -> b nf (c d)')\n\n # combine all features and project into model dimension\n\n face_embed, _ = pack([face_coor_embed, angle_embed, area_embed, normal_embed], 'b nf *')\n face_embed = self.project_in(face_embed)\n\n # handle variable lengths by using masked_select and masked_scatter\n\n # first handle edges\n # needs to be offset by number of faces for each batch\n\n face_index_offsets = reduce(face_mask.long(), 'b nf -> b', 'sum')\n face_index_offsets = F.pad(face_index_offsets.cumsum(dim = 0), (1, -1), value = 0)\n face_index_offsets = rearrange(face_index_offsets, 'b -> b 1 1')\n\n face_edges = face_edges + face_index_offsets\n face_edges = face_edges[face_edges_mask]\n face_edges = rearrange(face_edges, 'be ij -> ij be')\n\n # next prepare the face_mask for using masked_select and masked_scatter\n\n orig_face_embed_shape = face_embed.shape[:2]\n\n face_embed = face_embed[face_mask]\n\n # initial sage conv followed by activation and norm\n\n face_embed = self.init_sage_conv(face_embed, face_edges)\n\n face_embed = self.init_encoder_act_and_norm(face_embed)\n\n for conv in self.encoders:\n face_embed = conv(face_embed, face_edges)\n\n shape = (*orig_face_embed_shape, face_embed.shape[-1])\n\n face_embed = face_embed.new_zeros(shape).masked_scatter(rearrange(face_mask, '... -> ... 
1'), face_embed)\n\n for linear_attn, attn, ff in self.encoder_attn_blocks:\n if exists(linear_attn):\n face_embed = linear_attn(face_embed, mask = face_mask) + face_embed\n\n face_embed = attn(face_embed, mask = face_mask) + face_embed\n face_embed = ff(face_embed) + face_embed\n\n if not return_face_coordinates:\n return face_embed\n\n return face_embed, discrete_face_coords\n\n @beartype\n def quantize(\n self,\n *,\n faces: TensorType['b', 'nf', 3, int],\n face_mask: TensorType['b', 'n', bool],\n face_embed: TensorType['b', 'nf', 'd', float],\n pad_id = None,\n rvq_sample_codebook_temp = 1.\n ):\n pad_id = default(pad_id, self.pad_id)\n batch, num_faces, device = *faces.shape[:2], faces.device\n\n max_vertex_index = faces.amax()\n num_vertices = int(max_vertex_index.item() + 1)\n\n face_embed = self.project_dim_codebook(face_embed)\n face_embed = rearrange(face_embed, 'b nf (nv d) -> b nf nv d', nv = 3)\n\n vertex_dim = face_embed.shape[-1]\n vertices = torch.zeros((batch, num_vertices, vertex_dim), device = device)\n\n # create pad vertex, due to variable lengthed faces\n\n pad_vertex_id = num_vertices\n vertices = pad_at_dim(vertices, (0, 1), dim = -2, value = 0.)\n\n faces = faces.masked_fill(~rearrange(face_mask, 'b n -> b n 1'), pad_vertex_id)\n\n # prepare for scatter mean\n\n faces_with_dim = repeat(faces, 'b nf nv -> b (nf nv) d', d = vertex_dim)\n\n face_embed = rearrange(face_embed, 'b ... d -> b (...) d')\n\n # scatter mean\n\n averaged_vertices = scatter_mean(vertices, faces_with_dim, face_embed, dim = -2)\n\n # mask out null vertex token\n\n mask = torch.ones((batch, num_vertices + 1), device = device, dtype = torch.bool)\n mask[:, -1] = False\n\n # rvq specific kwargs\n\n quantize_kwargs = dict(mask = mask)\n\n if isinstance(self.quantizer, ResidualVQ):\n quantize_kwargs.update(sample_codebook_temp = rvq_sample_codebook_temp)\n\n # a quantize function that makes it memory checkpointable\n\n def quantize_wrapper_fn(inp):\n unquantized, quantize_kwargs = inp\n return self.quantizer(unquantized, **quantize_kwargs)\n\n # maybe checkpoint the quantize fn\n\n if self.checkpoint_quantizer:\n quantize_wrapper_fn = partial(checkpoint, quantize_wrapper_fn, use_reentrant = False)\n\n # residual VQ\n\n quantized, codes, commit_loss = quantize_wrapper_fn((averaged_vertices, quantize_kwargs))\n\n # gather quantized vertexes back to faces for decoding\n # now the faces have quantized vertices\n\n face_embed_output = quantized.gather(-2, faces_with_dim)\n face_embed_output = rearrange(face_embed_output, 'b (nf nv) d -> b nf (nv d)', nv = 3)\n\n # vertex codes also need to be gathered to be organized by face sequence\n # for autoregressive learning\n\n faces_with_quantized_dim = repeat(faces, 'b nf nv -> b (nf nv) q', q = self.num_quantizers)\n codes_output = codes.gather(-2, faces_with_quantized_dim)\n\n # make sure codes being outputted have this padding\n\n face_mask = repeat(face_mask, 'b nf -> b (nf nv) 1', nv = 3)\n codes_output = codes_output.masked_fill(~face_mask, self.pad_id)\n\n # output quantized, codes, as well as commitment loss\n\n return face_embed_output, codes_output, commit_loss\n\n @beartype\n def decode(\n self,\n quantized: TensorType['b', 'n', 'd', float],\n face_mask: TensorType['b', 'n', bool]\n ):\n conv_face_mask = rearrange(face_mask, 'b n -> b 1 n')\n\n x = quantized\n\n for linear_attn, attn, ff in self.decoder_attn_blocks:\n if exists(linear_attn):\n x = linear_attn(x, mask = face_mask) + x\n\n x = attn(x, mask = face_mask) + x\n x = ff(x) + x\n\n x = 
rearrange(x, 'b n d -> b d n')\n\n x = x.masked_fill(~conv_face_mask, 0.)\n x = self.init_decoder_conv(x)\n\n for resnet_block in self.decoders:\n x = resnet_block(x, mask = conv_face_mask)\n\n return rearrange(x, 'b d n -> b n d')\n\n @beartype\n @torch.no_grad()\n def decode_from_codes_to_faces(\n self,\n codes: Tensor,\n face_mask: Optional[TensorType['b', 'n', bool]] = None,\n return_discrete_codes = False\n ):\n codes = rearrange(codes, 'b ... -> b (...)')\n\n if not exists(face_mask):\n face_mask = reduce(codes != self.pad_id, 'b (nf nv q) -> b nf', 'all', nv = 3, q = self.num_quantizers)\n\n # handle different code shapes\n\n codes = rearrange(codes, 'b (n q) -> b n q', q = self.num_quantizers)\n\n # decode\n\n quantized = self.quantizer.get_output_from_indices(codes)\n quantized = rearrange(quantized, 'b (nf nv) d -> b nf (nv d)', nv = 3)\n\n decoded = self.decode(\n quantized,\n face_mask = face_mask\n )\n\n decoded = decoded.masked_fill(~face_mask[..., None], 0.)\n pred_face_coords = self.to_coor_logits(decoded)\n\n pred_face_coords = pred_face_coords.argmax(dim = -1)\n\n pred_face_coords = rearrange(pred_face_coords, '... (v c) -> ... v c', v = 3)\n\n # back to continuous space\n\n continuous_coors = undiscretize(\n pred_face_coords,\n num_discrete = self.num_discrete_coors,\n continuous_range = self.coor_continuous_range\n )\n\n # mask out with nan\n\n continuous_coors = continuous_coors.masked_fill(~rearrange(face_mask, 'b nf -> b nf 1 1'), float('nan'))\n\n if not return_discrete_codes:\n return continuous_coors, face_mask\n\n return continuous_coors, pred_face_coords, face_mask\n\n @torch.no_grad()\n def tokenize(self, vertices, faces, face_edges = None, **kwargs):\n assert 'return_codes' not in kwargs\n\n inputs = [vertices, faces, face_edges]\n inputs = [*filter(exists, inputs)]\n ndims = {i.ndim for i in inputs}\n\n assert len(ndims) == 1\n batch_less = first(list(ndims)) == 2\n\n if batch_less:\n inputs = [rearrange(i, '... -> 1 ...') for i in inputs]\n\n input_kwargs = dict(zip(['vertices', 'faces', 'face_edges'], inputs))\n\n self.eval()\n\n codes = self.forward(\n **input_kwargs,\n return_codes = True,\n **kwargs\n )\n\n if batch_less:\n codes = rearrange(codes, '1 ... 
-> ...')\n\n return codes\n\n @beartype\n def forward(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, float],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: Optional[TensorType['b', 'e', 2, int]] = None,\n return_codes = False,\n return_loss_breakdown = False,\n return_recon_faces = False,\n only_return_recon_faces = False,\n rvq_sample_codebook_temp = 1.\n ):\n if not exists(face_edges):\n face_edges = derive_face_edges_from_faces(faces, pad_id = self.pad_id)\n\n num_faces, num_face_edges, device = faces.shape[1], face_edges.shape[1], faces.device\n\n face_mask = reduce(faces != self.pad_id, 'b nf c -> b nf', 'all')\n face_edges_mask = reduce(face_edges != self.pad_id, 'b e ij -> b e', 'all')\n\n encoded, face_coordinates = self.encode(\n vertices = vertices,\n faces = faces,\n face_edges = face_edges,\n face_edges_mask = face_edges_mask,\n face_mask = face_mask,\n return_face_coordinates = True\n )\n\n quantized, codes, commit_loss = self.quantize(\n face_embed = encoded,\n faces = faces,\n face_mask = face_mask,\n rvq_sample_codebook_temp = rvq_sample_codebook_temp\n )\n\n if return_codes:\n assert not return_recon_faces, 'cannot return reconstructed faces when just returning raw codes'\n\n codes = codes.masked_fill(~repeat(face_mask, 'b nf -> b (nf 3) 1'), self.pad_id)\n return codes\n\n decode = self.decode(\n quantized,\n face_mask = face_mask\n )\n\n pred_face_coords = self.to_coor_logits(decode)\n\n # compute reconstructed faces if needed\n\n if return_recon_faces or only_return_recon_faces:\n\n recon_faces = undiscretize(\n pred_face_coords.argmax(dim = -1),\n num_discrete = self.num_discrete_coors,\n continuous_range = self.coor_continuous_range,\n )\n\n recon_faces = rearrange(recon_faces, 'b nf (nv c) -> b nf nv c', nv = 3)\n face_mask = rearrange(face_mask, 'b nf -> b nf 1 1')\n recon_faces = recon_faces.masked_fill(~face_mask, float('nan'))\n face_mask = rearrange(face_mask, 'b nf 1 1 -> b nf')\n\n if only_return_recon_faces:\n return recon_faces\n\n # prepare for recon loss\n\n pred_face_coords = rearrange(pred_face_coords, 'b ... c -> b c (...)')\n face_coordinates = rearrange(face_coordinates, 'b ... 
-> b 1 (...)')\n\n # reconstruction loss on discretized coordinates on each face\n # they also smooth (blur) the one hot positions, localized label smoothing basically\n\n with autocast(enabled = False):\n pred_log_prob = pred_face_coords.log_softmax(dim = 1)\n\n target_one_hot = torch.zeros_like(pred_log_prob).scatter(1, face_coordinates, 1.)\n\n if self.bin_smooth_blur_sigma >= 0.:\n target_one_hot = gaussian_blur_1d(target_one_hot, sigma = self.bin_smooth_blur_sigma)\n\n # cross entropy with localized smoothing\n\n recon_losses = (-target_one_hot * pred_log_prob).sum(dim = 1)\n\n face_mask = repeat(face_mask, 'b nf -> b (nf r)', r = 9)\n recon_loss = recon_losses[face_mask].mean()\n\n # calculate total loss\n\n total_loss = recon_loss + \\\n commit_loss.sum() * self.commit_loss_weight\n\n # calculate loss breakdown if needed\n\n loss_breakdown = (recon_loss, commit_loss)\n\n # some return logic\n\n if not return_loss_breakdown:\n if not return_recon_faces:\n return total_loss\n\n return recon_faces, total_loss\n\n if not return_recon_faces:\n return total_loss, loss_breakdown\n\n return recon_faces, total_loss, loss_breakdown" }, { "identifier": "MeshTransformer", "path": "meshgpt_pytorch/meshgpt_pytorch.py", "snippet": "class MeshTransformer(Module):\n @beartype\n def __init__(\n self,\n autoencoder: MeshAutoencoder,\n *,\n dim: Union[int, Tuple[int, int]] = 512,\n max_seq_len = 8192,\n flash_attn = True,\n attn_depth = 12,\n attn_dim_head = 64,\n attn_heads = 16,\n attn_kwargs: dict = dict(\n ff_glu = True,\n num_mem_kv = 4\n ),\n dropout = 0.,\n coarse_pre_gateloop_depth = 2,\n fine_pre_gateloop_depth = 2,\n gateloop_use_heinsen = False,\n fine_attn_depth = 2,\n fine_attn_dim_head = 32,\n fine_attn_heads = 8,\n pad_id = -1,\n condition_on_text = False,\n text_condition_model_types = ('t5',),\n text_condition_cond_drop_prob = 0.25\n ):\n super().__init__()\n\n dim, dim_fine = (dim, dim) if isinstance(dim, int) else dim\n\n self.autoencoder = autoencoder\n set_module_requires_grad_(autoencoder, False)\n\n self.codebook_size = autoencoder.codebook_size\n self.num_quantizers = autoencoder.num_quantizers\n\n self.sos_token = nn.Parameter(torch.randn(dim_fine))\n self.eos_token_id = self.codebook_size\n\n # they use axial positional embeddings\n\n assert divisible_by(max_seq_len, 3 * self.num_quantizers), f'max_seq_len ({max_seq_len}) must be divisible by (3 x {self.num_quantizers}) = {3 * self.num_quantizers}' # 3 vertices per face, with D codes per vertex\n\n self.token_embed = nn.Embedding(self.codebook_size + 1, dim)\n\n self.quantize_level_embed = nn.Parameter(torch.randn(self.num_quantizers, dim))\n self.vertex_embed = nn.Parameter(torch.randn(3, dim))\n\n self.abs_pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.max_seq_len = max_seq_len\n\n # text condition\n\n self.condition_on_text = condition_on_text\n self.conditioner = None\n\n cross_attn_dim_context = None\n\n if condition_on_text:\n self.conditioner = TextEmbeddingReturner(\n model_types = text_condition_model_types,\n cond_drop_prob = text_condition_cond_drop_prob\n )\n cross_attn_dim_context = self.conditioner.dim_latent\n\n # for summarizing the vertices of each face\n\n self.to_face_tokens = nn.Sequential(\n nn.Linear(self.num_quantizers * 3 * dim, dim),\n nn.LayerNorm(dim)\n )\n\n self.coarse_gateloop_block = GateLoopBlock(dim, depth = coarse_pre_gateloop_depth, use_heinsen = gateloop_use_heinsen) if coarse_pre_gateloop_depth > 0 else None\n\n # main autoregressive attention network\n # attending to a face token\n\n 
self.decoder = Decoder(\n dim = dim,\n depth = attn_depth,\n dim_head = attn_dim_head,\n heads = attn_heads,\n attn_flash = flash_attn,\n attn_dropout = dropout,\n ff_dropout = dropout,\n cross_attend = condition_on_text,\n cross_attn_dim_context = cross_attn_dim_context,\n **attn_kwargs\n )\n\n # projection from coarse to fine, if needed\n\n self.maybe_project_coarse_to_fine = nn.Linear(dim, dim_fine) if dim != dim_fine else nn.Identity()\n\n # address a weakness in attention\n\n self.fine_gateloop_block = GateLoopBlock(dim, depth = fine_pre_gateloop_depth) if fine_pre_gateloop_depth > 0 else None\n\n # decoding the vertices, 2-stage hierarchy\n\n self.fine_decoder = Decoder(\n dim = dim_fine,\n depth = fine_attn_depth,\n dim_head = attn_dim_head,\n heads = attn_heads,\n attn_flash = flash_attn,\n attn_dropout = dropout,\n ff_dropout = dropout,\n **attn_kwargs\n )\n\n # to logits\n\n self.to_logits = nn.Linear(dim_fine, self.codebook_size + 1)\n\n # padding id\n # force the autoencoder to use the same pad_id given in transformer\n\n self.pad_id = pad_id\n autoencoder.pad_id = pad_id\n\n @property\n def device(self):\n return next(self.parameters()).device\n\n @beartype\n @torch.no_grad()\n def embed_texts(self, texts: Union[str, List[str]]):\n single_text = not isinstance(texts, list)\n if single_text:\n texts = [texts]\n\n assert exists(self.conditioner)\n text_embeds = self.conditioner.embed_texts(texts).detach()\n\n if single_text:\n text_embeds = text_embeds[0]\n\n return text_embeds\n\n @eval_decorator\n @torch.no_grad()\n @beartype\n def generate(\n self,\n prompt: Optional[Tensor] = None,\n batch_size: Optional[int] = None,\n filter_logits_fn: Callable = top_k,\n filter_kwargs: dict = dict(),\n temperature = 1.,\n return_codes = False,\n texts: Optional[List[str]] = None,\n text_embeds: Optional[Tensor] = None,\n cond_scale = 1.,\n cache_kv = True,\n max_seq_len = None,\n face_coords_to_file: Optional[Callable[[Tensor], Any]] = None\n ):\n max_seq_len = default(max_seq_len, self.max_seq_len)\n\n if exists(prompt):\n assert not exists(batch_size)\n\n prompt = rearrange(prompt, 'b ... 
-> b (...)')\n assert prompt.shape[-1] <= self.max_seq_len\n\n batch_size = prompt.shape[0]\n\n if self.condition_on_text:\n assert exists(texts) ^ exists(text_embeds), '`text` or `text_embeds` must be passed in if `condition_on_text` is set to True'\n if exists(texts):\n text_embeds = self.embed_texts(texts)\n\n batch_size = default(batch_size, text_embeds.shape[0])\n\n batch_size = default(batch_size, 1)\n\n codes = default(prompt, torch.empty((batch_size, 0), dtype = torch.long, device = self.device))\n\n curr_length = codes.shape[-1]\n\n cache = (None, None)\n\n for i in tqdm(range(curr_length, max_seq_len)):\n # v1([q1] [q2] [q1] [q2] [q1] [q2]) v2([eos| q1] [q2] [q1] [q2] [q1] [q2]) -> 0 1 2 3 4 5 6 7 8 9 10 11 12 -> v1(F F F F F F) v2(T F F F F F) v3(T F F F F F)\n\n can_eos = i != 0 and divisible_by(i, self.num_quantizers * 3) # only allow for eos to be decoded at the end of each face, defined as 3 vertices with D residual VQ codes\n\n output = self.forward_on_codes(\n codes,\n text_embeds = text_embeds,\n return_loss = False,\n return_cache = cache_kv,\n append_eos = False,\n cond_scale = cond_scale,\n cfg_routed_kwargs = dict(\n cache = cache\n )\n )\n\n if cache_kv:\n logits, cache = output\n\n if cond_scale == 1.:\n cache = (cache, None)\n else:\n logits = output\n\n logits = logits[:, -1]\n\n if not can_eos:\n logits[:, -1] = -torch.finfo(logits.dtype).max\n\n filtered_logits = filter_logits_fn(logits, **filter_kwargs)\n\n if temperature == 0.:\n sample = filtered_logits.argmax(dim = -1)\n else:\n probs = F.softmax(filtered_logits / temperature, dim = -1)\n sample = torch.multinomial(probs, 1)\n\n codes, _ = pack([codes, sample], 'b *')\n\n # check for all rows to have [eos] to terminate\n\n is_eos_codes = (codes == self.eos_token_id)\n\n if is_eos_codes.any(dim = -1).all():\n break\n\n # mask out to padding anything after the first eos\n\n mask = is_eos_codes.float().cumsum(dim = -1) >= 1\n codes = codes.masked_fill(mask, self.pad_id)\n\n # remove a potential extra token from eos, if breaked early\n\n code_len = codes.shape[-1]\n round_down_code_len = code_len // self.num_quantizers * self.num_quantizers\n codes = codes[:, :round_down_code_len]\n\n # early return of raw residual quantizer codes\n\n if return_codes:\n codes = rearrange(codes, 'b (n q) -> b n q', q = self.num_quantizers)\n return codes\n\n self.autoencoder.eval()\n face_coords, face_mask = self.autoencoder.decode_from_codes_to_faces(codes)\n\n if not exists(face_coords_to_file):\n return face_coords, face_mask\n\n files = [face_coords_to_file(coords[mask]) for coords, mask in zip(face_coords, face_mask)]\n return files\n\n def forward(\n self,\n *,\n vertices: TensorType['b', 'nv', 3, int],\n faces: TensorType['b', 'nf', 3, int],\n face_edges: Optional[TensorType['b', 'e', 2, int]] = None,\n codes: Optional[Tensor] = None,\n cache: Optional[LayerIntermediates] = None,\n **kwargs\n ):\n if not exists(codes):\n codes = self.autoencoder.tokenize(\n vertices = vertices,\n faces = faces,\n face_edges = face_edges\n )\n\n return self.forward_on_codes(codes, cache = cache, **kwargs)\n\n @classifier_free_guidance\n def forward_on_codes(\n self,\n codes = None,\n return_loss = True,\n return_cache = False,\n append_eos = True,\n cache = None,\n texts: Optional[List[str]] = None,\n text_embeds: Optional[Tensor] = None,\n cond_drop_prob = 0.\n ):\n # handle text conditions\n\n attn_context_kwargs = dict()\n\n if self.condition_on_text:\n assert exists(texts) ^ exists(text_embeds), '`text` or `text_embeds` must be passed 
in if `condition_on_text` is set to True'\n\n if exists(texts):\n text_embeds = self.conditioner.embed_texts(texts)\n\n if exists(codes):\n assert text_embeds.shape[0] == codes.shape[0], 'batch size of texts or text embeddings is not equal to the batch size of the mesh codes'\n\n _, maybe_dropped_text_embeds = self.conditioner(\n text_embeds = text_embeds,\n cond_drop_prob = cond_drop_prob\n )\n\n attn_context_kwargs = dict(\n context = maybe_dropped_text_embeds.embed,\n context_mask = maybe_dropped_text_embeds.mask\n )\n\n # take care of codes that may be flattened\n\n if codes.ndim > 2:\n codes = rearrange(codes, 'b ... -> b (...)')\n\n # get some variable\n\n batch, seq_len, device = *codes.shape, codes.device\n\n assert seq_len <= self.max_seq_len, f'received codes of length {seq_len} but needs to be less than or equal to set max_seq_len {self.max_seq_len}'\n\n # auto append eos token\n\n if append_eos:\n assert exists(codes)\n\n code_lens = ((codes == self.pad_id).cumsum(dim = -1) == 0).sum(dim = -1)\n\n codes = F.pad(codes, (0, 1), value = 0)\n\n batch_arange = torch.arange(batch, device = device)\n\n batch_arange = rearrange(batch_arange, '... -> ... 1')\n code_lens = rearrange(code_lens, '... -> ... 1')\n\n codes[batch_arange, code_lens] = self.eos_token_id\n\n # if returning loss, save the labels for cross entropy\n\n if return_loss:\n assert seq_len > 0\n codes, labels = codes[:, :-1], codes\n\n # token embed (each residual VQ id)\n\n codes = codes.masked_fill(codes == self.pad_id, 0)\n codes = self.token_embed(codes)\n\n # codebook embed + absolute positions\n\n seq_arange = torch.arange(codes.shape[-2], device = device)\n\n codes = codes + self.abs_pos_emb(seq_arange)\n\n # embedding for quantizer level\n\n code_len = codes.shape[1]\n\n level_embed = repeat(self.quantize_level_embed, 'q d -> (r q) d', r = ceil(code_len / self.num_quantizers))\n codes = codes + level_embed[:code_len]\n\n # embedding for each vertex\n\n vertex_embed = repeat(self.vertex_embed, 'nv d -> (r nv q) d', r = ceil(code_len / (3 * self.num_quantizers)), q = self.num_quantizers)\n codes = codes + vertex_embed[:code_len]\n\n # create a token per face, by summarizing the 3 vertices\n # this is similar in design to the RQ transformer from Lee et al. 
https://arxiv.org/abs/2203.01941\n\n num_tokens_per_face = self.num_quantizers * 3\n\n curr_vertex_pos = code_len % num_tokens_per_face # the current intra-face vertex-code position id, needed for caching at the fine decoder stage\n\n code_len_is_multiple_of_face = divisible_by(code_len, num_tokens_per_face)\n\n next_multiple_code_len = ceil(code_len / num_tokens_per_face) * num_tokens_per_face\n\n codes = pad_to_length(codes, next_multiple_code_len, dim = -2)\n\n # grouped codes will be used for the second stage\n\n grouped_codes = rearrange(codes, 'b (nf n) d -> b nf n d', n = num_tokens_per_face)\n\n # create the coarse tokens for the first attention network\n\n face_codes = grouped_codes if code_len_is_multiple_of_face else grouped_codes[:, :-1]\n face_codes = rearrange(face_codes, 'b nf n d -> b nf (n d)')\n face_codes = self.to_face_tokens(face_codes)\n\n face_codes_len = face_codes.shape[-2]\n\n # cache logic\n\n (\n cached_attended_face_codes,\n coarse_cache,\n fine_cache,\n coarse_gateloop_cache,\n fine_gateloop_cache\n ) = cache if exists(cache) else ((None,) * 5)\n\n if exists(cache):\n cached_face_codes_len = cached_attended_face_codes.shape[-2]\n need_call_first_transformer = face_codes_len > cached_face_codes_len\n else:\n need_call_first_transformer = True\n\n should_cache_fine = not divisible_by(curr_vertex_pos + 1, num_tokens_per_face)\n\n # attention on face codes (coarse)\n\n if need_call_first_transformer:\n if exists(self.coarse_gateloop_block):\n face_codes, coarse_gateloop_cache = self.coarse_gateloop_block(face_codes, cache = coarse_gateloop_cache)\n\n attended_face_codes, coarse_cache = self.decoder(\n face_codes,\n cache = coarse_cache,\n return_hiddens = True,\n **attn_context_kwargs\n )\n\n attended_face_codes = safe_cat((cached_attended_face_codes, attended_face_codes), dim = -2)\n else:\n attended_face_codes = cached_attended_face_codes\n\n # maybe project from coarse to fine dimension for hierarchical transformers\n\n attended_face_codes = self.maybe_project_coarse_to_fine(attended_face_codes)\n\n # auto prepend sos token\n\n sos = repeat(self.sos_token, 'd -> b d', b = batch)\n\n attended_face_codes_with_sos, _ = pack([sos, attended_face_codes], 'b * d')\n\n grouped_codes = pad_to_length(grouped_codes, attended_face_codes_with_sos.shape[-2], dim = 1)\n fine_vertex_codes, _ = pack([attended_face_codes_with_sos, grouped_codes], 'b n * d')\n\n fine_vertex_codes = fine_vertex_codes[..., :-1, :]\n\n # gateloop layers\n\n if exists(self.fine_gateloop_block):\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b nf n d -> b (nf n) d')\n orig_length = fine_vertex_codes.shape[-2]\n fine_vertex_codes = fine_vertex_codes[:, :(code_len + 1)]\n\n fine_vertex_codes, fine_gateloop_cache = self.fine_gateloop_block(fine_vertex_codes, cache = fine_gateloop_cache)\n\n fine_vertex_codes = pad_to_length(fine_vertex_codes, orig_length, dim = -2)\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b (nf n) d -> b nf n d', n = num_tokens_per_face)\n\n # fine attention - 2nd stage\n\n if exists(cache):\n fine_vertex_codes = fine_vertex_codes[:, -1:]\n\n if exists(fine_cache):\n for attn_intermediate in fine_cache.attn_intermediates:\n ck, cv = attn_intermediate.cached_kv\n ck, cv = map(lambda t: rearrange(t, '(b nf) ... 
-> b nf ...', b = batch), (ck, cv))\n ck, cv = map(lambda t: t[:, -1, :, :curr_vertex_pos], (ck, cv))\n attn_intermediate.cached_kv = (ck, cv)\n\n one_face = fine_vertex_codes.shape[1] == 1\n\n fine_vertex_codes = rearrange(fine_vertex_codes, 'b nf n d -> (b nf) n d')\n\n if one_face:\n fine_vertex_codes = fine_vertex_codes[:, :(curr_vertex_pos + 1)]\n\n attended_vertex_codes, fine_cache = self.fine_decoder(\n fine_vertex_codes,\n cache = fine_cache,\n return_hiddens = True\n )\n\n if not should_cache_fine:\n fine_cache = None\n\n if not one_face:\n # reconstitute original sequence\n\n embed = rearrange(attended_vertex_codes, '(b nf) n d -> b (nf n) d', b = batch)\n embed = embed[:, :(code_len + 1)]\n else:\n embed = attended_vertex_codes\n\n # logits\n\n logits = self.to_logits(embed)\n\n if not return_loss:\n if not return_cache:\n return logits\n\n next_cache = (\n attended_face_codes,\n coarse_cache,\n fine_cache,\n coarse_gateloop_cache,\n fine_gateloop_cache\n )\n\n return logits, next_cache\n\n # loss\n\n ce_loss = F.cross_entropy(\n rearrange(logits, 'b n c -> b c n'),\n labels,\n ignore_index = self.pad_id\n )\n\n return ce_loss" } ]
from pathlib import Path
from functools import partial
from packaging import version
from contextlib import nullcontext, contextmanager

from torch import nn, Tensor
from torch.nn import Module
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import _LRScheduler

from pytorch_custom_utils import (
    get_adam_optimizer,
    OptimizerWithWarmupSchedule,
    add_wandb_tracker_contextmanager
)

from accelerate import Accelerator
from accelerate.utils import DistributedDataParallelKwargs

from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Optional, Tuple, Type, List

from ema_pytorch import EMA

from meshgpt_pytorch.data import custom_collate
from meshgpt_pytorch.version import __version__
from meshgpt_pytorch.meshgpt_pytorch import (
    MeshAutoencoder,
    MeshTransformer
)

import torch
import torch.nn.functional as F
13,006
optimizer = self.optimizer.state_dict(), version = __version__, step = self.step.item(), config = self.unwrapped_model._config ) torch.save(pkg, str(path)) def load(self, path): path = Path(path) assert path.exists() pkg = torch.load(str(path)) if version.parse(__version__) != version.parse(pkg['version']): self.print(f'loading saved mesh autoencoder at version {pkg["version"]}, but current package version is {__version__}') self.model.load_state_dict(pkg['model']) self.ema_model.load_state_dict(pkg['ema_model']) self.optimizer.load_state_dict(pkg['optimizer']) self.step.copy_(pkg['step']) def next_data_to_forward_kwargs(self, dl_iter) -> dict: data = next(dl_iter) if isinstance(data, tuple): forward_kwargs = dict(zip(self.data_kwargs, data)) elif isinstance(data, dict): forward_kwargs = data maybe_del(forward_kwargs, 'texts', 'text_embeds') return forward_kwargs def forward(self): step = self.step.item() dl_iter = cycle(self.dataloader) if self.is_main and self.should_validate: val_dl_iter = cycle(self.val_dataloader) while step < self.num_train_steps: for i in range(self.grad_accum_every): is_last = i == (self.grad_accum_every - 1) maybe_no_sync = partial(self.accelerator.no_sync, self.model) if not is_last else nullcontext forward_kwargs = self.next_data_to_forward_kwargs(dl_iter) with self.accelerator.autocast(), maybe_no_sync(): total_loss, (recon_loss, commit_loss) = self.model( **forward_kwargs, return_loss_breakdown = True ) self.accelerator.backward(total_loss / self.grad_accum_every) self.print(f'recon loss: {recon_loss.item():.3f} | commit loss: {commit_loss.sum().item():.3f}') self.log( total_loss = total_loss.item(), commit_loss = commit_loss.sum().item(), recon_loss = recon_loss.item() ) self.optimizer.step() self.optimizer.zero_grad() step += 1 self.step.add_(1) self.wait() if self.is_main: self.ema_model.update() self.wait() if self.is_main and self.should_validate and divisible_by(step, self.val_every): total_val_recon_loss = 0. self.ema_model.eval() num_val_batches = self.val_num_batches * self.grad_accum_every for _ in range(num_val_batches): with self.accelerator.autocast(), torch.no_grad(): forward_kwargs = self.next_data_to_forward_kwargs(val_dl_iter) val_loss, (val_recon_loss, val_commit_loss) = self.ema_model( **forward_kwargs, return_loss_breakdown = True ) total_val_recon_loss += (val_recon_loss / num_val_batches) self.print(f'valid recon loss: {total_val_recon_loss:.3f}') self.log(val_loss = total_val_recon_loss) self.wait() if self.is_main and divisible_by(step, self.checkpoint_every): checkpoint_num = step // self.checkpoint_every self.save(self.checkpoint_folder / f'mesh-autoencoder.ckpt.{checkpoint_num}.pt') self.wait() self.print('training complete') # mesh transformer trainer @add_wandb_tracker_contextmanager() class MeshTransformerTrainer(Module): @beartype def __init__( self,
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) self.should_validate = exists(val_dataset) if self.should_validate: assert len(val_dataset) > 0, 'your validation dataset is empty' self.val_every = val_every self.val_num_batches = val_num_batches self.val_dataloader = DataLoader( val_dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) if hasattr(dataset, 'data_kwargs') and exists(dataset.data_kwargs): assert is_bearable(dataset.data_kwargs, List[str]) self.data_kwargs = dataset.data_kwargs else: self.data_kwargs = data_kwargs ( self.model, self.dataloader ) = self.accelerator.prepare( self.model, self.dataloader ) self.grad_accum_every = grad_accum_every self.num_train_steps = num_train_steps self.register_buffer('step', torch.tensor(0)) self.checkpoint_every = checkpoint_every self.checkpoint_folder = Path(checkpoint_folder) self.checkpoint_folder.mkdir(exist_ok = True, parents = True) @property def ema_tokenizer(self): return self.ema_model.ema_model def tokenize(self, *args, **kwargs): return self.ema_tokenizer.tokenize(*args, **kwargs) def log(self, **data_kwargs): self.accelerator.log(data_kwargs, step = self.step.item()) @property def device(self): return self.unwrapped_model.device @property def is_main(self): return self.accelerator.is_main_process @property def unwrapped_model(self): return self.accelerator.unwrap_model(self.model) @property def is_local_main(self): return self.accelerator.is_local_main_process def wait(self): return 
self.accelerator.wait_for_everyone() def print(self, msg): return self.accelerator.print(msg) def save(self, path, overwrite = True): path = Path(path) assert overwrite or not path.exists() pkg = dict( model = self.unwrapped_model.state_dict(), ema_model = self.ema_model.state_dict(), optimizer = self.optimizer.state_dict(), version = __version__, step = self.step.item(), config = self.unwrapped_model._config ) torch.save(pkg, str(path)) def load(self, path): path = Path(path) assert path.exists() pkg = torch.load(str(path)) if version.parse(__version__) != version.parse(pkg['version']): self.print(f'loading saved mesh autoencoder at version {pkg["version"]}, but current package version is {__version__}') self.model.load_state_dict(pkg['model']) self.ema_model.load_state_dict(pkg['ema_model']) self.optimizer.load_state_dict(pkg['optimizer']) self.step.copy_(pkg['step']) def next_data_to_forward_kwargs(self, dl_iter) -> dict: data = next(dl_iter) if isinstance(data, tuple): forward_kwargs = dict(zip(self.data_kwargs, data)) elif isinstance(data, dict): forward_kwargs = data maybe_del(forward_kwargs, 'texts', 'text_embeds') return forward_kwargs def forward(self): step = self.step.item() dl_iter = cycle(self.dataloader) if self.is_main and self.should_validate: val_dl_iter = cycle(self.val_dataloader) while step < self.num_train_steps: for i in range(self.grad_accum_every): is_last = i == (self.grad_accum_every - 1) maybe_no_sync = partial(self.accelerator.no_sync, self.model) if not is_last else nullcontext forward_kwargs = self.next_data_to_forward_kwargs(dl_iter) with self.accelerator.autocast(), maybe_no_sync(): total_loss, (recon_loss, commit_loss) = self.model( **forward_kwargs, return_loss_breakdown = True ) self.accelerator.backward(total_loss / self.grad_accum_every) self.print(f'recon loss: {recon_loss.item():.3f} | commit loss: {commit_loss.sum().item():.3f}') self.log( total_loss = total_loss.item(), commit_loss = commit_loss.sum().item(), recon_loss = recon_loss.item() ) self.optimizer.step() self.optimizer.zero_grad() step += 1 self.step.add_(1) self.wait() if self.is_main: self.ema_model.update() self.wait() if self.is_main and self.should_validate and divisible_by(step, self.val_every): total_val_recon_loss = 0. self.ema_model.eval() num_val_batches = self.val_num_batches * self.grad_accum_every for _ in range(num_val_batches): with self.accelerator.autocast(), torch.no_grad(): forward_kwargs = self.next_data_to_forward_kwargs(val_dl_iter) val_loss, (val_recon_loss, val_commit_loss) = self.ema_model( **forward_kwargs, return_loss_breakdown = True ) total_val_recon_loss += (val_recon_loss / num_val_batches) self.print(f'valid recon loss: {total_val_recon_loss:.3f}') self.log(val_loss = total_val_recon_loss) self.wait() if self.is_main and divisible_by(step, self.checkpoint_every): checkpoint_num = step // self.checkpoint_every self.save(self.checkpoint_folder / f'mesh-autoencoder.ckpt.{checkpoint_num}.pt') self.wait() self.print('training complete') # mesh transformer trainer @add_wandb_tracker_contextmanager() class MeshTransformerTrainer(Module): @beartype def __init__( self,
model: MeshTransformer,
3
2023-11-29 14:58:15+00:00
16k
EricGuo5513/momask-codes
train_res_transformer.py
[ { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False,\n clip_version=None, opt=None, **kargs):\n super(ResidualTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n # assert shared_codebook == True, \"Only support shared codebook right now!\"\n\n self.code_dim = code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n # self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n self.cond_drop_prob = cond_drop_prob\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers)\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim)\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding\n self.pad_id = opt.num_tokens\n\n # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim)\n\n if shared_codebook:\n token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n if share_weight:\n self.output_proj_weight = self.token_embed_weight\n self.output_proj_bias = None\n else:\n output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,)))\n # self.output_proj_bias = 0\n self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens)\n\n else:\n if share_weight:\n self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim)))\n self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_bias = None\n self.registered = False\n else:\n output_proj_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n\n self.output_proj_weight = nn.Parameter(output_proj_weight)\n self.output_proj_bias = 
nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens)))\n token_embed_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n self.token_embed_weight = nn.Parameter(token_embed_weight)\n\n self.apply(self.__init_weights)\n self.shared_codebook = shared_codebook\n self.share_weight = share_weight\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n # def\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n\n def q_schedule(self, bs, low, high):\n noise = uniform((bs,), device=self.opt.device)\n schedule = 1 - cosine_schedule(noise)\n return torch.round(schedule * (high - low)) + low\n\n def process_embed_proj_weight(self):\n if self.share_weight and (not self.shared_codebook):\n # if not self.registered:\n self.output_proj_weight = torch.cat([self.embed_proj_shared_weight, self.output_proj_weight_], dim=0)\n self.token_embed_weight = torch.cat([self.token_embed_weight_, self.embed_proj_shared_weight], dim=0)\n # self.registered = True\n\n def output_project(self, logits, qids):\n '''\n :logits: (bs, code_dim, seqlen)\n :qids: (bs)\n\n :return:\n -logits (bs, ntoken, seqlen)\n '''\n # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim)\n output_proj_weight = self.output_proj_weight[qids]\n # (num_qlayers, ntoken) -> (bs, ntoken)\n output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids]\n\n output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)\n if output_proj_bias is not None:\n output += output + output_proj_bias.unsqueeze(-1)\n return output\n\n\n\n def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):\n '''\n :param motion_codes: (b, seqlen, d)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param qids: (b), quantizer layer ids\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :return:\n -logits: (b, num_token, seqlen)\n '''\n cond = 
self.mask_cond(cond, force_mask=force_mask)\n\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(motion_codes)\n\n # (b, num_quantizer)\n q_onehot = self.encode_quant(qids).float().to(x.device)\n\n q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)\n cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)\n logits = self.output_process(output)\n return logits\n\n def forward_with_cond_scale(self,\n motion_codes,\n q_id,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n bs = motion_codes.shape[0]\n # if cond_scale == 1:\n qids = torch.full((bs,), q_id, dtype=torch.long, device=motion_codes.device)\n if force_mask:\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n logits = self.output_project(logits, qids-1)\n return logits\n\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask)\n logits = self.output_project(logits, qids-1)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n aux_logits = self.output_project(aux_logits, qids-1)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n def forward(self, all_indices, y, m_lens):\n '''\n :param all_indices: (b, n, q)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n self.process_embed_proj_weight()\n\n bs, ntokens, num_quant_layers = all_indices.shape\n device = all_indices.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)\n\n q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)\n all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)\n\n # randomly sample quantization layers to work on, [1, num_q)\n active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)\n\n # print(self.token_embed_weight.shape, all_indices.shape)\n token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)\n gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])\n # print(token_embed.shape, gather_indices.shape)\n all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)\n\n cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)\n\n active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)\n history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)\n logits = self.output_project(logits, active_q_layers-1)\n ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)\n\n return ce_loss, pred_id, acc\n\n @torch.no_grad()\n 
@eval_decorator\n def generate(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2,\n num_res_layers=-1, # If it's -1, use all.\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n num_quant_layers = self.opt.num_quantizers if num_res_layers==-1 else num_res_layers+1\n\n for i in range(1, num_quant_layers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, 
self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n\n for i in range(1, self.opt.num_quantizers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices" }, { "identifier": "ResidualTransformerTrainer", "path": "models/mask_transformer/transformer_trainer.py", "snippet": "class ResidualTransformerTrainer:\n def __init__(self, args, res_transformer, vq_model):\n self.opt = args\n self.res_transformer = res_transformer\n self.vq_model = vq_model\n self.device = args.device\n self.vq_model.eval()\n\n if args.is_train:\n self.logger = SummaryWriter(args.log_dir)\n # self.l1_criterion = torch.nn.SmoothL1Loss()\n\n\n def update_lr_warm_up(self, nb_iter, warm_up_iter, lr):\n\n current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1)\n for param_group in self.opt_res_transformer.param_groups:\n param_group[\"lr\"] = current_lr\n\n return current_lr\n\n\n def forward(self, batch_data):\n\n conds, motion, m_lens = batch_data\n motion = motion.detach().float().to(self.device)\n m_lens = m_lens.detach().long().to(self.device)\n\n # (b, n, q), (q, b, n ,d)\n code_idx, all_codes = self.vq_model.encode(motion)\n m_lens = m_lens // 4\n\n conds = conds.to(self.device).float() if torch.is_tensor(conds) else conds\n\n ce_loss, pred_ids, acc = self.res_transformer(code_idx, conds, m_lens)\n\n return ce_loss, acc\n\n def update(self, batch_data):\n loss, acc = self.forward(batch_data)\n\n self.opt_res_transformer.zero_grad()\n loss.backward()\n self.opt_res_transformer.step()\n self.scheduler.step()\n\n return loss.item(), acc\n\n def save(self, file_name, ep, total_it):\n 
res_trans_state_dict = self.res_transformer.state_dict()\n clip_weights = [e for e in res_trans_state_dict.keys() if e.startswith('clip_model.')]\n for e in clip_weights:\n del res_trans_state_dict[e]\n state = {\n 'res_transformer': res_trans_state_dict,\n 'opt_res_transformer': self.opt_res_transformer.state_dict(),\n 'scheduler':self.scheduler.state_dict(),\n 'ep': ep,\n 'total_it': total_it,\n }\n torch.save(state, file_name)\n\n def resume(self, model_dir):\n checkpoint = torch.load(model_dir, map_location=self.device)\n missing_keys, unexpected_keys = self.res_transformer.load_state_dict(checkpoint['res_transformer'], strict=False)\n assert len(unexpected_keys) == 0\n assert all([k.startswith('clip_model.') for k in missing_keys])\n\n try:\n self.opt_res_transformer.load_state_dict(checkpoint['opt_res_transformer']) # Optimizer\n\n self.scheduler.load_state_dict(checkpoint['scheduler']) # Scheduler\n except:\n print('Resume wo optimizer')\n return checkpoint['ep'], checkpoint['total_it']\n\n def train(self, train_loader, val_loader, eval_val_loader, eval_wrapper, plot_eval):\n self.res_transformer.to(self.device)\n self.vq_model.to(self.device)\n\n self.opt_res_transformer = optim.AdamW(self.res_transformer.parameters(), betas=(0.9, 0.99), lr=self.opt.lr, weight_decay=1e-5)\n self.scheduler = optim.lr_scheduler.MultiStepLR(self.opt_res_transformer,\n milestones=self.opt.milestones,\n gamma=self.opt.gamma)\n\n epoch = 0\n it = 0\n\n if self.opt.is_continue:\n model_dir = pjoin(self.opt.model_dir, 'latest.tar') # TODO\n epoch, it = self.resume(model_dir)\n print(\"Load model epoch:%d iterations:%d\"%(epoch, it))\n\n start_time = time.time()\n total_iters = self.opt.max_epoch * len(train_loader)\n print(f'Total Epochs: {self.opt.max_epoch}, Total Iters: {total_iters}')\n print('Iters Per Epoch, Training: %04d, Validation: %03d' % (len(train_loader), len(val_loader)))\n logs = defaultdict(def_value, OrderedDict())\n\n best_fid, best_div, best_top1, best_top2, best_top3, best_matching, writer = evaluation_res_transformer(\n self.opt.save_root, eval_val_loader, self.res_transformer, self.vq_model, self.logger, epoch,\n best_fid=100, best_div=100,\n best_top1=0, best_top2=0, best_top3=0,\n best_matching=100, eval_wrapper=eval_wrapper,\n plot_func=plot_eval, save_ckpt=False, save_anim=False\n )\n best_loss = 100\n best_acc = 0\n\n while epoch < self.opt.max_epoch:\n self.res_transformer.train()\n self.vq_model.eval()\n\n for i, batch in enumerate(train_loader):\n it += 1\n if it < self.opt.warm_up_iter:\n self.update_lr_warm_up(it, self.opt.warm_up_iter, self.opt.lr)\n\n loss, acc = self.update(batch_data=batch)\n logs['loss'] += loss\n logs[\"acc\"] += acc\n logs['lr'] += self.opt_res_transformer.param_groups[0]['lr']\n\n if it % self.opt.log_every == 0:\n mean_loss = OrderedDict()\n # self.logger.add_scalar('val_loss', val_loss, it)\n # self.l\n for tag, value in logs.items():\n self.logger.add_scalar('Train/%s'%tag, value / self.opt.log_every, it)\n mean_loss[tag] = value / self.opt.log_every\n logs = defaultdict(def_value, OrderedDict())\n print_current_loss(start_time, it, total_iters, mean_loss, epoch=epoch, inner_iter=i)\n\n if it % self.opt.save_latest == 0:\n self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)\n\n epoch += 1\n self.save(pjoin(self.opt.model_dir, 'latest.tar'), epoch, it)\n\n print('Validation time:')\n self.vq_model.eval()\n self.res_transformer.eval()\n\n val_loss = []\n val_acc = []\n with torch.no_grad():\n for i, batch_data in enumerate(val_loader):\n 
loss, acc = self.forward(batch_data)\n val_loss.append(loss.item())\n val_acc.append(acc)\n\n print(f\"Validation loss:{np.mean(val_loss):.3f}, Accuracy:{np.mean(val_acc):.3f}\")\n\n self.logger.add_scalar('Val/loss', np.mean(val_loss), epoch)\n self.logger.add_scalar('Val/acc', np.mean(val_acc), epoch)\n\n if np.mean(val_loss) < best_loss:\n print(f\"Improved loss from {best_loss:.02f} to {np.mean(val_loss)}!!!\")\n self.save(pjoin(self.opt.model_dir, 'net_best_loss.tar'), epoch, it)\n best_loss = np.mean(val_loss)\n\n if np.mean(val_acc) > best_acc:\n print(f\"Improved acc from {best_acc:.02f} to {np.mean(val_acc)}!!!\")\n # self.save(pjoin(self.opt.model_dir, 'net_best_loss.tar'), epoch, it)\n best_acc = np.mean(val_acc)\n\n best_fid, best_div, best_top1, best_top2, best_top3, best_matching, writer = evaluation_res_transformer(\n self.opt.save_root, eval_val_loader, self.res_transformer, self.vq_model, self.logger, epoch, best_fid=best_fid,\n best_div=best_div, best_top1=best_top1, best_top2=best_top2, best_top3=best_top3,\n best_matching=best_matching, eval_wrapper=eval_wrapper,\n plot_func=plot_eval, save_ckpt=True, save_anim=(epoch%self.opt.eval_every_e==0)\n )" }, { "identifier": "RVQVAE", "path": "models/vq/model.py", "snippet": "class RVQVAE(nn.Module):\n def __init__(self,\n args,\n input_width=263,\n nb_code=1024,\n code_dim=512,\n output_emb_width=512,\n down_t=3,\n stride_t=2,\n width=512,\n depth=3,\n dilation_growth_rate=3,\n activation='relu',\n norm=None):\n\n super().__init__()\n assert output_emb_width == code_dim\n self.code_dim = code_dim\n self.num_code = nb_code\n # self.quant = args.quantizer\n self.encoder = Encoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n self.decoder = Decoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n rvqvae_config = {\n 'num_quantizers': args.num_quantizers,\n 'shared_codebook': args.shared_codebook,\n 'quantize_dropout_prob': args.quantize_dropout_prob,\n 'quantize_dropout_cutoff_index': 0,\n 'nb_code': nb_code,\n 'code_dim':code_dim, \n 'args': args,\n }\n self.quantizer = ResidualVQ(**rvqvae_config)\n\n def preprocess(self, x):\n # (bs, T, Jx3) -> (bs, Jx3, T)\n x = x.permute(0, 2, 1).float()\n return x\n\n def postprocess(self, x):\n # (bs, Jx3, T) -> (bs, T, Jx3)\n x = x.permute(0, 2, 1)\n return x\n\n def encode(self, x):\n N, T, _ = x.shape\n x_in = self.preprocess(x)\n x_encoder = self.encoder(x_in)\n # print(x_encoder.shape)\n code_idx, all_codes = self.quantizer.quantize(x_encoder, return_latent=True)\n # print(code_idx.shape)\n # code_idx = code_idx.view(N, -1)\n # (N, T, Q)\n # print()\n return code_idx, all_codes\n\n def forward(self, x):\n x_in = self.preprocess(x)\n # Encode\n x_encoder = self.encoder(x_in)\n\n ## quantization\n # x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5,\n # force_dropout_index=0) #TODO hardcode\n x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5)\n\n # print(code_idx[0, :, 1])\n ## decoder\n x_out = self.decoder(x_quantized)\n # x_out = self.postprocess(x_decoder)\n return x_out, commit_loss, perplexity\n\n def forward_decoder(self, x):\n x_d = self.quantizer.get_codes_from_indices(x)\n # x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()\n x = x_d.sum(dim=0).permute(0, 2, 1)\n\n # decoder\n x_out = self.decoder(x)\n # x_out = 
self.postprocess(x_decoder)\n return x_out" }, { "identifier": "TrainT2MOptions", "path": "options/train_option.py", "snippet": "class TrainT2MOptions(BaseOptions):\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument('--batch_size', type=int, default=64, help='Batch size')\n self.parser.add_argument('--max_epoch', type=int, default=500, help='Maximum number of epoch for training')\n # self.parser.add_argument('--max_iters', type=int, default=150_000, help='Training iterations')\n\n '''LR scheduler'''\n self.parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')\n self.parser.add_argument('--gamma', type=float, default=0.1, help='Learning rate schedule factor')\n self.parser.add_argument('--milestones', default=[50_000], nargs=\"+\", type=int,\n help=\"learning rate schedule (iterations)\")\n self.parser.add_argument('--warm_up_iter', default=2000, type=int, help='number of total iterations for warmup')\n\n '''Condition'''\n self.parser.add_argument('--cond_drop_prob', type=float, default=0.1, help='Drop ratio of condition, for classifier-free guidance')\n self.parser.add_argument(\"--seed\", default=3407, type=int, help=\"Seed\")\n\n self.parser.add_argument('--is_continue', action=\"store_true\", help='Is this trial continuing previous state?')\n self.parser.add_argument('--gumbel_sample', action=\"store_true\", help='Strategy for token sampling, True: Gumbel sampling, False: Categorical sampling')\n self.parser.add_argument('--share_weight', action=\"store_true\", help='Whether to share weight for projection/embedding, for residual transformer.')\n\n self.parser.add_argument('--log_every', type=int, default=50, help='Frequency of printing training progress, (iteration)')\n # self.parser.add_argument('--save_every_e', type=int, default=100, help='Frequency of printing training progress')\n self.parser.add_argument('--eval_every_e', type=int, default=10, help='Frequency of animating eval results, (epoch)')\n self.parser.add_argument('--save_latest', type=int, default=500, help='Frequency of saving checkpoint, (iteration)')\n\n\n self.is_train = True" }, { "identifier": "plot_3d_motion", "path": "utils/plot_script.py", "snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, figsize=(10, 10), fps=120, radius=4):\n matplotlib.use('Agg')\n\n title_sp = title.split(' ')\n if len(title_sp) > 20:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:20]), ' '.join(title_sp[20:])])\n elif len(title_sp) > 10:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:])])\n\n def init():\n ax.set_xlim3d([-radius / 2, radius / 2])\n ax.set_ylim3d([0, radius])\n ax.set_zlim3d([0, radius])\n # print(title)\n fig.suptitle(title, fontsize=20)\n ax.grid(b=False)\n\n def plot_xzPlane(minx, maxx, miny, minz, maxz):\n ## Plot a plane XZ\n verts = [\n [minx, miny, minz],\n [minx, miny, maxz],\n [maxx, miny, maxz],\n [maxx, miny, minz]\n ]\n xz_plane = Poly3DCollection([verts])\n xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5))\n ax.add_collection3d(xz_plane)\n\n # return ax\n\n # (seq_len, joints_num, 3)\n data = joints.copy().reshape(len(joints), -1, 3)\n fig = plt.figure(figsize=figsize)\n ax = p3.Axes3D(fig)\n init()\n MINS = data.min(axis=0).min(axis=0)\n MAXS = data.max(axis=0).max(axis=0)\n colors = ['red', 'blue', 'black', 'red', 'blue',\n 'darkblue', 'darkblue', 'darkblue', 'darkblue', 'darkblue',\n 'darkred', 'darkred', 'darkred', 'darkred', 'darkred']\n frame_number = data.shape[0]\n # print(data.shape)\n\n 
height_offset = MINS[1]\n data[:, :, 1] -= height_offset\n trajec = data[:, 0, [0, 2]]\n\n data[..., 0] -= data[:, 0:1, 0]\n data[..., 2] -= data[:, 0:1, 2]\n\n # print(trajec.shape)\n\n def update(index):\n # print(index)\n ax.lines = []\n ax.collections = []\n ax.view_init(elev=120, azim=-90)\n ax.dist = 7.5\n # ax =\n plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1],\n MAXS[2] - trajec[index, 1])\n # ax.scatter(data[index, :22, 0], data[index, :22, 1], data[index, :22, 2], color='black', s=3)\n\n if index > 1:\n ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 0]),\n trajec[:index, 1] - trajec[index, 1], linewidth=1.0,\n color='blue')\n # ax = plot_xzPlane(ax, MINS[0], MAXS[0], 0, MINS[2], MAXS[2])\n\n for i, (chain, color) in enumerate(zip(kinematic_tree, colors)):\n # print(color)\n if i < 5:\n linewidth = 4.0\n else:\n linewidth = 2.0\n ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth,\n color=color)\n # print(trajec[:index, 0].shape)\n\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n\n ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False)\n\n # writer = FFMpegFileWriter(fps=fps)\n ani.save(save_path, fps=fps)\n plt.close()" }, { "identifier": "recover_from_ric", "path": "utils/motion_process.py", "snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions" }, { "identifier": "get_opt", "path": "utils/get_opt.py", "snippet": "def get_opt(opt_path, device, **kwargs):\n opt = Namespace()\n opt_dict = vars(opt)\n\n skip = ('-------------- End ----------------',\n '------------ Options -------------',\n '\\n')\n print('Reading', opt_path)\n with open(opt_path, 'r') as f:\n for line in f:\n if line.strip() not in skip:\n # print(line.strip())\n key, value = line.strip('\\n').split(': ')\n if value in ('True', 'False'):\n opt_dict[key] = (value == 'True')\n # print(key, value)\n elif is_float(value):\n opt_dict[key] = float(value)\n elif is_number(value):\n opt_dict[key] = int(value)\n else:\n opt_dict[key] = str(value)\n\n # print(opt)\n opt_dict['which_epoch'] = 'finest'\n opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)\n opt.model_dir = pjoin(opt.save_root, 'model')\n opt.meta_dir = pjoin(opt.save_root, 'meta')\n\n if opt.dataset_name == 't2m':\n opt.data_root = './dataset/HumanML3D/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 22\n opt.dim_pose = 263\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n elif opt.dataset_name == 'kit':\n opt.data_root = './dataset/KIT-ML/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 21\n opt.dim_pose = 251\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n else:\n raise 
KeyError('Dataset not recognized')\n if not hasattr(opt, 'unit_length'):\n opt.unit_length = 4\n opt.dim_word = 300\n opt.num_classes = 200 // opt.unit_length\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.is_train = False\n opt.is_continue = False\n opt.device = device\n\n opt_dict.update(kwargs) # Overwrite with kwargs params\n\n return opt" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "t2m_kinematic_chain", "path": "utils/paramUtil.py", "snippet": "" }, { "identifier": "Text2MotionDataset", "path": "data/t2m_dataset.py", "snippet": "class Text2MotionDataset(data.Dataset):\n def __init__(self, opt, mean, std, split_file):\n self.opt = opt\n self.max_length = 20\n self.pointer = 0\n self.max_motion_length = opt.max_motion_length\n min_motion_len = 40 if self.opt.dataset_name =='t2m' else 24\n\n data_dict = {}\n id_list = []\n with cs.open(split_file, 'r') as f:\n for line in f.readlines():\n id_list.append(line.strip())\n # id_list = id_list[:250]\n\n new_name_list = []\n length_list = []\n for name in tqdm(id_list):\n try:\n motion = np.load(pjoin(opt.motion_dir, name + '.npy'))\n if (len(motion)) < min_motion_len or (len(motion) >= 200):\n continue\n text_data = []\n flag = False\n with cs.open(pjoin(opt.text_dir, name + '.txt')) as f:\n for line in f.readlines():\n text_dict = {}\n line_split = line.strip().split('#')\n # print(line)\n caption = line_split[0]\n tokens = line_split[1].split(' ')\n f_tag = float(line_split[2])\n to_tag = float(line_split[3])\n f_tag = 0.0 if np.isnan(f_tag) else f_tag\n to_tag = 0.0 if np.isnan(to_tag) else to_tag\n\n text_dict['caption'] = caption\n text_dict['tokens'] = tokens\n if f_tag == 0.0 and to_tag == 0.0:\n flag = True\n text_data.append(text_dict)\n else:\n try:\n n_motion = motion[int(f_tag*20) : int(to_tag*20)]\n if (len(n_motion)) < min_motion_len or (len(n_motion) >= 200):\n continue\n new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name\n while new_name in data_dict:\n new_name = random.choice('ABCDEFGHIJKLMNOPQRSTUVW') + '_' + name\n data_dict[new_name] = {'motion': n_motion,\n 'length': len(n_motion),\n 'text':[text_dict]}\n new_name_list.append(new_name)\n length_list.append(len(n_motion))\n except:\n print(line_split)\n print(line_split[2], line_split[3], f_tag, to_tag, name)\n # break\n\n if flag:\n data_dict[name] = {'motion': motion,\n 'length': len(motion),\n 'text': text_data}\n new_name_list.append(name)\n length_list.append(len(motion))\n except Exception as e:\n # print(e)\n pass\n\n # name_list, length_list = zip(*sorted(zip(new_name_list, length_list), key=lambda x: x[1]))\n name_list, length_list = new_name_list, length_list\n\n self.mean = mean\n self.std = std\n self.length_arr = np.array(length_list)\n self.data_dict = data_dict\n self.name_list = name_list\n\n def inv_transform(self, data):\n return data * self.std + self.mean\n\n def __len__(self):\n return len(self.data_dict) - self.pointer\n\n def __getitem__(self, item):\n idx = self.pointer + item\n data = self.data_dict[self.name_list[idx]]\n motion, m_length, text_list = data['motion'], data['length'], data['text']\n # Randomly select a caption\n text_data = random.choice(text_list)\n caption, tokens = text_data['caption'], text_data['tokens']\n\n if self.opt.unit_length < 10:\n coin2 = np.random.choice(['single', 'single', 'double'])\n else:\n coin2 = 'single'\n\n if coin2 == 
'double':\n m_length = (m_length // self.opt.unit_length - 1) * self.opt.unit_length\n elif coin2 == 'single':\n m_length = (m_length // self.opt.unit_length) * self.opt.unit_length\n idx = random.randint(0, len(motion) - m_length)\n motion = motion[idx:idx+m_length]\n\n \"Z Normalization\"\n motion = (motion - self.mean) / self.std\n\n if m_length < self.max_motion_length:\n motion = np.concatenate([motion,\n np.zeros((self.max_motion_length - m_length, motion.shape[1]))\n ], axis=0)\n # print(word_embeddings.shape, motion.shape)\n # print(tokens)\n return caption, motion, m_length\n\n def reset_min_len(self, length):\n assert length <= self.max_motion_length\n self.pointer = np.searchsorted(self.length_arr, length)\n print(\"Pointer Pointing at %d\" % self.pointer)" }, { "identifier": "get_dataset_motion_loader", "path": "motion_loaders/dataset_motion_loader.py", "snippet": "def get_dataset_motion_loader(opt_path, batch_size, fname, device):\n opt = get_opt(opt_path, device)\n\n # Configurations of T2M dataset and KIT dataset is almost the same\n if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':\n print('Loading dataset %s ...' % opt.dataset_name)\n\n mean = np.load(pjoin(opt.meta_dir, 'mean.npy'))\n std = np.load(pjoin(opt.meta_dir, 'std.npy'))\n\n w_vectorizer = WordVectorizer('./glove', 'our_vab')\n split_file = pjoin(opt.data_root, '%s.txt'%fname)\n dataset = Text2MotionDatasetEval(opt, mean, std, split_file, w_vectorizer)\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True,\n collate_fn=collate_fn, shuffle=True)\n else:\n raise KeyError('Dataset not Recognized !!')\n\n print('Ground Truth Dataset Loading Completed!!!')\n return dataloader, dataset" }, { "identifier": "EvaluatorModelWrapper", "path": "models/t2m_eval_wrapper.py", "snippet": "class EvaluatorModelWrapper(object):\n\n def __init__(self, opt):\n\n if opt.dataset_name == 't2m':\n opt.dim_pose = 263\n elif opt.dataset_name == 'kit':\n opt.dim_pose = 251\n else:\n raise KeyError('Dataset not Recognized!!!')\n\n opt.dim_word = 300\n opt.max_motion_length = 196\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.dim_motion_hidden = 1024\n opt.max_text_len = 20\n opt.dim_text_hidden = 512\n opt.dim_coemb_hidden = 512\n\n # print(opt)\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)\n self.opt = opt\n self.device = opt.device\n\n self.text_encoder.to(opt.device)\n self.motion_encoder.to(opt.device)\n self.movement_encoder.to(opt.device)\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not follow the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not follow the order of inputs\n def get_motion_embeddings(self, 
motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" } ]
import os
import torch
import numpy as np
from torch.utils.data import DataLoader
from os.path import join as pjoin

from models.mask_transformer.transformer import ResidualTransformer
from models.mask_transformer.transformer_trainer import ResidualTransformerTrainer
from models.vq.model import RVQVAE
from options.train_option import TrainT2MOptions
from utils.plot_script import plot_3d_motion
from utils.motion_process import recover_from_ric
from utils.get_opt import get_opt
from utils.fixseed import fixseed
from utils.paramUtil import t2m_kinematic_chain, kit_kinematic_chain
from data.t2m_dataset import Text2MotionDataset
from motion_loaders.dataset_motion_loader import get_dataset_motion_loader
from models.t2m_eval_wrapper import EvaluatorModelWrapper
13,813
    vq_opt = get_opt(opt_path, opt.device)
    vq_model = RVQVAE(vq_opt,
                      dim_pose,
                      vq_opt.nb_code,
                      vq_opt.code_dim,
                      vq_opt.output_emb_width,
                      vq_opt.down_t,
                      vq_opt.stride_t,
                      vq_opt.width,
                      vq_opt.depth,
                      vq_opt.dilation_growth_rate,
                      vq_opt.vq_act,
                      vq_opt.vq_norm)
    ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'),
                      map_location=opt.device)
    model_key = 'vq_model' if 'vq_model' in ckpt else 'net'
    vq_model.load_state_dict(ckpt[model_key])
    print(f'Loading VQ Model {opt.vq_name}')
    vq_model.to(opt.device)
    return vq_model, vq_opt


if __name__ == '__main__':
    parser = TrainT2MOptions()
    opt = parser.parse()
    fixseed(opt.seed)

    opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id))
    torch.autograd.set_detect_anomaly(True)

    opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    opt.model_dir = pjoin(opt.save_root, 'model')
    # opt.meta_dir = pjoin(opt.save_root, 'meta')
    opt.eval_dir = pjoin(opt.save_root, 'animation')
    opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name)

    os.makedirs(opt.model_dir, exist_ok=True)
    # os.makedirs(opt.meta_dir, exist_ok=True)
    os.makedirs(opt.eval_dir, exist_ok=True)
    os.makedirs(opt.log_dir, exist_ok=True)

    if opt.dataset_name == 't2m':
        opt.data_root = './dataset/HumanML3D'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.joints_num = 22
        opt.max_motion_len = 55
        dim_pose = 263
        radius = 4
        fps = 20
        kinematic_chain = t2m_kinematic_chain
        dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt'
    elif opt.dataset_name == 'kit': #TODO
        opt.data_root = './dataset/KIT-ML'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.joints_num = 21
        radius = 240 * 8
        fps = 12.5
        dim_pose = 251
        opt.max_motion_len = 55
        kinematic_chain = kit_kinematic_chain
        dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt'
    else:
        raise KeyError('Dataset Does Not Exist')
    opt.text_dir = pjoin(opt.data_root, 'texts')

    vq_model, vq_opt = load_vq_model()

    clip_version = 'ViT-B/32'

    opt.num_tokens = vq_opt.nb_code
    opt.num_quantizers = vq_opt.num_quantizers

    # if opt.is_v2:
    res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim,
                                          cond_mode='text',
                                          latent_dim=opt.latent_dim,
                                          ff_size=opt.ff_size,
                                          num_layers=opt.n_layers,
                                          num_heads=opt.n_heads,
                                          dropout=opt.dropout,
                                          clip_dim=512,
                                          shared_codebook=vq_opt.shared_codebook,
                                          cond_drop_prob=opt.cond_drop_prob,
                                          # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None,
                                          share_weight=opt.share_weight,
                                          clip_version=clip_version,
                                          opt=opt)
    # else:
    #     res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim,
    #                                           cond_mode='text',
    #                                           latent_dim=opt.latent_dim,
    #                                           ff_size=opt.ff_size,
    #                                           num_layers=opt.n_layers,
    #                                           num_heads=opt.n_heads,
    #                                           dropout=opt.dropout,
    #                                           clip_dim=512,
    #                                           shared_codebook=vq_opt.shared_codebook,
    #                                           cond_drop_prob=opt.cond_drop_prob,
    #                                           # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None,
    #                                           clip_version=clip_version,
    #                                           opt=opt)

    all_params = 0
    pc_transformer = sum(param.numel() for param in res_transformer.parameters_wo_clip())

    print(res_transformer)
    # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000))
    all_params += pc_transformer

    print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000))

    mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy'))
    std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy'))

    train_split_file = pjoin(opt.data_root, 'train.txt')
    val_split_file = pjoin(opt.data_root, 'val.txt')
def plot_t2m(data, save_dir, captions, m_lengths):
    data = train_dataset.inv_transform(data)
    # print(ep_curves.shape)
    for i, (caption, joint_data) in enumerate(zip(captions, data)):
        joint_data = joint_data[:m_lengths[i]]
        joint = recover_from_ric(torch.from_numpy(joint_data).float(), opt.joints_num).numpy()
        save_path = pjoin(save_dir, '%02d.mp4'%i)
        # print(joint.shape)
        plot_3d_motion(save_path, kinematic_chain, joint, title=caption, fps=20)


def load_vq_model():
    opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt')
    vq_opt = get_opt(opt_path, opt.device)
    vq_model = RVQVAE(vq_opt,
                      dim_pose,
                      vq_opt.nb_code,
                      vq_opt.code_dim,
                      vq_opt.output_emb_width,
                      vq_opt.down_t,
                      vq_opt.stride_t,
                      vq_opt.width,
                      vq_opt.depth,
                      vq_opt.dilation_growth_rate,
                      vq_opt.vq_act,
                      vq_opt.vq_norm)
    ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'),
                      map_location=opt.device)
    model_key = 'vq_model' if 'vq_model' in ckpt else 'net'
    vq_model.load_state_dict(ckpt[model_key])
    print(f'Loading VQ Model {opt.vq_name}')
    vq_model.to(opt.device)
    return vq_model, vq_opt


if __name__ == '__main__':
    parser = TrainT2MOptions()
    opt = parser.parse()
    fixseed(opt.seed)

    opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id))
    torch.autograd.set_detect_anomaly(True)

    opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    opt.model_dir = pjoin(opt.save_root, 'model')
    # opt.meta_dir = pjoin(opt.save_root, 'meta')
    opt.eval_dir = pjoin(opt.save_root, 'animation')
    opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name)

    os.makedirs(opt.model_dir, exist_ok=True)
    # os.makedirs(opt.meta_dir, exist_ok=True)
    os.makedirs(opt.eval_dir, exist_ok=True)
    os.makedirs(opt.log_dir, exist_ok=True)

    if opt.dataset_name == 't2m':
        opt.data_root = './dataset/HumanML3D'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.joints_num = 22
        opt.max_motion_len = 55
        dim_pose = 263
        radius = 4
        fps = 20
        kinematic_chain = t2m_kinematic_chain
        dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt'
    elif opt.dataset_name == 'kit': #TODO
        opt.data_root = './dataset/KIT-ML'
        opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')
        opt.joints_num = 21
        radius = 240 * 8
        fps = 12.5
        dim_pose = 251
        opt.max_motion_len = 55
        kinematic_chain = kit_kinematic_chain
        dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt'
    else:
        raise KeyError('Dataset Does Not Exist')
    opt.text_dir = pjoin(opt.data_root, 'texts')

    vq_model, vq_opt = load_vq_model()

    clip_version = 'ViT-B/32'

    opt.num_tokens = vq_opt.nb_code
    opt.num_quantizers = vq_opt.num_quantizers

    # if opt.is_v2:
    res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim,
                                          cond_mode='text',
                                          latent_dim=opt.latent_dim,
                                          ff_size=opt.ff_size,
                                          num_layers=opt.n_layers,
                                          num_heads=opt.n_heads,
                                          dropout=opt.dropout,
                                          clip_dim=512,
                                          shared_codebook=vq_opt.shared_codebook,
                                          cond_drop_prob=opt.cond_drop_prob,
                                          # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None,
                                          share_weight=opt.share_weight,
                                          clip_version=clip_version,
                                          opt=opt)
    # else:
    #     res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim,
    #                                           cond_mode='text',
    #                                           latent_dim=opt.latent_dim,
    #                                           ff_size=opt.ff_size,
    #                                           num_layers=opt.n_layers,
    #                                           num_heads=opt.n_heads,
    #                                           dropout=opt.dropout,
    #                                           clip_dim=512,
    #                                           shared_codebook=vq_opt.shared_codebook,
    #                                           cond_drop_prob=opt.cond_drop_prob,
    #                                           # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None,
    #                                           clip_version=clip_version,
    #                                           opt=opt)

    all_params = 0
    pc_transformer = sum(param.numel() for param in res_transformer.parameters_wo_clip())

    print(res_transformer)
    # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000))
    all_params += pc_transformer

    print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000))

    mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy'))
    std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy'))

    train_split_file = pjoin(opt.data_root, 'train.txt')
    val_split_file = pjoin(opt.data_root, 'val.txt')
train_dataset = Text2MotionDataset(opt, mean, std, train_split_file)
9
2023-11-29 19:21:27+00:00
16k
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
[ { "identifier": "ConfigMixin", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. 
Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "logging", "path": 
"llmga/diffusers/src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "deprecate", "path": "llmga/diffusers/src/diffusers/utils/deprecation_utils.py", "snippet": "def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):\n from .. import __version__\n\n deprecated_kwargs = take_from\n values = ()\n if not isinstance(args[0], tuple):\n args = (args,)\n\n for attribute, version_name, message in args:\n if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):\n raise ValueError(\n f\"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'\"\n f\" version {__version__} is >= {version_name}\"\n )\n\n warning = None\n if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:\n values += (deprecated_kwargs.pop(attribute),)\n warning = f\"The `{attribute}` argument is deprecated and will be removed in version {version_name}.\"\n elif hasattr(deprecated_kwargs, attribute):\n values += (getattr(deprecated_kwargs, attribute),)\n warning = f\"The `{attribute}` attribute is deprecated and will be removed in version {version_name}.\"\n elif deprecated_kwargs is None:\n warning = f\"`{attribute}` is deprecated and will be removed in version {version_name}.\"\n\n if warning is not None:\n warning = warning + \" \" if standard_warn else \"\"\n warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)\n\n if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:\n call_frame = inspect.getouterframes(inspect.currentframe())[1]\n filename = call_frame.filename\n line_number = call_frame.lineno\n function = call_frame.function\n key, value = next(iter(deprecated_kwargs.items()))\n raise TypeError(f\"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`\")\n\n if len(values) == 0:\n return\n elif len(values) == 1:\n return values[0]\n return values" }, { "identifier": "KarrasDiffusionSchedulers", "path": "llmga/diffusers/src/diffusers/schedulers/scheduling_utils.py", "snippet": "class KarrasDiffusionSchedulers(Enum):\n DDIMScheduler = 1\n DDPMScheduler = 2\n PNDMScheduler = 3\n 
LMSDiscreteScheduler = 4\n EulerDiscreteScheduler = 5\n HeunDiscreteScheduler = 6\n EulerAncestralDiscreteScheduler = 7\n DPMSolverMultistepScheduler = 8\n DPMSolverSinglestepScheduler = 9\n KDPM2DiscreteScheduler = 10\n KDPM2AncestralDiscreteScheduler = 11\n DEISMultistepScheduler = 12\n UniPCMultistepScheduler = 13\n DPMSolverSDEScheduler = 14" }, { "identifier": "SchedulerMixin", "path": "llmga/diffusers/src/diffusers/schedulers/scheduling_utils.py", "snippet": "class SchedulerMixin(PushToHubMixin):\n \"\"\"\n Base class for all schedulers.\n\n [`SchedulerMixin`] contains common functions shared by all schedulers such as general loading and saving\n functionalities.\n\n [`ConfigMixin`] takes care of storing the configuration attributes (like `num_train_timesteps`) that are passed to\n the scheduler's `__init__` function, and the attributes can be accessed by `scheduler.config.num_train_timesteps`.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of scheduler classes that are compatible with the parent scheduler\n class. Use [`~ConfigMixin.from_config`] to load a different compatible scheduler class (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a scheduler from a pre-defined JSON configuration file in a local directory or Hub repository.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the scheduler\n configuration saved with [`~SchedulerMixin.save_pretrained`].\n subfolder (`str`, *optional*):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. 
If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`. You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs, commit_hash = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n return_commit_hash=True,\n **kwargs,\n )\n return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to a directory so that it can be reloaded using the\n [`~SchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes" }, { "identifier": "SchedulerOutput", "path": "llmga/diffusers/src/diffusers/schedulers/scheduling_utils.py", "snippet": "class SchedulerOutput(BaseOutput):\n \"\"\"\n Base class for the output of a scheduler's `step` function.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n \"\"\"\n\n prev_sample: torch.FloatTensor" } ]
import math
import numpy as np
import torch
from typing import List, Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import deprecate, logging
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
12,063
self.order_list = self.get_order_list(num_inference_steps) # add an index counter for schedulers that allow duplicated timesteps self._step_index = None # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(sigma) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. 
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyward argument") if timestep is not None:
# Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin): """ `DPMSolverSinglestepScheduler` is a fast dedicated high-order solver for diffusion ODEs. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. solver_order (`int`, defaults to 2): The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling. 
prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++"`. algorithm_type (`str`, defaults to `dpmsolver++`): Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the `dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. solver_type (`str`, defaults to `midpoint`): Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. lower_order_final (`bool`, defaults to `True`): Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. lambda_min_clipped (`float`, defaults to `-inf`): Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the cosine (`squaredcos_cap_v2`) noise schedule. variance_type (`str`, *optional*): Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output contains the predicted Gaussian variance. """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[np.ndarray] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, use_karras_sigmas: Optional[bool] = False, lambda_min_clipped: float = -float("inf"), variance_type: Optional[str] = None, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
self.betas = ( torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # settings for DPM-Solver if algorithm_type not in ["dpmsolver", "dpmsolver++"]: if algorithm_type == "deis": self.register_to_config(algorithm_type="dpmsolver++") else: raise NotImplementedError(f"{algorithm_type} does is not implemented for {self.__class__}") if solver_type not in ["midpoint", "heun"]: if solver_type in ["logrho", "bh1", "bh2"]: self.register_to_config(solver_type="midpoint") else: raise NotImplementedError(f"{solver_type} does is not implemented for {self.__class__}") # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.sample = None self.order_list = self.get_order_list(num_train_timesteps) self._step_index = None def get_order_list(self, num_inference_steps: int) -> List[int]: """ Computes the solver order at each time step. Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. """ steps = num_inference_steps order = self.config.solver_order if self.config.lower_order_final: if order == 3: if steps % 3 == 0: orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1] elif steps % 3 == 1: orders = [1, 2, 3] * (steps // 3) + [1] else: orders = [1, 2, 3] * (steps // 3) + [1, 2] elif order == 2: if steps % 2 == 0: orders = [1, 2] * (steps // 2) else: orders = [1, 2] * (steps // 2) + [1] elif order == 1: orders = [1] * steps else: if order == 3: orders = [1, 2, 3] * (steps // 3) elif order == 2: orders = [1, 2] * (steps // 2) elif order == 1: orders = [1] * steps return orders @property def step_index(self): """ The index counter for current timestep. It will increae 1 after each scheduler step. """ return self._step_index def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps # Clipping the minimum of all lambda(t) for numerical stability. # This is critical for cosine (squaredcos_cap_v2) noise schedule. 
clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1 - clipped_idx, num_inference_steps + 1) .round()[::-1][:-1] .copy() .astype(np.int64) ) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.model_outputs = [None] * self.config.solver_order self.sample = None if not self.config.lower_order_final and num_inference_steps % self.config.solver_order != 0: logger.warn( "Changing scheduler {self.config} to have `lower_order_final` set to True to handle uneven amount of inference steps. Please make sure to always use an even number of `num_inference steps when using `lower_order_final=True`." ) self.register_to_config(lower_order_final=True) self.order_list = self.get_order_list(num_inference_steps) # add an index counter for schedulers that allow duplicated timesteps self._step_index = None # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(sigma) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyward argument") if timestep is not None:
deprecate(
3
2023-11-27 18:46:55+00:00
16k
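The scheduler code in the record above builds its inference step sizes with `_convert_to_karras`, the Karras et al. (2022) noise schedule. As a quick reference, a minimal standalone sketch of that construction follows; the function name `karras_sigmas` and the example values are illustrative assumptions, not part of the record:

import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # interpolate in rho-warped space so the sigmas decrease from sigma_max to sigma_min
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

print(karras_sigmas(0.1, 10.0, 5))  # descending noise levels from 10.0 down to 0.1

The record's `_convert_to_karras` applies the same formula, except that it reads `sigma_min` and `sigma_max` from the ends of the existing sigma array.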
sherwinbahmani/4dfy
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference']\n finite_difference_normal_eps: float = 0.01\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, 
self.cfg.mlp_network_config\n )\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n mesh = trimesh.load(mesh_path)\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), 
dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if self.cfg.normal_type == \"finite_difference\":\n eps = self.cfg.finite_difference_normal_eps\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 
6 1\"] = self.forward_sdf(points_offset)\n normal = (\n 0.5 * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0]) / eps\n )\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if 
self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n self.density_blob_std = self.cfg.density_blob_std\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if self.cfg.normal_type == \"finite_difference\":\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n \n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if (\n global_step >= min_step\n and global_step <= max_step\n ): \n end_val = 
self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step)*(end_val - start_val)/(max_step - min_step)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, 
idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: 
{}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n vn_idx[i] = self.t_nrm_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = 
pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with 
torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config)\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,286
self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if 
isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitVolume):
4
2023-11-29 05:15:56+00:00
16k
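The remove_outlier logic in the Mesh snippet of the record above branches on the type of outlier_n_faces_threshold: a float is read as a fraction of the largest component's face count, while an int is an absolute face count (the config in this record defaults it to 0.01). Below is a minimal, trimesh-free sketch of just that thresholding on plain face-count lists; the helper names are illustrative and not part of the original code.

from typing import List, Union

def face_count_threshold(face_counts: List[int],
                         outlier_n_faces_threshold: Union[int, float]) -> int:
    # float -> relative threshold: fraction of the largest component's face count
    if isinstance(outlier_n_faces_threshold, float):
        return int(max(face_counts) * outlier_n_faces_threshold)
    # int -> absolute face-count threshold
    return outlier_n_faces_threshold

def keep_components(face_counts: List[int],
                    outlier_n_faces_threshold: Union[int, float]) -> List[int]:
    thr = face_count_threshold(face_counts, outlier_n_faces_threshold)
    # keep components with at least `thr` faces, mirroring the >= comparison in remove_outlier
    return [n for n in face_counts if n >= thr]

if __name__ == "__main__":
    # with the default of 0.01, components below 1% of the largest one are dropped
    print(keep_components([10000, 5000, 40, 3], 0.01))  # -> [10000, 5000]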
rlawjdghek/StableVITON
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.lossconfig = lossconfig\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = torch.nn.Identity()\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n def init_loss(self):\n self.loss = instantiate_from_config(self.lossconfig)\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx):\n real_img = self.get_input(batch, self.image_key)\n recon, posterior = self(real_img)\n loss = self.loss(real_img, recon, posterior)\n return loss\n \n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n def configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.decoder.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec 
= self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n print(f\"beta scheduler name : {schedule}\")\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, 
attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates, cond_output_dict = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates, cond_output_dict\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0, cond_output_dict = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n if cond_output_dict is not None:\n cond_output = cond_output_dict[\"cond_output\"] \n if self.model.use_noisy_cond:\n b = cond_output.shape[0]\n\n alphas = self.model.alphas_cumprod if ddim_use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if ddim_use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if ddim_use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if ddim_use_original_steps else self.ddim_sigmas\n\n device = cond_output.device\n a_t = torch.full((b, 1, 1, 1), alphas[0], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[0], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[0], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[0], device=device)\n\n c = cond_output_dict[\"cond_input\"]\n e_t = cond_output\n pred_c0 = (c - sqrt_one_minus_at * e_t) / a_t.sqrt()\n dir_ct = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(c.shape, device, False) * temperature\n\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n cond_output = a_prev.sqrt() * pred_c0 + dir_ct + noise \n cond_output_dict[f\"cond_sample\"] = cond_output\n return img, intermediates, cond_output_dict\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output, cond_output_dict = self.model.apply_model(x, t, c)\n else:\n # x_in = torch.cat([x] * 2)\n # t_in = torch.cat([t] * 2)\n # if isinstance(c, dict):\n # assert isinstance(unconditional_conditioning, dict)\n # c_in = dict()\n # for k in c:\n # if isinstance(c[k], list):\n # c_in[k] = [torch.cat([\n # unconditional_conditioning[k][i],\n # c[k][i]]) for i in range(len(c[k]))]\n # else:\n # c_in[k] = torch.cat([\n # unconditional_conditioning[k],\n # c[k]])\n # elif isinstance(c, list):\n # c_in = list()\n # assert isinstance(unconditional_conditioning, list)\n # for i in range(len(c)):\n # c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n # else:\n # c_in = torch.cat([unconditional_conditioning, c])\n x_in = x\n t_in = t\n model_t, cond_output_dict_cond = self.model.apply_model(x_in, t_in, c)\n model_uncond, cond_output_dict_uncond = self.model.apply_model(x_in, t_in, unconditional_conditioning)\n if isinstance(model_t, tuple):\n model_t, _ = model_t\n if isinstance(model_uncond, tuple):\n model_uncond, _ = model_uncond\n if cond_output_dict_cond is not None:\n cond_output_dict = dict()\n for k in cond_output_dict_cond.keys():\n cond_output_dict[k] = torch.cat([cond_output_dict_uncond[k], cond_output_dict_cond[k]])\n else:\n cond_output_dict = None\n # model_output, cond_output_dict = self.model.apply_model(x_in, t_in, c_in)\n # model_uncond, model_t = model_output.chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = 
self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0, cond_output_dict\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)[0]\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)[0]\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
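The p_sample_ddim body in the DDIMSampler snippet above boils down to the standard DDIM update: recover a prediction of x_0 from the eps estimate, add the "direction pointing to x_t" term, and optionally a sigma-scaled noise term. A condensed, self-contained sketch of that single step follows; scalar schedule values are passed in explicitly and temperature/noise-dropout handling is omitted, so treat it as a readability paraphrase rather than a drop-in replacement for the snippet.

import torch

def ddim_step(x_t: torch.Tensor, e_t: torch.Tensor,
              a_t: float, a_prev: float, sigma_t: float) -> torch.Tensor:
    # current prediction for x_0 from the eps prediction e_t
    sqrt_one_minus_at = (1.0 - a_t) ** 0.5
    pred_x0 = (x_t - sqrt_one_minus_at * e_t) / (a_t ** 0.5)
    # direction pointing to x_t
    dir_xt = (1.0 - a_prev - sigma_t ** 2) ** 0.5 * e_t
    # sigma_t == 0 recovers the deterministic (eta = 0) DDIM sampler
    noise = sigma_t * torch.randn_like(x_t)
    return (a_prev ** 0.5) * pred_x0 + dir_xt + noise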
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torchvision.transforms as T import random import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from torchvision.transforms.functional import resize from diffusers.models.autoencoder_kl import AutoencoderKLOutput from diffusers.models.vae import DecoderOutput from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd from ldm.models.diffusion.ddim import DDIMSampler
13,207
assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, l_cond_simple_weight=1.0, l_cond_recon_weight=1.0, **kwargs ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.unet_config = unet_config self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.l_cond_simple_weight = l_cond_simple_weight self.l_cond_recon_weight = l_cond_recon_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
2
2023-12-02 05:56:58+00:00
16k
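For reference, the q_mean_variance / q_sample methods quoted in the snippet above implement the standard DDPM forward-process marginal. A compact LaTeX restatement, with $\bar{\alpha}_t$ denoting the cumulative product of the alphas (the quantity held in the sqrt_alphas_cumprod and sqrt_one_minus_alphas_cumprod buffers):

\[
q(x_t \mid x_0) = \mathcal{N}\!\big(x_t;\ \sqrt{\bar{\alpha}_t}\,x_0,\ (1-\bar{\alpha}_t)\,\mathbf{I}\big),
\qquad
x_t = \sqrt{\bar{\alpha}_t}\,x_0 + \sqrt{1-\bar{\alpha}_t}\,\varepsilon,\quad \varepsilon \sim \mathcal{N}(0,\mathbf{I}).
\]

The v-parameterization handled by predict_start_from_z_and_v / predict_eps_from_z_and_v corresponds to $v = \sqrt{\bar{\alpha}_t}\,\varepsilon - \sqrt{1-\bar{\alpha}_t}\,x_0$, from which both $x_0$ and $\varepsilon$ are recovered exactly as in the code.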
AIFSH/NativeSpeaker
src/core.py
[ { "identifier": "HandleLog", "path": "src/log_helper.py", "snippet": "class HandleLog:\n \"\"\"\n 先创建日志记录器(logging.getLogger),然后再设置日志级别(logger.setLevel),\n 接着再创建日志文件,也就是日志保存的地方(logging.FileHandler),然后再设置日志格式(logging.Formatter),\n 最后再将日志处理程序记录到记录器(addHandler)\n \"\"\"\n\n def __init__(self):\n self.__now_time = datetime.now().strftime('%Y-%m-%d') # 当前日期格式化\n self.__all_log_path = os.path.join(log_path, self.__now_time + \"-all\" + \".log\") # 收集所有日志信息文件\n self.__error_log_path = os.path.join(log_path, self.__now_time + \"-error\" + \".log\") # 收集错误日志信息文件\n self.__logger = logging.getLogger() # 创建日志记录器\n self.__logger.setLevel(logging.DEBUG) # 设置默认日志记录器记录级别\n\n @staticmethod\n def __init_logger_handler(log_path):\n \"\"\"\n 创建日志记录器handler,用于收集日志\n :param log_path: 日志文件路径\n :return: 日志记录器\n \"\"\"\n # 写入文件,如果文件超过1M大小时,切割日志文件,仅保留3个文件\n logger_handler = RotatingFileHandler(filename=log_path, maxBytes=1 * 1024 * 1024, backupCount=3, encoding='utf-8')\n return logger_handler\n\n @staticmethod\n def __init_console_handle():\n \"\"\"创建终端日志记录器handler,用于输出到控制台\"\"\"\n console_handle = colorlog.StreamHandler()\n return console_handle\n\n def __set_log_handler(self, logger_handler, level=logging.DEBUG):\n \"\"\"\n 设置handler级别并添加到logger收集器\n :param logger_handler: 日志记录器\n :param level: 日志记录器级别\n \"\"\"\n logger_handler.setLevel(level=level)\n self.__logger.addHandler(logger_handler)\n\n def __set_color_handle(self, console_handle):\n \"\"\"\n 设置handler级别并添加到终端logger收集器\n :param console_handle: 终端日志记录器\n :param level: 日志记录器级别\n \"\"\"\n console_handle.setLevel(logging.DEBUG)\n self.__logger.addHandler(console_handle)\n\n @staticmethod\n def __set_color_formatter(console_handle, color_config):\n \"\"\"\n 设置输出格式-控制台\n :param console_handle: 终端日志记录器\n :param color_config: 控制台打印颜色配置信息\n :return:\n \"\"\"\n formatter = colorlog.ColoredFormatter(default_formats[\"color_format\"], log_colors=color_config)\n console_handle.setFormatter(formatter)\n\n @staticmethod\n def __set_log_formatter(file_handler):\n \"\"\"\n 设置日志输出格式-日志文件\n :param file_handler: 日志记录器\n \"\"\"\n formatter = logging.Formatter(default_formats[\"log_format\"], datefmt='%a, %d %b %Y %H:%M:%S')\n file_handler.setFormatter(formatter)\n\n @staticmethod\n def __close_handler(file_handler):\n \"\"\"\n 关闭handler\n :param file_handler: 日志记录器\n \"\"\"\n file_handler.close()\n\n def __console(self, level, message):\n \"\"\"构造日志收集器\"\"\"\n all_logger_handler = self.__init_logger_handler(self.__all_log_path) # 创建日志文件\n error_logger_handler = self.__init_logger_handler(self.__error_log_path)\n console_handle = self.__init_console_handle()\n\n self.__set_log_formatter(all_logger_handler) # 设置日志格式\n self.__set_log_formatter(error_logger_handler)\n self.__set_color_formatter(console_handle, log_colors_config)\n\n self.__set_log_handler(all_logger_handler) # 设置handler级别并添加到logger收集器\n self.__set_log_handler(error_logger_handler, level=logging.ERROR)\n self.__set_color_handle(console_handle)\n\n if level == 'info':\n self.__logger.info(message)\n elif level == 'debug':\n self.__logger.debug(message)\n elif level == 'warning':\n self.__logger.warning(message)\n elif level == 'error':\n self.__logger.error(message)\n elif level == 'critical':\n self.__logger.critical(message)\n\n self.__logger.removeHandler(all_logger_handler) # 避免日志输出重复问题\n self.__logger.removeHandler(error_logger_handler)\n self.__logger.removeHandler(console_handle)\n\n self.__close_handler(all_logger_handler) # 关闭handler\n self.__close_handler(error_logger_handler)\n\n def debug(self, 
message):\n self.__console('debug', message)\n\n def info(self, message):\n self.__console('info', message)\n\n def warning(self, message):\n self.__console('warning', message)\n\n def error(self, message):\n self.__console('error', message)\n\n def critical(self, message):\n self.__console('critical', message)" }, { "identifier": "AudioProcess", "path": "src/audio_bgm_split.py", "snippet": "class AudioProcess:\n def __init__(self, agg, is_half=False, tta=False):\n\n # model_path = os.path.join('weights', 'HP5-主旋律人声vocals+其他instrumentals.pth')\n model_path = load_file_from_url(url=\"https://hf-mirror.com/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-%E4%B8%BB%E6%97%8B%E5%BE%8B%E4%BA%BA%E5%A3%B0vocals%2B%E5%85%B6%E4%BB%96instrumentals.pth?download=true\", \n model_dir='weights', progress=True, file_name=\"HP5-主旋律人声vocals+其他instrumentals.pth\")\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.data = {\n # Processing Options\n \"postprocess\": False,\n \"tta\": tta,\n # Constants\n \"window_size\": 512,\n \"agg\": agg,\n \"high_end_process\": \"mirroring\",\n }\n mp = ModelParameters(\"src/third_part/uvr5_pack/lib_v5/modelparams/4band_v2.json\")\n model = Nets.CascadedASPPNet(mp.param[\"bins\"] * 2)\n cpk = torch.load(model_path, map_location=\"cpu\")\n model.load_state_dict(cpk)\n model.eval()\n if is_half:\n model = model.half().to(self.device)\n else:\n model = model.to(self.device)\n\n self.mp = mp\n self.model = model\n\n def split(self, music_file):\n \n X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}\n bands_n = len(self.mp.param[\"band\"])\n # print(bands_n)\n for d in range(bands_n, 0, -1):\n bp = self.mp.param[\"band\"][d]\n if d == bands_n: # high-end band\n (\n X_wave[d],\n _,\n ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑\n path=music_file,\n sr=bp[\"sr\"],\n mono=False,\n dtype=np.float32,\n res_type=bp[\"res_type\"],\n )\n if X_wave[d].ndim == 1:\n X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])\n else: # lower bands\n X_wave[d] = librosa.core.resample(\n y=X_wave[d + 1],\n orig_sr=self.mp.param[\"band\"][d + 1][\"sr\"],\n target_sr=bp[\"sr\"],\n res_type=bp[\"res_type\"],\n )\n # Stft of wave source\n X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(\n X_wave[d],\n bp[\"hl\"],\n bp[\"n_fft\"],\n self.mp.param[\"mid_side\"],\n self.mp.param[\"mid_side_b2\"],\n self.mp.param[\"reverse\"],\n )\n # pdb.set_trace()\n if d == bands_n and self.data[\"high_end_process\"] != \"none\":\n input_high_end_h = (bp[\"n_fft\"] // 2 - bp[\"crop_stop\"]) + (\n self.mp.param[\"pre_filter_stop\"] - self.mp.param[\"pre_filter_start\"]\n )\n input_high_end = X_spec_s[d][\n :, bp[\"n_fft\"] // 2 - input_high_end_h : bp[\"n_fft\"] // 2, :\n ]\n\n X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)\n aggresive_set = float(self.data[\"agg\"] / 100)\n aggressiveness = {\n \"value\": aggresive_set,\n \"split_bin\": self.mp.param[\"band\"][1][\"crop_stop\"],\n }\n with torch.no_grad():\n pred, X_mag, X_phase = inference(\n X_spec_m, self.device, self.model, aggressiveness, self.data\n )\n # Postprocess\n if self.data[\"postprocess\"]:\n pred_inv = np.clip(X_mag - pred, 0, np.inf)\n pred = spec_utils.mask_silence(pred, pred_inv)\n y_spec_m = pred * X_phase\n v_spec_m = X_spec_m - y_spec_m\n\n \n if self.data[\"high_end_process\"].startswith(\"mirroring\"):\n input_high_end_y = spec_utils.mirroring(\n self.data[\"high_end_process\"], y_spec_m, input_high_end, self.mp\n )\n wav_instrument = spec_utils.cmb_spectrogram_to_wave(\n 
y_spec_m, self.mp, input_high_end_h, input_high_end_y\n )\n \n input_high_end_v = spec_utils.mirroring(\n self.data[\"high_end_process\"], v_spec_m, input_high_end, self.mp\n )\n wav_vocals = spec_utils.cmb_spectrogram_to_wave(\n v_spec_m, self.mp, input_high_end_h, input_high_end_v\n )\n \n else:\n wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)\n wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)\n \n logger.info(\"vocal and instruments split done\")\n \n temp_manager = TempFileManager()\n voice_temp_file = temp_manager.create_temp_file(suffix='.wav')\n noise_temp_file = temp_manager.create_temp_file(suffix='.wav')\n \n sf.write(\n voice_temp_file,\n (np.array(wav_vocals) * 32768).astype(\"int16\"),\n self.mp.param[\"sr\"],\n )\n sf.write(\n noise_temp_file,\n (np.array(wav_instrument) * 32768).astype(\"int16\"),\n self.mp.param[\"sr\"],\n )\n return voice_temp_file.name, noise_temp_file.name" }, { "identifier": "VoiceCloner", "path": "src/voice_clone.py", "snippet": "class VoiceCloner:\n\n def __init__(self, version_name=\"v2.0.3\") -> None:\n self.temp_manager = TempFileManager()\n root_path = os.path.join('weights',f\"xtts_{version_name}\")\n config_path = load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/config.json?download=true\",\n model_dir=root_path,\n file_name=\"config.json\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/model.pth?download=true\",\n model_dir=root_path,\n file_name=\"model.pth\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/vocab.json?download=true\",\n model_dir=root_path,\n file_name=\"vocab.json\")\n load_file_from_url(url=f\"https://hf-mirror.com/coqui/XTTS-v2/resolve/{version_name}/hash.md5?download=true\",\n model_dir=root_path,\n file_name=\"hash.md5\")\n # model_path = f\"{root_path}/model.pth\"\n # logger.info(f'model_path:{model_path}')\n self.tts = TTS(model_path=root_path,config_path=config_path,gpu=True)\n \n def __call__(self, text, lang_code, speaker_wav,speed=1.0,*args: Any, **kwds: Any) -> Any:\n temp_file = self.temp_manager.create_temp_file(suffix='.wav').name\n self.tts.tts_to_file(text=text,\n language=lang_code,\n speaker_wav=speaker_wav,\n speed=speed,\n file_path=temp_file)\n return temp_file" }, { "identifier": "TempFileManager", "path": "src/temp_manager.py", "snippet": "class TempFileManager:\n _instance = None\n temp_files = []\n\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n atexit.register(cls.cleanup)\n return cls._instance\n\n def create_temp_file(self, suffix):\n temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)\n self.temp_files.append(temp_file.name)\n return temp_file\n\n @classmethod\n def cleanup(cls):\n for temp_file in cls.temp_files:\n try:\n # Remove the temporary file\n os.remove(temp_file)\n except OSError:\n pass" }, { "identifier": "Translator", "path": "src/translator.py", "snippet": "class Translator:\n\n def __init__(self,work_with_human=False) -> None:\n # _ = ts.preaccelerate_and_speedtest()\n self.work_with_human = work_with_human\n\n def __call__(self,text,from_lang,to_lang,*args: Any, **kwds: Any) -> Any:\n assert from_lang != to_lang,\"same lang code error,translator only work in language to another language\"\n if self.work_with_human:\n lience = input(\"!!!注意,出现这个提示是因为您自行修改了相关代码,请不要做偏离原文内容的手工翻译,否则后果自负,与该项目开源作者无关!我已经阅读并同意该声明。\\n(!!!Attention!This prompt appears because you modified the code 
yourself,Please do not deviate from the original content of manual translation, or bear the consequences,It has nothing to do with the author of this project! I have read and agree with the statement)\\t yes | no:\\n\").strip()\n if \"y\" not in lience:\n self.work_with_human = False\n \n if \"zh\" in to_lang:\n to_lang = \"zh\"\n logger.info(f\"{from_lang} {to_lang} {text} \")\n try:\n dst_text = ts.translate_text(query_text=text,translator=\"qqTranSmart\",\n from_language=from_lang,to_language=to_lang)\n except ts.server.TranslatorError:\n dst_text = input(\"translator failed,input by self:\")\n dst_text = dst_text.strip()\n return dst_text\n logger.info(\"dst_text:{}\".format(dst_text))\n if self.work_with_human:\n if_by_hand = input(\"translate by hand? 1 by hand, 0 pass:\\t\")\n if if_by_hand == \"1\":\n dst_text = input(\"input by hand:\\n\").strip()\n logger.info(f\"dst_text edited:{dst_text}\")\n\n return dst_text" }, { "identifier": "LipSync", "path": "src/lipsync.py", "snippet": "class LipSync:\n def __init__(self,model_name) -> None:\n self.model_name = model_name\n self.img_size = 96\n self.static = False\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.face_det_batch_size = 16\n self.wav2lip_batch_size = 16\n self.mel_step_size = 16\n self.pads = [0,20,0,0]\n self.nosmooth = True\n self.box = [-1, -1, -1, -1]\n self.fps = 25\n self.resize_factor = 2\n self.rotate = False\n self.crop = [0, -1, 0, -1]\n logger.info('Using {} for inference.'.format(self.device))\n\n load_file_from_url(url=\"https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth\",\n model_dir=\"src/third_part/wav2lip/face_detection/detection/sfd\",\n file_name=\"s3fd.pth\")\n load_file_from_url(url=\"https://hf-mirror.com/MarjorieSaul/wav2lip_sd_models/resolve/main/wav2lip.pth?download=true\",\n model_dir=\"weights\",\n file_name=\"wav2lip.pth\")\n load_file_from_url(url=\"https://hf-mirror.com/MarjorieSaul/wav2lip_sd_models/resolve/main/wav2lip_gan.pth?download=true\",\n model_dir=\"weights\",\n file_name=\"wav2lip_gan.pth\")\n self.tmp_manager = TempFileManager()\n \n\n def __call__(self, face,audio,outfile,voice,*args: Any, **kwds: Any) -> Any:\n if os.path.isfile(face) and face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n self.static = True\n if not os.path.isfile(face):\n raise ValueError('face argument must be a valid path to video/image file')\n elif face.split('.')[1] in ['jpg', 'png', 'jpeg']:\n full_frames = [cv2.imread(face)]\n fps = self.fps\n else:\n video_stream = cv2.VideoCapture(face)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n logger.info('Reading video frames...')\n\n full_frames = []\n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break\n if self.resize_factor > 1:\n frame = cv2.resize(frame, (frame.shape[1]//self.resize_factor, frame.shape[0]//self.resize_factor))\n\n if self.rotate:\n frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)\n \n y1, y2, x1, x2 = self.crop\n if x2 == -1: x2 = frame.shape[1]\n if y2 == -1: y2 = frame.shape[0]\n frame = frame[y1:y2, x1:x2]\n full_frames.append(frame)\n\n logger.info(\"Number of frames available for inference: \"+str(len(full_frames)))\n\n assert audio.endswith('.wav'),\"audio file shoud end with .wav\"\n\n wav = load_wav(audio, sr=16000)\n mel = melspectrogram(wav)\n\n if np.isnan(mel.reshape(-1)).sum() > 0:\n raise ValueError('Mel contains nan! Using a TTS voice? 
Add a small epsilon noise to the wav file and try again')\n \n mel_chunks = []\n mel_idx_multiplier = 80./fps\n i = 0\n while 1:\n start_idx = int(i * mel_idx_multiplier)\n if start_idx + self.mel_step_size > len(mel[0]):\n mel_chunks.append(mel[:, len(mel[0]) - self.mel_step_size:])\n break\n mel_chunks.append(mel[:, start_idx : start_idx + self.mel_step_size])\n i += 1\n \n logger.info(\"Length of mel chunks: {}\".format(len(mel_chunks)))\n\n full_frames = full_frames[:len(mel_chunks)]\n\n batch_size = self.wav2lip_batch_size\n\n gen = self.datagen(full_frames.copy(), mel_chunks)\n while 1:\n try:\n for i, (img_batch, mel_batch, frames, coords) in enumerate(tqdm(gen,\n total=int(np.ceil(float(len(mel_chunks))/batch_size)))):\n if i == 0:\n model = self.load_model()\n logger.info(\"Model loaded\")\n frame_h, frame_w = full_frames[0].shape[:-1]\n temp_file = self.tmp_manager.create_temp_file(suffix='.avi').name\n out = cv2.VideoWriter(temp_file, \n\t\t\t\t\t\t\t\t\tcv2.VideoWriter_fourcc(*'DIVX'), fps, (frame_w, frame_h))\n img_batch = torch.FloatTensor(np.transpose(img_batch, (0, 3, 1, 2))).to(self.device)\n mel_batch = torch.FloatTensor(np.transpose(mel_batch, (0, 3, 1, 2))).to(self.device)\n\n with torch.no_grad():\n pred = model(mel_batch, img_batch)\n pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.\n for p, f, c in zip(pred, frames, coords):\n y1, y2, x1, x2 = c\n try:\n p = cv2.resize(p.astype(np.uint8), (x2 - x1, y2 - y1))\n f[y1:y2, x1:x2] = p\n except cv2.error:\n pass\n out.write(f)\n out.release()\n except RuntimeError:\n if batch_size == 1: \n raise RuntimeError('Image too big to run wav2lip on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n continue\n break\n command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 {}'.format(voice, temp_file, outfile)\n subprocess.call(command, shell=platform.system() != 'Windows')\n \n def load_model(self):\n model = Wav2Lip()\n logger.info(\"Load checkpoint from: {}\".format(self.model_name))\n checkpoint = self._load()\n s = checkpoint[\"state_dict\"]\n new_s = {}\n for k, v in s.items():\n new_s[k.replace('module.', '')] = v\n model.load_state_dict(new_s)\n model = model.to(self.device)\n return model.eval()\n\n def _load(self):\n checkpoint_path = \"weights/{}.pth\".format(self.model_name)\n if self.device == 'cuda':\n checkpoint = torch.load(checkpoint_path)\n else:\n checkpoint = torch.load(checkpoint_path,\n map_location=lambda storage, loc: storage)\n return checkpoint\n \n\n def datagen(self,frames, mels):\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n\n if self.box[0] == -1:\n if not self.static:\n face_det_results = self.face_detect(frames) # BGR2RGB for CNN face detection\n else:\n face_det_results = self.face_detect([frames[0]])\n else:\n logger.info('Using the specified bounding box instead of face detection...')\n y1, y2, x1, x2 = self.box\n face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]\n \n for i, m in enumerate(mels):\n idx = 0 if self.static else i%len(frames)\n frame_to_save = frames[idx].copy()\n face, coords = face_det_results[idx].copy()\n\n try:\n face = cv2.resize(face, (self.img_size, self.img_size))\n except cv2.error:\n face = np.zeros((10, 10,3), np.uint8)\n face = cv2.resize(face, (self.img_size, self.img_size))\n \n img_batch.append(face)\n mel_batch.append(m)\n frame_batch.append(frame_to_save)\n coords_batch.append(coords)\n\n if len(img_batch) >= self.wav2lip_batch_size:\n img_batch, mel_batch = np.asarray(img_batch), 
np.asarray(mel_batch)\n\n img_masked = img_batch.copy()\n img_masked[:, self.img_size//2:] = 0\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n yield img_batch, mel_batch, frame_batch, coords_batch\n img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []\n \n \n if len(img_batch) > 0:\n img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)\n img_masked = img_batch.copy()\n img_masked[:, self.img_size//2:] = 0\n img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.\n mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])\n yield img_batch, mel_batch, frame_batch, coords_batch\n\n\n def face_detect(self,images):\n detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,\n flip_input=False,device=self.device\n )\n batch_size = self.face_det_batch_size\n while 1:\n predictions = []\n try:\n for i in tqdm(range(0,len(images),batch_size)):\n predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))\n except RuntimeError:\n if batch_size == 1: \n raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')\n batch_size //= 2\n logger.warning('Recovering from OOM error; New batch size: {}'.format(batch_size))\n continue\n break\n results = []\n pady1, pady2, padx1, padx2 = self.pads\n for rect, image in zip(predictions, images):\n if rect is None:\n rect = (0,20,0,0)\n y1 = max(0, rect[1] - pady1)\n y2 = min(image.shape[0], rect[3] + pady2)\n x1 = max(0, rect[0] - padx1)\n x2 = min(image.shape[1], rect[2] + padx2)\n results.append([x1,y1,x2,y2])\n boxes = np.array(results)\n if not self.nosmooth: boxes = self.get_smoothened_boxes(boxes, T=5)\n results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]\n import gc; gc.collect(); torch.cuda.empty_cache();del detector\n return results\n\n def get_smoothened_boxes(self,boxes, T):\n for i in range(len(boxes)):\n if i + T > len(boxes):\n window = boxes[len(boxes) - T:]\n else:\n window = boxes[i : i + T]\n boxes[i] = np.mean(window, axis=0)\n return boxes" }, { "identifier": "Upscale", "path": "src/upscale.py", "snippet": "class Upscale:\n def __init__(self,fidelity_weight=0.9) -> None:\n self.pretrain_model_url = {\n 'restoration': 'https://mirror.ghproxy.com/https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',\n }\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.use_half = True \n self.bg_tile = 400\n self.w = fidelity_weight\n self.bg_upsampler = 'realesrgan'\n self.face_upsample = True\n self.has_aligned = False\n self.detection_model = \"retinaface_resnet50\"\n self.upscale = 2\n self.only_center_face = False\n self.draw_box = False\n self.suffix = None\n\n\n def __call__(self,input_path:str,output_path:str,audio,*args: Any, **kwds: Any) -> Any:\n \n input_video = False\n if input_path.endswith(('jpg', 'jpeg', 'png', 'JPG', 'JPEG', 'PNG')): # input single img path\n input_img_list = [input_path]\n result_root = f'results/test_img_{self.w}'\n elif input_path.endswith(('mp4', 'mov', 'avi', 'MP4', 'MOV', 'AVI')): # input video path\n input_img_list = []\n vidreader = VideoReader(input_path)\n image = vidreader.get_frame()\n while image is not None:\n input_img_list.append(image)\n image = vidreader.get_frame()\n # audio = vidreader.get_audio()\n fps = vidreader.get_fps() \n 
video_name = os.path.basename(input_path)[:-4]\n result_root = f'results/{video_name}_{self.w}'\n input_video = True\n vidreader.close()\n else: # input img folder\n if input_path.endswith('/'): # solve when path ends with /\n input_path = input_path[:-1]\n # scan all the jpg and png images\n input_img_list = sorted(glob.glob(os.path.join(input_path, '*.[jpJP][pnPN]*[gG]')))\n result_root = f'results/{os.path.basename(input_path)}_{self.w}'\n \n if not output_path is None: # set output path\n result_root = output_path\n\n test_img_num = len(input_img_list)\n if test_img_num == 0:\n raise FileNotFoundError('No input image/video is found...\\n' \n '\\tNote that --input_path for video should end with .mp4|.mov|.avi')\n\n # ------------------ set up background upsampler ------------------\n if self.bg_upsampler == 'realesrgan':\n bg_upsampler = self.set_realesrgan()\n else:\n bg_upsampler = None\n \n # ------------------ set up face upsampler ------------------\n if self.face_upsample:\n if bg_upsampler is not None:\n face_upsampler = bg_upsampler\n else:\n face_upsampler = self.set_realesrgan()\n else:\n face_upsampler = None\n \n # ------------------ set up CodeFormer restorer -------------------\n net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, \n connect_list=['32', '64', '128', '256']).to(self.device)\n # ckpt_path = 'weights/CodeFormer/codeformer.pth'\n ckpt_path = load_file_from_url(url=self.pretrain_model_url['restoration'], \n model_dir='weights/CodeFormer', progress=True, file_name=None)\n checkpoint = torch.load(ckpt_path)['params_ema']\n net.load_state_dict(checkpoint)\n net.eval()\n\n # ------------------ set up FaceRestoreHelper -------------------\n # large det_model: 'YOLOv5l', 'retinaface_resnet50'\n # small det_model: 'YOLOv5n', 'retinaface_mobile0.25'\n if not self.has_aligned: \n logger.info(f'Face detection model: {self.detection_model}')\n if bg_upsampler is not None: \n logger.info(f'Background upsampling: True, Face upsampling: {self.face_upsample}')\n else:\n logger.info(f'Background upsampling: False, Face upsampling: {self.face_upsample}')\n\n # -------------------- start to processing ---------------------\n logger.info(\"multi thread processing \")\n '''\n with ThreadPoolExecutor(max_workers=20) as executor:\n for i, img_path in enumerate(input_img_list):\n executor.submit(self.enhance_face,img_path,i,video_name,test_img_num,\n bg_upsampler,result_root,input_video,net,face_upsampler)\n '''\n Parallel(n_jobs=4)(delayed(self.enhance_face)(img_path,i,video_name,test_img_num,\\\n bg_upsampler,result_root,input_video,\\\n net,face_upsampler) for i,img_path in enumerate(input_img_list))\n\n # save enhanced video\n if input_video:\n logger.info('Video Saving...')\n # load images\n video_frames = []\n img_list = sorted(glob.glob(os.path.join(result_root, 'final_results', '*.[jp][pn]g')))\n for img_path in img_list:\n img = cv2.imread(img_path)\n video_frames.append(img)\n # write images to video\n height, width = video_frames[0].shape[:2]\n if self.suffix is not None:\n video_name = f'{video_name}_{self.suffix}.png'\n save_restore_path = os.path.join(result_root, f'{video_name}.avi')\n vidwriter = cv2.VideoWriter(save_restore_path,cv2.VideoWriter_fourcc(*'DIVX'),fps, (width, height))\n \n for f in tqdm(video_frames,desc=\"Combining png to avi...\",total=len(video_frames)):\n vidwriter.write(f)\n \n vidwriter.release()\n \n out_file = os.path.join(result_root, f'{video_name}.mp4')\n command = 'ffmpeg -y -i {} -i {} -strict -2 -q:v 1 
{}'.format(audio, save_restore_path, out_file)\n subprocess.call(command, shell=platform.system() != 'Windows')\n\n logger.info(f'\\nAll results are saved in {result_root}')\n\n def enhance_face(self,img_path,i,video_name,test_img_num,bg_upsampler,result_root,input_video,net,face_upsampler):\n # clean all the intermediate results to process the next image\n face_helper = FaceRestoreHelper(\n self.upscale,\n face_size=512,\n crop_ratio=(1, 1),\n det_model = self.detection_model,\n save_ext='png',\n use_parse=True,\n device=self.device)\n with num_lock:\n if isinstance(img_path, str):\n img_name = os.path.basename(img_path)\n basename, ext = os.path.splitext(img_name)\n logger.info(f'[{i+1}/{test_img_num}] Processing: {img_name}')\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n else: # for video processing\n basename = str(i).zfill(6)\n img_name = f'{video_name}_{basename}' if input_video else basename\n logger.info(f'[{i+1}/{test_img_num}] Processing: {img_name}')\n img = img_path\n\n if self.has_aligned: \n # the input faces are already cropped and aligned\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)\n # face_helper.is_gray = is_gray(img, threshold=10)\n if face_helper.is_gray:\n logger.info('Grayscale input: True')\n face_helper.cropped_faces = [img]\n else:\n face_helper.read_image(img)\n # get face landmarks for each face\n num_det_faces = face_helper.get_face_landmarks_5(\n only_center_face=self.only_center_face, resize=640, eye_dist_threshold=5)\n logger.info(f'\\tdetect {num_det_faces} faces')\n # align and warp each face\n face_helper.align_warp_face()\n\n # face restoration for each cropped face\n for idx, cropped_face in enumerate(face_helper.cropped_faces):\n # prepare data\n cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)\n normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)\n cropped_face_t = cropped_face_t.unsqueeze(0).to(self.device)\n\n try:\n with torch.no_grad():\n output = net(cropped_face_t, w=self.w, adain=True)[0]\n restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))\n del output\n torch.cuda.empty_cache()\n except Exception as error:\n logger.info(f'\\tFailed inference for CodeFormer: {error}')\n restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))\n\n restored_face = restored_face.astype('uint8')\n face_helper.add_restored_face(restored_face, cropped_face)\n\n # paste_back\n if not self.has_aligned:\n # upsample the background\n if bg_upsampler is not None:\n # Now only support RealESRGAN for upsampling background\n bg_img = bg_upsampler.enhance(img, outscale=self.upscale)[0]\n else:\n bg_img = None\n face_helper.get_inverse_affine(None)\n # paste each restored face to the input image\n if self.face_upsample and face_upsampler is not None: \n restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=self.draw_box, face_upsampler=face_upsampler)\n else:\n restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=self.draw_box)\n \n \n # save faces\n for idx, (cropped_face, restored_face) in enumerate(zip(face_helper.cropped_faces, face_helper.restored_faces)):\n # save cropped face\n if not self.has_aligned: \n save_crop_path = os.path.join(result_root, 'cropped_faces', f'{basename}_{idx:02d}.png')\n imwrite(cropped_face, save_crop_path)\n # save restored face\n if self.has_aligned:\n save_face_name = f'{basename}.png'\n else:\n save_face_name = f'{basename}_{idx:02d}.png'\n if self.suffix is not None:\n 
save_face_name = f'{save_face_name[:-4]}_{self.suffix}.png'\n save_restore_path = os.path.join(result_root, 'restored_faces', save_face_name)\n imwrite(restored_face, save_restore_path)\n \n # save restored img\n if not self.has_aligned and restored_img is not None:\n if self.suffix is not None:\n basename = f'{basename}_{self.suffix}'\n save_restore_path = os.path.join(result_root, 'final_results', f'{basename}.png')\n imwrite(restored_img, save_restore_path)\n\n\n def set_realesrgan(self):\n if torch.cuda.is_available():\n no_half_gpu_list = ['1650', '1660'] # set False for GPUs that don't support f16\n if not True in [gpu in torch.cuda.get_device_name(0) for gpu in no_half_gpu_list]:\n self.use_half = True\n model = RRDBNet(\n num_in_ch=3,\n num_out_ch=3,\n num_feat=64,\n num_block=23,\n num_grow_ch=32,\n scale=2\n )\n upsampler = RealESRGANer(\n scale=2,\n model_path=\"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth\",\n model=model,\n tile=self.bg_tile,\n tile_pad=40,\n pre_pad=0,\n half=self.use_half\n )\n if not torch.cuda.is_available():\n logger.warning('Running on CPU now! Make sure your PyTorch version matches your CUDA.')\n return upsampler" }, { "identifier": "analyse_video", "path": "src/nfsw.py", "snippet": "@lru_cache(maxsize = None)\ndef analyse_video(video_path : str) -> bool:\n\tvideo_frame_total = count_video_frame_total(video_path)\n\tfps = detect_fps(video_path)\n\tframe_range = range( 0, video_frame_total)\n\trate = 0.0\n\tcounter = 0\n\twith tqdm(total = len(frame_range), desc = 'video content analysing', unit = 'frame', ascii = ' =') as progress:\n\t\tfor frame_number in frame_range:\n\t\t\tif frame_number % int(fps) == 0:\n\t\t\t\tframe = get_video_frame(video_path, frame_number)\n\t\t\t\tif analyse_frame(frame):\n\t\t\t\t\tcounter += 1\n\t\t\trate = counter * int(fps) / len(frame_range) * 100\n\t\t\tprogress.update()\n\t\t\tprogress.set_postfix(rate = rate)\n\treturn rate > MAX_RATE" }, { "identifier": "load_model", "path": "src/third_part/whisperx/transcribe.py", "snippet": "def cli():" }, { "identifier": "load_audio", "path": "src/third_part/whisperx/audio.py", "snippet": "def load_audio(file: str, sr: int = SAMPLE_RATE):\n \"\"\"\n Open an audio file and read as mono waveform, resampling as necessary\n\n Parameters\n ----------\n file: str\n The audio file to open\n\n sr: int\n The sample rate to resample the audio if necessary\n\n Returns\n -------\n A NumPy array containing the audio waveform, in float32 dtype.\n \"\"\"\n try:\n # Launches a subprocess to decode audio while down-mixing and resampling as necessary.\n # Requires the ffmpeg CLI to be installed.\n cmd = [\n \"ffmpeg\",\n \"-nostdin\",\n \"-threads\",\n \"0\",\n \"-i\",\n file,\n \"-f\",\n \"s16le\",\n \"-ac\",\n \"1\",\n \"-acodec\",\n \"pcm_s16le\",\n \"-ar\",\n str(sr),\n \"-\",\n ]\n out = subprocess.run(cmd, capture_output=True, check=True).stdout\n except subprocess.CalledProcessError as e:\n raise RuntimeError(f\"Failed to load audio: {e.stderr.decode()}\") from e\n\n return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0" }, { "identifier": "DiarizationPipeline", "path": "src/third_part/whisperx/diarize.py", "snippet": "class DiarizationPipeline:\n def __init__(\n self,\n model_name=\"pyannote/speaker-diarization-3.0\",\n use_auth_token=None,\n device: Optional[Union[str, torch.device]] = \"cpu\",\n ):\n if isinstance(device, str):\n device = torch.device(device)\n self.model = Pipeline.from_pretrained(model_name, 
use_auth_token=use_auth_token).to(device)\n\n def __call__(self, audio: Union[str, np.ndarray], min_speakers=None, max_speakers=None):\n if isinstance(audio, str):\n audio = load_audio(audio)\n audio_data = {\n 'waveform': torch.from_numpy(audio[None, :]),\n 'sample_rate': SAMPLE_RATE\n }\n segments = self.model(audio_data, min_speakers=min_speakers, max_speakers=max_speakers)\n diarize_df = pd.DataFrame(segments.itertracks(yield_label=True), columns=['segment', 'label', 'speaker'])\n diarize_df['start'] = diarize_df['segment'].apply(lambda x: x.start)\n diarize_df['end'] = diarize_df['segment'].apply(lambda x: x.end)\n return diarize_df" } ]
import os import torch import soundfile as sf import gc; gc.collect(); torch.cuda.empty_cache(); del cloner import gc; gc.collect(); torch.cuda.empty_cache(); del diarize_model import gc; gc.collect(); torch.cuda.empty_cache(); del whisper from typing import Any from tqdm import tqdm from src.log_helper import HandleLog from moviepy.editor import VideoFileClip,concatenate_videoclips from pathlib import Path from pydub import AudioSegment from src.audio_bgm_split import AudioProcess from src.voice_clone import VoiceCloner from src.temp_manager import TempFileManager from src.translator import Translator from src.lipsync import LipSync from src.upscale import Upscale from src.nfsw import analyse_video from src.third_part.whisperx import load_model,load_audio,DiarizationPipeline
12328
seg_len = len(whispher_segments) cloner = VoiceCloner(self.xt_version_name) root_path = Path(self.output_file).parent zimu_txt = os.path.join(root_path,"zimu.txt") for i, segment in tqdm(enumerate(whispher_segments), desc="voice cloning", total=seg_len): start = segment['start'] * 1000 end = segment['end'] * 1000 text_list = segment['text_list'] if i == 0: vocal_cloned_audio += org_vocal[:start] bgm_audio_extend += bgm_audio[:start] video_extend_list.append(org_video_clip.subclip(0, start / 1000)) total_cloned_vocal = AudioSegment.silent(0) if len(text_list) > 0: for src_text in text_list: dst_text = self.translotor(src_text,src_lang_code,self.lang_code) with open(zimu_txt,mode="a",encoding="utf-8",newline="\n") as w: w.write(src_text) w.write("\n") w.write(dst_text) w.write("\n") cloned_vocal_path = cloner(text=dst_text, lang_code=self.lang_code, speaker_wav=[segment['wav'],speakers_wav[segment['speaker']]]) cloned_vocal = AudioSegment.from_file(cloned_vocal_path) total_cloned_vocal += cloned_vocal else: logger.info(f'no sound there') total_cloned_vocal = AudioSegment.silent(end - start) vocal_cloned_audio += total_cloned_vocal tmp_bgm_audio = bgm_audio[start:end] bgm_audio_extend += self.bgm_map_vocal(tmp_bgm_audio, total_cloned_vocal) tmp_video_clip = org_video_clip.subclip(start/1000, end/1000) tmp_video_clip = self.video_map_vocal(tmp_video_clip, total_cloned_vocal) video_extend_list.append(tmp_video_clip) if i < seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:whispher_segments[i+1]['start']*1000] bgm_audio_extend += bgm_audio[end:whispher_segments[i+1]['start']*1000] video_extend_list.append(org_video_clip.subclip(end/1000, whispher_segments[i+1]['start'])) if i == seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:] bgm_audio_extend += bgm_audio[end:] video_extend_list.append(org_video_clip.subclip(end/1000)) vocal_cloned_path = os.path.join(root_path,"vocal_cloned.wav") vocal_cloned_audio.export(vocal_cloned_path, format="wav") logger.info("vocal_cloned.wav saved in {}, you can check it".format(root_path)) bgm_extend_path = os.path.join(root_path,"bgm_extend.wav") bgm_audio_extend.export(bgm_extend_path, format="wav") logger.info("bgm_extend.wav saved in {}, you can check it".format(root_path)) voice_cloned_path = os.path.join(root_path,"voice_cloned.wav") self.combie_audio(vocal_cloned_audio,bgm_audio_extend,voice_cloned_path) logger.info("voice_cloned.wav saved in {}, you can check it".format(root_path)) video_extend_path = os.path.join(root_path,"video_extend.mp4") video_extended = concatenate_videoclips(video_extend_list) video_extended.write_videofile(video_extend_path,fps=25,audio=False) logger.info("video_extend.mp4 saved in {}, you can check it".format(root_path)) # delete model if low on GPU resources logger.critical("[Step 5] Wav2Lip by vocal_cloned.wav and video_extend.mp4") lipsync = LipSync(self.model_name) lipsync(video_extend_path,vocal_cloned_path,self.output_file,voice_cloned_path) logger.critical("[Step 6] Upscale output video last step") upscaler = Upscale() upscale_workplace_path = os.path.join(root_path,"upscale_workplace") upscaler(input_path = self.output_file,output_path=upscale_workplace_path,audio=voice_cloned_path) self.temp_manager.cleanup() def combie_audio(self,vocal_audio:AudioSegment,bgm_audio:AudioSegment,file_path): new_audio = vocal_audio.overlay(bgm_audio) new_audio.export(file_path, format="wav") def bgm_map_vocal(self,bgm_audio:AudioSegment,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds ratio = 
audio_duration / bgm_audio.duration_seconds print("audio.duration_seconds / bgm.duration_seconds = {}".format(ratio)) tmp_bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name bgm_audio.export(tmp_bgm_path, format="wav") bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name y,sr = sf.read(tmp_bgm_path) sf.write(bgm_path,y,int(sr*ratio)) bgm_extended = AudioSegment.from_file(bgm_path) return bgm_extended[:audio_duration * 1000] def video_map_vocal(self,vido_clip:VideoFileClip,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds video_duration = vido_clip.duration ratio = video_duration / audio_duration print("video_duration / audio_duration =ratio:{}".format(ratio)) new_video = vido_clip.fl_time(lambda t: ratio*t,apply_to=['mask', 'audio']) new_video1 = new_video.set_duration(audio_duration) new_video2 = new_video1.set_fps(new_video1.fps / video_duration * audio_duration) return new_video2.subclip(0,audio_duration) def speech_to_text(self, vocal_file):
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' logger = HandleLog() class Core: def __init__(self, args) -> None: cur_path = os.path.dirname(os.path.realpath(__file__)) # current path self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights') # weights_path to save model if not os.path.exists(self.weights_path): os.mkdir(self.weights_path) # self.input_file = args.input_file_path self.output_file = args.output_file_path self.lang_code = args.lang_code self.device = "cuda" if torch.cuda.is_available() else "cpu" self.hf_token = args.hf_token self.temp_manager = TempFileManager() self.translotor = Translator() self.model_name = args.model_name self.xt_version_name = args.xt_version_name if analyse_video(args.input_file_path): raise("sorry! nativespeaker is not for you") def __call__(self, *args: Any, **kwds: Any) -> Any: logger.critical("[Step 1] Moviepy split voice and frames from video") org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav") org_video_clip = VideoFileClip(self.input_file) org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le') logger.info("save original voice in {}".format(org_voice_path)) logger.critical("[Step 2] H5 Split vocal and bgm from voice") audio_process = AudioProcess(15) vocal_file, bgm_file = audio_process.split(org_voice_path) logger.critical("[Step 3] whisperx from speech to text") whispher_segments, src_lang_code, speakers_wav = self.speech_to_text(vocal_file) logger.critical("[Step 4] translate,text to speech,video and voice_cloned aligment") vocal_cloned_audio = AudioSegment.silent(0) bgm_audio_extend = AudioSegment.silent(0) video_extend_list = [] org_vocal = AudioSegment.from_file(vocal_file) bgm_audio = AudioSegment.from_file(bgm_file) seg_len = len(whispher_segments) cloner = VoiceCloner(self.xt_version_name) root_path = Path(self.output_file).parent zimu_txt = os.path.join(root_path,"zimu.txt") for i, segment in tqdm(enumerate(whispher_segments), desc="voice cloning", total=seg_len): start = segment['start'] * 1000 end = segment['end'] * 1000 text_list = segment['text_list'] if i == 0: vocal_cloned_audio += org_vocal[:start] bgm_audio_extend += bgm_audio[:start] video_extend_list.append(org_video_clip.subclip(0, start / 1000)) total_cloned_vocal = AudioSegment.silent(0) if len(text_list) > 0: for src_text in text_list: dst_text = self.translotor(src_text,src_lang_code,self.lang_code) with open(zimu_txt,mode="a",encoding="utf-8",newline="\n") as w: w.write(src_text) w.write("\n") w.write(dst_text) w.write("\n") cloned_vocal_path = cloner(text=dst_text, lang_code=self.lang_code, speaker_wav=[segment['wav'],speakers_wav[segment['speaker']]]) cloned_vocal = AudioSegment.from_file(cloned_vocal_path) total_cloned_vocal += cloned_vocal else: logger.info(f'no sound there') total_cloned_vocal = AudioSegment.silent(end - start) vocal_cloned_audio += total_cloned_vocal tmp_bgm_audio = bgm_audio[start:end] bgm_audio_extend += self.bgm_map_vocal(tmp_bgm_audio, total_cloned_vocal) tmp_video_clip = org_video_clip.subclip(start/1000, end/1000) tmp_video_clip = self.video_map_vocal(tmp_video_clip, total_cloned_vocal) video_extend_list.append(tmp_video_clip) if i < seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:whispher_segments[i+1]['start']*1000] bgm_audio_extend += bgm_audio[end:whispher_segments[i+1]['start']*1000] video_extend_list.append(org_video_clip.subclip(end/1000, whispher_segments[i+1]['start'])) if i == seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:] bgm_audio_extend += 
bgm_audio[end:] video_extend_list.append(org_video_clip.subclip(end/1000)) vocal_cloned_path = os.path.join(root_path,"vocal_cloned.wav") vocal_cloned_audio.export(vocal_cloned_path, format="wav") logger.info("vocal_cloned.wav saved in {}, you can check it".format(root_path)) bgm_extend_path = os.path.join(root_path,"bgm_extend.wav") bgm_audio_extend.export(bgm_extend_path, format="wav") logger.info("bgm_extend.wav saved in {}, you can check it".format(root_path)) voice_cloned_path = os.path.join(root_path,"voice_cloned.wav") self.combie_audio(vocal_cloned_audio,bgm_audio_extend,voice_cloned_path) logger.info("voice_cloned.wav saved in {}, you can check it".format(root_path)) video_extend_path = os.path.join(root_path,"video_extend.mp4") video_extended = concatenate_videoclips(video_extend_list) video_extended.write_videofile(video_extend_path,fps=25,audio=False) logger.info("video_extend.mp4 saved in {}, you can check it".format(root_path)) # delete model if low on GPU resources logger.critical("[Step 5] Wav2Lip by vocal_cloned.wav and video_extend.mp4") lipsync = LipSync(self.model_name) lipsync(video_extend_path,vocal_cloned_path,self.output_file,voice_cloned_path) logger.critical("[Step 6] Upscale output video last step") upscaler = Upscale() upscale_workplace_path = os.path.join(root_path,"upscale_workplace") upscaler(input_path = self.output_file,output_path=upscale_workplace_path,audio=voice_cloned_path) self.temp_manager.cleanup() def combie_audio(self,vocal_audio:AudioSegment,bgm_audio:AudioSegment,file_path): new_audio = vocal_audio.overlay(bgm_audio) new_audio.export(file_path, format="wav") def bgm_map_vocal(self,bgm_audio:AudioSegment,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds ratio = audio_duration / bgm_audio.duration_seconds print("audio.duration_seconds / bgm.duration_seconds = {}".format(ratio)) tmp_bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name bgm_audio.export(tmp_bgm_path, format="wav") bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name y,sr = sf.read(tmp_bgm_path) sf.write(bgm_path,y,int(sr*ratio)) bgm_extended = AudioSegment.from_file(bgm_path) return bgm_extended[:audio_duration * 1000] def video_map_vocal(self,vido_clip:VideoFileClip,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds video_duration = vido_clip.duration ratio = video_duration / audio_duration print("video_duration / audio_duration =ratio:{}".format(ratio)) new_video = vido_clip.fl_time(lambda t: ratio*t,apply_to=['mask', 'audio']) new_video1 = new_video.set_duration(audio_duration) new_video2 = new_video1.set_fps(new_video1.fps / video_duration * audio_duration) return new_video2.subclip(0,audio_duration) def speech_to_text(self, vocal_file):
vocal_audio = load_audio(vocal_file)
9
2023-12-01 12:23:19+00:00
16k
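The video_map_vocal method in the row above retimes each video segment so its length matches the cloned vocal audio, using MoviePy's fl_time followed by set_duration. Below is a minimal sketch of that pattern, assuming MoviePy 1.x; the helper name retime_clip and the file name are hypothetical and not part of the project's API:

from moviepy.editor import VideoFileClip

def retime_clip(clip, target_seconds):
    # Map playback time t to source time speed * t, so the whole source clip
    # is traversed within target_seconds (e.g. the cloned vocal's duration).
    speed = clip.duration / target_seconds
    retimed = clip.fl_time(lambda t: speed * t, apply_to=['mask', 'audio'])
    # fl_time leaves the clip's duration undefined, so declare the new length explicitly.
    return retimed.set_duration(target_seconds)

# Usage sketch: stretch or compress a clip to 3.2 seconds.
# clip = VideoFileClip("segment.mp4")
# short_clip = retime_clip(clip, 3.2)

This is the same speed-change idiom as MoviePy's vfx.speedx; the original code additionally rescales the frame rate with set_fps to keep frame timing consistent after retiming.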
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": 
"utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation, transform):\n def __init__(self, sh_degree : int, smpl_type : str, motion_offset_flag : bool, actor_gender: str):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1, transform=None):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def kl_densify_and_clone(self, grads, grad_threshold, scene_extent, kl_threshold=0.4):\n def kl_densify_and_split(self, grads, grad_threshold, scene_extent, kl_threshold=0.4, N=2):\n def kl_merge(self, grads, grad_threshold, scene_extent, kl_threshold=0.1):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, kl_threshold=0.4, t_vertices=None, iter=None):\n def kl_div(self, mu_0, rotation_0_q, scaling_0_diag, mu_1, rotation_1_q, scaling_1_diag):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def coarse_deform_c2source(self, query_pts, params, t_params, t_vertices, lbs_weights=None, correct_Rs=None, return_transl=False):\ndef read_pickle(pkl_path):\ndef SMPL_to_tensor(params, device):\ndef batch_rodrigues_torch(poses):\ndef get_rigid_transformation_torch(rot_mats, joints, parents):\ndef get_transform_params_torch(smpl, params, rot_mats=None, correct_Rs=None):\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n L_0 = rotation_0 @ scaling_0\n A = torch.matmul(bweights, A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n A = torch.matmul(bweights, self.s_A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n K = K.reshape([batch_size, 3, 3])\n A = 
get_rigid_transformation_torch(rot_mats, joints, parents)\n R = params['R'] \n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))" }, { "identifier": "SMPL", "path": "smpl/smpl_numpy.py", "snippet": "class SMPL():\n def __init__(self, sex, model_dir):\n super(SMPL, self).__init__()\n\n model_paths = {\n 'male': os.path.join(model_dir, MALE_PATH),\n 'female': os.path.join(model_dir, FEMALE_PATH),\n # 'neutral': os.path.join(model_dir, NEUTRAL_PATH)\n 'neutral': os.path.join('assets/SMPL_NEUTRAL.pkl')\n }\n\n with open(model_paths[sex], 'rb') as f:\n smpl_model = pickle.load(f, encoding='latin1')\n self.J_regressor = np.array(smpl_model['J_regressor'].todense()) # (24, 6890)\n self.weights = smpl_model['weights'] # (6890, 24)\n self.posedirs = smpl_model['posedirs'] # (6890, 3, 207)\n self.v_template = smpl_model['v_template'] # (6890, 3)\n self.shapedirs = np.array(smpl_model['shapedirs']) # (6890, 3, 10)\n self.faces = smpl_model['f'].astype('int32') # (13776, 3)\n self.kintree_table = smpl_model['kintree_table'].astype('int64') # (2, 24)\n\n id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}\n self.parent = np.array([id_to_col[self.kintree_table[0, it]] for it in range(1, self.kintree_table.shape[1])])\n\n self.pose_shape = [24, 3]\n self.beta_shape = [10]\n self.pose = np.zeros(self.pose_shape)\n self.beta = np.zeros(self.beta_shape)\n\n self.verts = None\n self.J = None\n self.R = None\n\n def __call__(self, pose, beta):\n\n v_template = self.v_template # (6890, 3)\n shapedirs = self.shapedirs.reshape(-1,10) # (6890*3, 10)\n beta = beta[:, None] # (10, 1)\n\n v_shaped = shapedirs.dot(beta).reshape(6890, 3) + v_template # (6890, 3)\n J = self.J_regressor.dot(v_shaped) # (24, 3)\n\n # input is a rotation matrix: (24,3,3)\n if pose.shape == (24, 3, 3):\n R = pose\n # input is a rotation axis-angle vector: (1, 72), (72, 1) or (72, )\n elif pose.shape == (1, 72) or pose.shape == (72, 1) or pose.shape == (72,):\n pose_vectors = pose.reshape(-1, 3) # (24, 3)\n R = np.array([rodrigues(pose_vectors[p_idx])[0] \n for p_idx in range(pose_vectors.shape[0])\n ], \n dtype='float32') # (24, 3, 3)\n else:\n raise ValueError(\"Unsupported Pose Inputs - the Pose Shape is {}\".format(pose.shape))\n\n Is = np.eye(3, dtype='float32')[None, :] # (1, 3, 3)\n lrotmin = (R[1:,:] - Is).reshape(-1, 1) # (23x3x3, 1)\n posedirs = self.posedirs.reshape(-1,207) # (6890x3, 207)\n v_posed = v_shaped + posedirs.dot(lrotmin).reshape(6890, 3) # (6890, 3)\n\n J_ = J.copy()\n J_[1:, :] = J[1:, :] - J[self.parent, :] # (24, 3)\n G_ = np.concatenate([R, J_[:, :, None]], axis=-1) # (24, 3, 4)\n pad_rows = np.array([[0, 0, 0, 1]], dtype='float32')\n pad_rows = np.repeat(pad_rows, 24, axis=0).reshape(-1, 1, 4)\n G_ = np.concatenate([G_, pad_rows], axis=1) # (24, 4, 4)\n\n G = [G_[0].copy()]\n for i in range(1, 24):\n G.append(G[self.parent[i-1]].dot(G_[i, :, :]))\n G = np.stack(G, axis=0) # (24, 4, 4)\n\n joints = G[:, :3, 3]\n rest_joints = np.concatenate([J, np.zeros((24, 1))], axis=-1)[:, :, None] # (24, 4, 1)\n zeros = np.zeros((24, 4, 3), dtype='float32') # (24, 4, 3)\n rest_joints_mtx = np.concatenate([zeros, rest_joints], axis=-1) # (24, 4, 4) \n # print(\"G1: \", G[0], \"rest_joints_mtx1: \", rest_joints_mtx[0])\n posed_joints_mtx = np.matmul(G, rest_joints_mtx)\n # print(\"rest_joints_mtx2: \", posed_joints_mtx[0])\n G = G - posed_joints_mtx\n # print(G[0]) \n 
rest_shape_h = np.concatenate([v_posed, np.ones(v_posed.shape[0])[:, None]], axis=-1) #(6890, 4)\n T = self.weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)\n v = np.matmul(T, rest_shape_h[:, :, None])[:, :3, 0]\n \n return v, joints" }, { "identifier": "SMPLX", "path": "smplx/body_models.py", "snippet": "class SMPLX(SMPLH):\n '''\n SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters\n trained jointly for the face, hands and body.\n SMPL-X uses standard vertex based linear blend skinning with learned\n corrective blend shapes, has N=10475 vertices and K=54 joints,\n which includes joints for the neck, jaw, eyeballs and fingers.\n '''\n\n NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS\n NUM_HAND_JOINTS = 15\n NUM_FACE_JOINTS = 3\n NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS\n EXPRESSION_SPACE_DIM = 100\n NECK_IDX = 12\n\n def __init__(\n self, model_path: str,\n kid_template_path: str = '',\n num_expression_coeffs: int = 10,\n create_expression: bool = True,\n expression: Optional[Tensor] = None,\n create_jaw_pose: bool = True,\n jaw_pose: Optional[Tensor] = None,\n create_leye_pose: bool = True,\n leye_pose: Optional[Tensor] = None,\n create_reye_pose=True,\n reye_pose: Optional[Tensor] = None,\n use_face_contour: bool = False,\n batch_size: int = 1,\n gender: str = 'neutral',\n age: str = 'adult',\n dtype=torch.float32,\n ext: str = 'npz',\n **kwargs\n ) -> None:\n ''' SMPLX model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n num_expression_coeffs: int, optional\n Number of expression components to use\n (default = 10).\n create_expression: bool, optional\n Flag for creating a member variable for the expression space\n (default = True).\n expression: torch.tensor, optional, Bx10\n The default value for the expression member variable.\n (default = None)\n create_jaw_pose: bool, optional\n Flag for creating a member variable for the jaw pose.\n (default = False)\n jaw_pose: torch.tensor, optional, Bx3\n The default value for the jaw pose variable.\n (default = None)\n create_leye_pose: bool, optional\n Flag for creating a member variable for the left eye pose.\n (default = False)\n leye_pose: torch.tensor, optional, Bx10\n The default value for the left eye pose variable.\n (default = None)\n create_reye_pose: bool, optional\n Flag for creating a member variable for the right eye pose.\n (default = False)\n reye_pose: torch.tensor, optional, Bx10\n The default value for the right eye pose variable.\n (default = None)\n use_face_contour: bool, optional\n Whether to compute the keypoints that form the facial contour\n batch_size: int, optional\n The batch size used for creating the member variables\n gender: str, optional\n Which gender to load\n dtype: torch.dtype\n The data type for the created variables\n '''\n\n # Load the model\n if osp.isdir(model_path):\n model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)\n smplx_path = os.path.join(model_path, model_fn)\n else:\n smplx_path = model_path\n assert osp.exists(smplx_path), 'Path {} does not exist!'.format(\n smplx_path)\n\n if ext == 'pkl':\n with open(smplx_path, 'rb') as smplx_file:\n model_data = pickle.load(smplx_file, encoding='latin1')\n elif ext == 'npz':\n model_data = np.load(smplx_path, allow_pickle=True)\n else:\n raise ValueError('Unknown extension: {}'.format(ext))\n\n data_struct = Struct(**model_data)\n\n super(SMPLX, self).__init__(\n model_path=model_path,\n 
kid_template_path=kid_template_path,\n data_struct=data_struct,\n dtype=dtype,\n batch_size=batch_size,\n vertex_ids=VERTEX_IDS['smplx'],\n gender=gender, age=age, ext=ext,\n **kwargs)\n\n lmk_faces_idx = data_struct.lmk_faces_idx\n self.register_buffer('lmk_faces_idx',\n torch.tensor(lmk_faces_idx, dtype=torch.long))\n lmk_bary_coords = data_struct.lmk_bary_coords\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_bary_coords, dtype=dtype))\n\n self.use_face_contour = use_face_contour\n if self.use_face_contour:\n dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx\n dynamic_lmk_faces_idx = torch.tensor(\n dynamic_lmk_faces_idx,\n dtype=torch.long)\n self.register_buffer('dynamic_lmk_faces_idx',\n dynamic_lmk_faces_idx)\n\n dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords\n dynamic_lmk_bary_coords = torch.tensor(\n dynamic_lmk_bary_coords, dtype=dtype)\n self.register_buffer('dynamic_lmk_bary_coords',\n dynamic_lmk_bary_coords)\n\n neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)\n self.register_buffer(\n 'neck_kin_chain',\n torch.tensor(neck_kin_chain, dtype=torch.long))\n\n if create_jaw_pose:\n if jaw_pose is None:\n default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)\n jaw_pose_param = nn.Parameter(default_jaw_pose,\n requires_grad=True)\n self.register_parameter('jaw_pose', jaw_pose_param)\n\n if create_leye_pose:\n if leye_pose is None:\n default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_leye_pose = torch.tensor(leye_pose, dtype=dtype)\n leye_pose_param = nn.Parameter(default_leye_pose,\n requires_grad=True)\n self.register_parameter('leye_pose', leye_pose_param)\n\n if create_reye_pose:\n if reye_pose is None:\n default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_reye_pose = torch.tensor(reye_pose, dtype=dtype)\n reye_pose_param = nn.Parameter(default_reye_pose,\n requires_grad=True)\n self.register_parameter('reye_pose', reye_pose_param)\n\n shapedirs = data_struct.shapedirs\n if len(shapedirs.shape) < 3:\n shapedirs = shapedirs[:, :, None]\n if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +\n self.EXPRESSION_SPACE_DIM):\n print(f'WARNING: You are using a {self.name()} model, with only'\n ' 10 shape and 10 expression coefficients.')\n expr_start_idx = 10\n expr_end_idx = 20\n num_expression_coeffs = min(num_expression_coeffs, 10)\n else:\n expr_start_idx = self.SHAPE_SPACE_DIM\n expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs\n num_expression_coeffs = min(\n num_expression_coeffs, self.EXPRESSION_SPACE_DIM)\n\n self._num_expression_coeffs = num_expression_coeffs\n\n expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]\n self.register_buffer(\n 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))\n\n if create_expression:\n if expression is None:\n default_expression = torch.zeros(\n [batch_size, self.num_expression_coeffs], dtype=dtype)\n else:\n default_expression = torch.tensor(expression, dtype=dtype)\n expression_param = nn.Parameter(default_expression,\n requires_grad=True)\n self.register_parameter('expression', expression_param)\n\n def name(self) -> str:\n return 'SMPL-X'\n\n @property\n def num_expression_coeffs(self):\n return self._num_expression_coeffs\n\n def create_mean_pose(self, data_struct, flat_hand_mean=False):\n # Create the array for the mean pose. 
If flat_hand is false, then use\n # the mean that is given by the data, rather than the flat open hand\n global_orient_mean = torch.zeros([3], dtype=self.dtype)\n body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],\n dtype=self.dtype)\n jaw_pose_mean = torch.zeros([3], dtype=self.dtype)\n leye_pose_mean = torch.zeros([3], dtype=self.dtype)\n reye_pose_mean = torch.zeros([3], dtype=self.dtype)\n # pose_mean = np.concatenate([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], axis=0)\n pose_mean = torch.cat([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], 0)\n\n return pose_mean\n\n def extra_repr(self):\n msg = super(SMPLX, self).extra_repr()\n msg = [\n msg,\n f'Number of Expression Coefficients: {self.num_expression_coeffs}'\n ]\n return '\\n'.join(msg)\n\n def forward(\n self,\n betas: Optional[Tensor] = None,\n global_orient: Optional[Tensor] = None,\n body_pose: Optional[Tensor] = None,\n left_hand_pose: Optional[Tensor] = None,\n right_hand_pose: Optional[Tensor] = None,\n transl: Optional[Tensor] = None,\n expression: Optional[Tensor] = None,\n jaw_pose: Optional[Tensor] = None,\n leye_pose: Optional[Tensor] = None,\n reye_pose: Optional[Tensor] = None,\n return_verts: bool = True,\n return_full_pose: bool = False,\n pose2rot: bool = True,\n return_shaped: bool = True,\n **kwargs\n ) -> TensorOutput:\n '''\n Forward pass for the SMPLX model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape BxN_b\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n expression: torch.tensor, optional, shape BxN_e\n If given, ignore the member variable `expression` and use it\n instead. For example, it can used if expression parameters\n `expression` are predicted from some external model.\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n left_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `left_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n right_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `right_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n jaw_pose: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `jaw_pose` and\n use this instead. It should either joint rotations in\n axis-angle format.\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n output: ModelOutput\n A named tuple of type `ModelOutput`\n '''\n\n # If no shape and pose parameters are passed along, then use the\n # ones from the module\n global_orient = (global_orient if global_orient is not None else\n self.global_orient)\n body_pose = body_pose if body_pose is not None else self.body_pose\n betas = betas if betas is not None else self.betas\n\n left_hand_pose = (left_hand_pose if left_hand_pose is not None else\n self.left_hand_pose)\n right_hand_pose = (right_hand_pose if right_hand_pose is not None else\n self.right_hand_pose)\n jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose\n leye_pose = leye_pose if leye_pose is not None else self.leye_pose\n reye_pose = reye_pose if reye_pose is not None else self.reye_pose\n expression = expression if expression is not None else self.expression\n\n apply_trans = transl is not None or hasattr(self, 'transl')\n if transl is None:\n if hasattr(self, 'transl'):\n transl = self.transl\n\n if self.use_pca:\n left_hand_pose = torch.einsum(\n 'bi,ij->bj', [left_hand_pose, self.left_hand_components])\n right_hand_pose = torch.einsum(\n 'bi,ij->bj', [right_hand_pose, self.right_hand_components])\n\n full_pose = torch.cat([global_orient.reshape(-1, 1, 3),\n body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),\n jaw_pose.reshape(-1, 1, 3),\n leye_pose.reshape(-1, 1, 3),\n reye_pose.reshape(-1, 1, 3),\n left_hand_pose.reshape(-1, 15, 3),\n right_hand_pose.reshape(-1, 15, 3)],\n dim=1).reshape(-1, 165).to(self.pose_mean.device)\n\n # Add the mean pose of the model. Does not affect the body, only the\n # hands when flat_hand_mean == False\n full_pose += self.pose_mean\n\n batch_size = max(betas.shape[0], global_orient.shape[0],\n body_pose.shape[0])\n # Concatenate the shape and expression coefficients\n scale = int(batch_size / betas.shape[0])\n if scale > 1:\n betas = betas.expand(scale, -1)\n shape_components = torch.cat([betas, expression], dim=-1).to(self.pose_mean.device)\n\n shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)\n\n vertices, joints, A, T = lbs(shape_components, full_pose, self.v_template,\n shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, pose2rot=pose2rot,\n )\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(\n dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(\n self.batch_size, 1, 1)\n if self.use_face_contour:\n lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(\n vertices, full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain,\n pose2rot=True,\n )\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords\n\n lmk_faces_idx = torch.cat([lmk_faces_idx,\n dyn_lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat(\n [lmk_bary_coords.expand(batch_size, -1, -1),\n dyn_lmk_bary_coords], 1)\n\n landmarks = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xs = joints[0,:,0]\n # ys = joints[0,:,1]\n # plt.scatter(xs, ys)\n\n # # zip joins x and y coordinates in pairs\n # count = 0\n # for x,y in zip(xs, ys):\n\n # label = \"{:.2f}\".format(count)\n\n # plt.annotate(label, # this is the text\n # (x,y), # these are the coordinates to position the label\n # textcoords=\"offset points\", # how to position the text\n # xytext=(0,10), # distance 
from text to points (x,y)\n # ha='center') # horizontal alignment can be left, right or center\n # count += 1\n # plt.savefig(\"joints.png\")\n # import pdb; pdb.set_trace()\n\n # Add any extra joints that might be needed\n joints = self.vertex_joint_selector(vertices, joints)\n # Add the landmarks to the joints\n joints = torch.cat([joints, landmarks], dim=1)\n # Map the joints to the current dataset\n\n if self.joint_mapper is not None:\n joints = self.joint_mapper(joints=joints, vertices=vertices)\n\n if apply_trans:\n joints += transl.unsqueeze(dim=1)\n vertices += transl.unsqueeze(dim=1)\n # clone because we are modifying them in-place\n A = A.clone()\n A[..., :3, 3] += transl.unsqueeze(dim=1)\n T = T.clone()\n T[..., :3, 3] += transl.unsqueeze(dim=1)\n\n v_shaped = None\n if return_shaped:\n v_shaped = self.v_template + blend_shapes(betas, self.shapedirs)\n else:\n v_shaped = Tensor(0)\n\n output = TensorOutput(vertices=vertices if return_verts else None,\n joints=joints,\n betas=betas,\n expression=expression,\n global_orient=global_orient,\n body_pose=body_pose,\n left_hand_pose=left_hand_pose,\n right_hand_pose=right_hand_pose,\n jaw_pose=jaw_pose,\n v_shaped=v_shaped,\n full_pose=full_pose if return_full_pose else None,\n A=A,\n T=T,\n f=self.faces)\n return output" }, { "identifier": "SMCReader", "path": "data/dna_rendering/dna_rendering_sample_code/SMCReader.py", "snippet": "class SMCReader:\n\n def __init__(self, file_path):\n \"\"\"Read SenseMocapFile endswith \".smc\".\n\n Args:\n file_path (str):\n Path to an SMC file.\n body_model (nn.Module or dict):\n Only needed for SMPL transformation to device frame\n if nn.Module: a body_model instance\n if dict: a body_model config\n \"\"\"\n self.smc = h5py.File(file_path, 'r')\n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None \n self.__available_keys__ = list(self.smc.keys())\n \n self.actor_info = None \n if hasattr(self.smc, 'attrs') and len(self.smc.attrs.keys()) > 0:\n self.actor_info = dict(\n id=self.smc.attrs['actor_id'],\n perf_id=self.smc.attrs['performance_id'],\n age=self.smc.attrs['age'],\n gender=self.smc.attrs['gender'],\n height=self.smc.attrs['height'],\n weight=self.smc.attrs['weight'],\n ethnicity=self.smc.attrs['ethnicity'],\n )\n\n self.Camera_5mp_info = None \n if 'Camera_5mp' in self.smc:\n self.Camera_5mp_info = dict(\n num_device=self.smc['Camera_5mp'].attrs['num_device'],\n num_frame=self.smc['Camera_5mp'].attrs['num_frame'],\n resolution=self.smc['Camera_5mp'].attrs['resolution'],\n )\n self.Camera_12mp_info = None \n if 'Camera_12mp' in self.smc:\n self.Camera_12mp_info = dict(\n num_device=self.smc['Camera_12mp'].attrs['num_device'],\n num_frame=self.smc['Camera_12mp'].attrs['num_frame'],\n resolution=self.smc['Camera_12mp'].attrs['resolution'],\n )\n self.Kinect_info = None\n if 'Kinect' in self.smc:\n self.Kinect_info=dict(\n num_device=self.smc['Kinect'].attrs['num_device'],\n num_frame=self.smc['Kinect'].attrs['num_frame'],\n resolution=self.smc['Kinect'].attrs['resolution'],\n )\n\n def get_available_keys(self):\n return self.__available_keys__ \n\n def get_actor_info(self):\n return self.actor_info\n \n def get_Camera_12mp_info(self):\n return self.Camera_12mp_info\n\n def get_Camera_5mp_info(self):\n return self.Camera_5mp_info\n \n def get_Kinect_info(self):\n return self.Kinect_info\n \n ### RGB Camera Calibration\n def get_Calibration_all(self):\n \"\"\"Get calibration matrix of all cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration 
matrixs of all matrixs.\n dict( \n Camera_Parameter: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_id(str) in {'Camera_5mp': '0'~'47', 'Camera_12mp':'48'~'60'}\n Matrix_type in ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\" \n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n if self.__calibration_dict__ is not None:\n return self.__calibration_dict__\n\n self.__calibration_dict__ = dict()\n for ci in self.smc['Camera_Parameter'].keys():\n self.__calibration_dict__.setdefault(ci,dict())\n for mt in ['D', 'K', 'RT', 'Color_Calibration'] :\n self.__calibration_dict__[ci][mt] = \\\n self.smc['Camera_Parameter'][ci][mt][()]\n return self.__calibration_dict__\n\n def get_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain camera by its type and id \n\n Args:\n Camera_id (int/str of a number):\n Camera_id(str) in {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\"\n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n rs = dict()\n for k in ['D', 'K', 'RT', 'Color_Calibration'] :\n rs[k] = self.smc['Camera_Parameter'][f'{int(Camera_id):02d}'][k][()]\n return rs\n\n ### Kinect Camera Calibration\n def get_Kinect_Calibration_all(self):\n \"\"\"Get calibration matrix of all kinect cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration matrixs of all matrixs.\n dict( \n Camera_group: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_group(str) in ['Kinect']\n Camera_id(str) in {'Kinect': '0'~'7'}\n Matrix_type in ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n if self.__kinect_calib_dict__ is not None:\n return self.__kinect_calib_dict__\n\n self.__kinect_calib_dict__ = dict()\n for cg in ['Kinect']:\n self.__kinect_calib_dict__.setdefault(cg,dict())\n for ci in self.smc['Calibration'][cg].keys():\n self.__kinect_calib_dict__[cg].setdefault(ci,dict())\n for mt in ['D', 'K', 'RT'] :\n self.__kinect_calib_dict__[cg][ci][mt] = \\\n self.smc['Calibration'][cg][ci][mt][()]\n return self.__kinect_calib_dict__\n\n def get_kinect_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain kinect camera by its type and id \n\n Args:\n Camera_group (str):\n Camera_group in ['Kinect'].\n Camera_id (int/str of a number):\n CameraID(str) in {'Kinect': '0'~'7'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(Camera_id in self.smc['Calibration'][\"Kinect\"].keys())\n rs = dict()\n for k in ['D', 'K', 'RT']:\n rs[k] = self.smc['Calibration'][\"Kinect\"][Camera_id][k][()]\n return rs\n\n ### RGB image\n def __read_color_from_bytes__(self, color_array):\n \"\"\"Decode an RGB image from an encoded byte array.\"\"\"\n return cv2.imdecode(color_array, cv2.IMREAD_COLOR)\n\n def get_mask(self, Camera_id, Frame_id=None, disable_tqdm=True):\n \"\"\"Get mask from Camera_id, Frame_id\n\n Args:\n Camera_id (int/str of a number):\n Camera_id (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'\n b.list of 
numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Mask' in self.smc:\n print(\"=== no key: Mask.\\nplease check available keys!\")\n return None \n\n Camera_id = str(Camera_id)\n\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc['Mask'][Camera_id]['mask'].keys())\n img_byte = self.smc['Mask'][Camera_id]['mask'][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc['Mask'][Camera_id]['mask'].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_mask(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n def get_img(self, Camera_group, Camera_id, Image_type, Frame_id=None, disable_tqdm=True):\n \"\"\"Get image its Camera_group, Camera_id, Image_type and Frame_id\n\n Args:\n Camera_group (str):\n Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'].\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Image_type(str) in \n {'Camera_5mp': ['color'], \n 'Camera_12mp': ['color'],\n 'Kinect': ['depth', 'mask']}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not Camera_group in self.smc:\n print(\"=== no key: %s.\\nplease check available keys!\" % Camera_group)\n return None\n\n assert(Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'])\n Camera_id = str(Camera_id)\n assert(Camera_id in self.smc[Camera_group].keys())\n assert(Image_type in self.smc[Camera_group][Camera_id].keys())\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc[Camera_group][Camera_id][Image_type].keys())\n if Image_type in ['color']:\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n if Image_type == 'mask':\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n if Image_type == 'depth':\n img_color = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc[Camera_group][Camera_id][Image_type].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_img(Camera_group, Camera_id, Image_type,fi))\n return np.stack(rs,axis=0)\n \n ###Keypoints2d\n def get_Keypoints2d(self, Camera_id, Frame_id=None):\n \"\"\"Get keypoint2D by its Camera_group, Camera_id and Frame_id\n\n Args:\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n 
a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Keypoints_2D' in self.smc:\n print(\"=== no key: Keypoints_2D.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_2D'][Camera_id][()][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_2D'][Camera_id][()]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints2d(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n ###Keypoints3d\n def get_Keypoints3d(self, Frame_id=None):\n \"\"\"Get keypoint3D Frame_id, TODO coordinate\n\n Args:\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n Keypoints3d tensor: np.ndarray of shape ([N], ,3)\n \"\"\" \n if not 'Keypoints_3D' in self.smc:\n print(\"=== no key: Keypoints_3D.\\nplease check available keys!\")\n return None \n\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_3D'][\"keypoints3d\"][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_3D'][\"keypoints3d\"]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints3d(fi))\n return np.stack(rs,axis=0)\n\n ###SMPLx\n def get_SMPLx(self, Frame_id=None):\n \"\"\"Get SMPL (world coordinate) computed by mocap processing pipeline.\n\n Args:\n Frame_id (int, list or None, optional):\n int: frame id of one selected frame\n list: a list of frame id\n None: all frames will be returned\n Defaults to None.\n\n Returns:\n dict:\n 'global_orient': np.ndarray of shape (N, 3)\n 'body_pose': np.ndarray of shape (N, 21, 3)\n 'transl': np.ndarray of shape (N, 3)\n 'betas': np.ndarray of shape (1, 10)\n \"\"\"\n if not 'SMPLx' in self.smc:\n print(\"=== no key: SMPLx.\\nplease check available keys!\")\n return None \n\n t_frame = self.smc['SMPLx']['betas'][()].shape[0]\n if Frame_id is None:\n frame_list = range(t_frame)\n elif isinstance(Frame_id, list):\n frame_list = [int(fi) for fi in Frame_id]\n elif isinstance(Frame_id, (int,str)):\n Frame_id = int(Frame_id)\n assert Frame_id < t_frame,\\\n f'Invalid frame_index {Frame_id}'\n frame_list = Frame_id\n else:\n raise TypeError('frame_id should be int, list or None.')\n\n smpl_dict = {}\n for key in ['betas', 'expression', 'fullpose', 'transl']:\n smpl_dict[key] = self.smc['SMPLx'][key][()][frame_list, ...]\n smpl_dict['scale'] = self.smc['SMPLx']['scale'][()]\n\n return smpl_dict\n\n def release(self):\n self.smc = None \n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None\n self.__available_keys__ = None\n self.actor_info = None \n self.Camera_5mp_info = None\n self.Camera_12mp_info = None \n self.Kinect_info = None" } ]
import os
import sys
import numpy as np
import torch
import json
import imageio
import cv2
import random
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
    read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from smpl.smpl_numpy import SMPL
from smplx.body_models import SMPLX
from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14,058
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  [email protected]
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact  [email protected]
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
11
2023-11-29 07:10:39+00:00
16k
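Between records, a brief aside on how a record of this shape can be consumed. This is a minimal sketch, not a protocol stated anywhere in this dump: it assumes the record is available as a Python dict whose keys mirror the values shown above (a context list of {"identifier", "path", "snippet"} entries, the import block, the cropped file prefix, the expected next line, and the index of the gold context snippet); the key names used below and the prompt-assembly order are assumptions, and `generate` stands in for whatever completion model is being evaluated.

# Hypothetical consumer of one record (standard library only; all key names assumed).
from typing import Any, Callable, Dict


def build_prompt(record: Dict[str, Any]) -> str:
    # Assumption: gold_snippet_index points into the record's context list.
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    # Assumption: gold snippet + import block + cropped code form the prompt, in that order.
    return "\n\n".join([gold, record["import_statement"], record["cropped_code"]])


def next_line_exact_match(record: Dict[str, Any], generate: Callable[[str], str]) -> bool:
    # `generate` is a hypothetical model call: prompt text in, completion text out.
    lines = generate(build_prompt(record)).splitlines()
    return bool(lines) and lines[0].strip() == record["next_line"].strip()

Exact match on the stripped first line is the strictest reasonable score; edit-similarity or identifier-level matching would be drop-in alternatives.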
emdgroup/baybe
examples/Backtesting/impute_mode.py
[ { "identifier": "Campaign", "path": "baybe/campaign.py", "snippet": "class Campaign(SerialMixin):\n \"\"\"Main class for interaction with BayBE.\n\n Campaigns define and record an experimentation process, i.e. the execution of a\n series of measurements and the iterative sequence of events involved.\n\n In particular, a campaign:\n * Defines the objective of an experimentation process.\n * Defines the search space over which the experimental parameter may vary.\n * Defines a strategy for traversing the search space.\n * Records the measurement data collected during the process.\n * Records metadata about the progress of the experimentation process.\n \"\"\"\n\n # DOE specifications\n searchspace: SearchSpace = field()\n \"\"\"The search space in which the experiments are conducted.\"\"\"\n\n objective: Objective = field()\n \"\"\"The optimization objective.\"\"\"\n\n strategy: Strategy = field(factory=TwoPhaseStrategy)\n \"\"\"The employed strategy\"\"\"\n\n # Data\n measurements_exp: pd.DataFrame = field(factory=pd.DataFrame, eq=eq_dataframe)\n \"\"\"The experimental representation of the conducted experiments.\"\"\"\n\n numerical_measurements_must_be_within_tolerance: bool = field(default=True)\n \"\"\"Flag for forcing numerical measurements to be within tolerance.\"\"\"\n\n # Metadata\n n_batches_done: int = field(default=0)\n \"\"\"The number of already processed batches.\"\"\"\n\n n_fits_done: int = field(default=0)\n \"\"\"The number of fits already done.\"\"\"\n\n # Private\n _cached_recommendation: pd.DataFrame = field(factory=pd.DataFrame, eq=eq_dataframe)\n \"\"\"The cached recommendations.\"\"\"\n\n @property\n def parameters(self) -> List[Parameter]:\n \"\"\"The parameters of the underlying search space.\"\"\"\n return self.searchspace.parameters\n\n @property\n def targets(self) -> List[NumericalTarget]:\n \"\"\"The targets of the underlying objective.\"\"\"\n # TODO: Currently, the `Objective` class is directly coupled to\n # `NumericalTarget`, hence the return type.\n return self.objective.targets\n\n @property\n def measurements_parameters_comp(self) -> pd.DataFrame:\n \"\"\"The computational representation of the measured parameters.\"\"\"\n if len(self.measurements_exp) < 1:\n return pd.DataFrame()\n return self.searchspace.transform(self.measurements_exp)\n\n @property\n def measurements_targets_comp(self) -> pd.DataFrame:\n \"\"\"The computational representation of the measured targets.\"\"\"\n if len(self.measurements_exp) < 1:\n return pd.DataFrame()\n return self.objective.transform(self.measurements_exp)\n\n @classmethod\n def from_config(cls, config_json: str) -> Campaign:\n \"\"\"Create a campaign from a configuration JSON.\n\n Args:\n config_json: The string with the configuration JSON.\n\n Returns:\n The constructed campaign.\n \"\"\"\n config = json.loads(config_json)\n config[\"searchspace\"] = {\n \"parameters\": config.pop(\"parameters\"),\n \"constraints\": config.pop(\"constraints\", None),\n }\n return _config_converter.structure(config, Campaign)\n\n @classmethod\n def to_config(cls) -> str:\n \"\"\"Extract the configuration of the campaign as JSON string.\n\n Note: This is not yet implemented. 
Use\n :func:`baybe.utils.serialization.SerialMixin.to_json` instead\n\n Returns:\n The configuration as JSON string.\n\n Raises:\n NotImplementedError: When trying to use this function.\n \"\"\"\n # TODO: Ideally, this should extract a \"minimal\" configuration, that is,\n # default values should not be exported, which cattrs supports via the\n # 'omit_if_default' option. Can be Implemented once the converter structure\n # has been cleaned up.\n raise NotImplementedError()\n\n @classmethod\n def validate_config(cls, config_json: str) -> None:\n \"\"\"Validate a given campaign configuration JSON.\n\n Args:\n config_json: The JSON that should be validated.\n \"\"\"\n config = json.loads(config_json)\n config[\"searchspace\"] = {\n \"parameters\": config.pop(\"parameters\"),\n \"constraints\": config.pop(\"constraints\", None),\n }\n _validation_converter.structure(config, Campaign)\n\n def add_measurements(self, data: pd.DataFrame) -> None:\n \"\"\"Add results from a dataframe to the internal database.\n\n Each addition of data is considered a new batch. Added results are checked for\n validity. Categorical values need to have an exact match. For numerical values,\n a campaign flag determines if values that lie outside a specified tolerance\n are accepted.\n Note that this modifies the provided data in-place.\n\n Args:\n data: The data to be added (with filled values for targets). Preferably\n created via :func:`baybe.campaign.Campaign.recommend`.\n\n Raises:\n ValueError: If one of the targets has missing values or NaNs in the provided\n dataframe.\n TypeError: If the target has non-numeric entries in the provided dataframe.\n \"\"\"\n # Invalidate recommendation cache first (in case of uncaught exceptions below)\n self._cached_recommendation = pd.DataFrame()\n\n # Check if all targets have valid values\n for target in self.targets:\n if data[target.name].isna().any():\n raise ValueError(\n f\"The target '{target.name}' has missing values or NaNs in the \"\n f\"provided dataframe. Missing target values are not supported.\"\n )\n if data[target.name].dtype.kind not in \"iufb\":\n raise TypeError(\n f\"The target '{target.name}' has non-numeric entries in the \"\n f\"provided dataframe. Non-numeric target values are not supported.\"\n )\n\n # Check if all targets have valid values\n for param in self.parameters:\n if data[param.name].isna().any():\n raise ValueError(\n f\"The parameter '{param.name}' has missing values or NaNs in the \"\n f\"provided dataframe. 
Missing parameter values are not supported.\"\n )\n if param.is_numeric and (data[param.name].dtype.kind not in \"iufb\"):\n raise TypeError(\n f\"The numerical parameter '{param.name}' has non-numeric entries in\"\n f\" the provided dataframe.\"\n )\n\n # Update meta data\n # TODO: refactor responsibilities\n self.searchspace.discrete.mark_as_measured(\n data, self.numerical_measurements_must_be_within_tolerance\n )\n\n # Read in measurements and add them to the database\n self.n_batches_done += 1\n to_insert = data.copy()\n to_insert[\"BatchNr\"] = self.n_batches_done\n to_insert[\"FitNr\"] = np.nan\n\n self.measurements_exp = pd.concat(\n [self.measurements_exp, to_insert], axis=0, ignore_index=True\n )\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_ADD_RESULTS\"], 1)\n telemetry_record_recommended_measurement_percentage(\n self._cached_recommendation,\n data,\n self.parameters,\n self.numerical_measurements_must_be_within_tolerance,\n )\n\n def recommend(self, batch_quantity: int = 5) -> pd.DataFrame:\n \"\"\"Provide the recommendations for the next batch of experiments.\n\n Args:\n batch_quantity: Number of requested recommendations.\n\n Returns:\n Dataframe containing the recommendations in experimental representation.\n\n Raises:\n ValueError: If ``batch_quantity`` is smaller than 1.\n \"\"\"\n if batch_quantity < 1:\n raise ValueError(\n f\"You must at least request one recommendation per batch, but provided \"\n f\"{batch_quantity=}.\"\n )\n\n # If there are cached recommendations and the batch size of those is equal to\n # the previously requested one, we just return those\n if len(self._cached_recommendation) == batch_quantity:\n return self._cached_recommendation\n\n # Update recommendation meta data\n if len(self.measurements_exp) > 0:\n self.n_fits_done += 1\n self.measurements_exp[\"FitNr\"].fillna(self.n_fits_done, inplace=True)\n\n # Get the recommended search space entries\n rec = self.strategy.recommend(\n self.searchspace,\n batch_quantity,\n self.measurements_parameters_comp,\n self.measurements_targets_comp,\n )\n\n # Cache the recommendations\n self._cached_recommendation = rec.copy()\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_RECOMMEND\"], 1)\n telemetry_record_value(TELEM_LABELS[\"BATCH_QUANTITY\"], batch_quantity)\n\n return rec" }, { "identifier": "Objective", "path": "baybe/objective.py", "snippet": "class Objective(SerialMixin):\n \"\"\"Class for managing optimization objectives.\"\"\"\n\n # TODO: The class currently directly depends on `NumericalTarget`. Once this\n # direct dependence is replaced with a dependence on `Target`, the type\n # annotations should be changed.\n\n mode: Literal[\"SINGLE\", \"DESIRABILITY\"] = field()\n \"\"\"The optimization mode.\"\"\"\n\n targets: List[Target] = field(validator=min_len(1))\n \"\"\"The list of targets used for the objective.\"\"\"\n\n weights: List[float] = field(converter=_normalize_weights)\n \"\"\"The weights used to balance the different targets. 
By default, all\n weights are equally important.\"\"\"\n\n combine_func: Literal[\"MEAN\", \"GEOM_MEAN\"] = field(\n default=\"GEOM_MEAN\", validator=in_([\"MEAN\", \"GEOM_MEAN\"])\n )\n \"\"\"The function used to combine the different targets.\"\"\"\n\n @weights.default\n def _default_weights(self) -> List[float]:\n \"\"\"Create the default weights.\"\"\"\n # By default, all targets are equally important.\n return [1.0] * len(self.targets)\n\n @targets.validator\n def _validate_targets( # noqa: DOC101, DOC103\n self, _: Any, targets: List[NumericalTarget]\n ) -> None:\n \"\"\"Validate targets depending on the objective mode.\n\n Raises:\n ValueError: If multiple targets are specified when using objective mode\n ``SINGLE``.\n \"\"\"\n # Raises a ValueError if multiple targets are specified when using objective\n # mode SINGLE.\n if (self.mode == \"SINGLE\") and (len(targets) != 1):\n raise ValueError(\n \"For objective mode 'SINGLE', exactly one target must be specified.\"\n )\n # Raises a ValueError if there are unbounded targets when using objective mode\n # DESIRABILITY.\n if self.mode == \"DESIRABILITY\":\n if any(not target.bounds.is_bounded for target in targets):\n raise ValueError(\n \"In 'DESIRABILITY' mode for multiple targets, each target must \"\n \"have bounds defined.\"\n )\n\n @weights.validator\n def _validate_weights( # noqa: DOC101, DOC103\n self, _: Any, weights: List[float]\n ) -> None:\n \"\"\"Validate target weights.\n\n Raises:\n ValueError: If the number of weights and the number of targets differ.\n \"\"\"\n if weights is None:\n return\n\n # Assert that weights is a list of numbers\n validator = deep_iterable(instance_of(float), instance_of(list))\n validator(self, _, weights)\n\n if len(weights) != len(self.targets):\n raise ValueError(\n f\"Weights list for your objective has {len(weights)} values, but you \"\n f\"defined {len(self.targets)} targets.\"\n )\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Transform targets from experimental to computational representation.\n\n Args:\n data: The data to be transformed. Must contain all target values, can\n contain more columns.\n\n Returns:\n A new dataframe with the targets in computational representation. Columns\n will be as in the input (except when objective mode is ``DESIRABILITY``).\n\n Raises:\n ValueError: If the specified averaging function is unknown.\n \"\"\"\n # Perform transformations that are required independent of the mode\n transformed = data[[t.name for t in self.targets]].copy()\n for target in self.targets:\n transformed[target.name] = target.transform(data[target.name])\n\n # In desirability mode, the targets are additionally combined further into one\n if self.mode == \"DESIRABILITY\":\n if self.combine_func == \"GEOM_MEAN\":\n func = geom_mean\n elif self.combine_func == \"MEAN\":\n func = partial(np.average, axis=1)\n else:\n raise ValueError(\n f\"The specified averaging function {self.combine_func} is unknown.\"\n )\n\n vals = func(transformed.values, weights=self.weights)\n transformed = pd.DataFrame({\"Comp_Target\": vals}, index=transformed.index)\n\n return transformed" }, { "identifier": "NumericalDiscreteParameter", "path": "baybe/parameters/numerical.py", "snippet": "class NumericalDiscreteParameter(DiscreteParameter):\n \"\"\"Parameter class for discrete numerical parameters (a.k.a. 
setpoints).\"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = True\n # See base class.\n\n # object variables\n # NOTE: The parameter values are assumed to be sorted by the tolerance validator.\n _values: Tuple[float, ...] = field(\n # FIXME[typing]: https://github.com/python-attrs/cattrs/issues/111\n converter=lambda x: sorted(cattrs.structure(x, Tuple[float, ...])), # type: ignore\n # FIXME[typing]: https://github.com/python-attrs/attrs/issues/1197\n validator=[\n min_len(2),\n validate_unique_values, # type: ignore\n validate_is_finite,\n ],\n )\n \"\"\"The values the parameter can take.\"\"\"\n\n tolerance: float = field(default=0.0)\n \"\"\"The absolute tolerance used for deciding whether a value is in range. A tolerance\n larger than half the minimum distance between parameter values is not allowed\n because that could cause ambiguity when inputting data points later.\"\"\"\n\n @tolerance.validator\n def _validate_tolerance( # noqa: DOC101, DOC103\n self, _: Any, tolerance: float\n ) -> None:\n \"\"\"Validate that the given tolerance is safe.\n\n The tolerance is the allowed experimental uncertainty when\n reading in measured values. A tolerance larger than half the minimum\n distance between parameter values is not allowed because that could cause\n ambiguity when inputting data points later.\n\n Raises:\n ValueError: If the tolerance is not safe.\n \"\"\"\n # For zero tolerance, the only left requirement is that all parameter values\n # are distinct, which is already ensured by the corresponding validator.\n if tolerance == 0.0:\n return\n\n min_dist = np.diff(self.values).min()\n if min_dist == (eps := np.nextafter(0, 1, dtype=DTypeFloatNumpy)):\n raise NumericalUnderflowError(\n f\"The distance between any two parameter values must be at least \"\n f\"twice the size of the used floating point resolution of {eps}.\"\n )\n\n if tolerance >= (max_tol := min_dist / 2.0):\n raise ValueError(\n f\"Parameter '{self.name}' is initialized with tolerance {tolerance} \"\n f\"but due to the given parameter values {self.values}, the specified \"\n f\"tolerance must be smaller than {max_tol} to avoid ambiguity.\"\n )\n\n @property\n def values(self) -> tuple: # noqa: D102\n # See base class.\n return self._values\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n comp_df = pd.DataFrame({self.name: self.values}, index=self.values)\n return comp_df\n\n def is_in_range(self, item: float) -> bool: # noqa: D102\n # See base class.\n differences_acceptable = [\n np.abs(val - item) <= self.tolerance for val in self.values\n ]\n return any(differences_acceptable)" }, { "identifier": "SubstanceParameter", "path": "baybe/parameters/substance.py", "snippet": "class SubstanceParameter(DiscreteParameter):\n \"\"\"Generic substances that are treated with cheminformatics descriptors.\n\n Only a decorrelated subset of descriptors should be used as otherwise this can\n result in a large number of features. For a handful of molecules, keeping only\n descriptors that have a maximum correlation of 0.7 reduces the number of\n descriptors to about 5-20. 
The number might be substantially higher with more\n labels given.\n \"\"\"\n\n # class variables\n is_numeric: ClassVar[bool] = False\n # See base class.\n\n # object variables\n data: Dict[str, Smiles] = field(\n validator=deep_mapping(\n mapping_validator=min_len(2),\n # FIXME[typing]: https://github.com/python-attrs/attrs/issues/1206\n key_validator=and_(instance_of(str), min_len(1)),\n value_validator=lambda *x: None,\n )\n )\n \"\"\"A mapping that provides the SMILES strings for all available parameter values.\"\"\"\n\n decorrelate: Union[bool, float] = field(\n default=True, validator=validate_decorrelation\n )\n \"\"\"Specifies the used decorrelation mode for the parameter encoding.\n\n - ``False``: The encoding is used as is.\n - ``True``: The encoding is decorrelated using a default correlation threshold.\n - float in (0, 1): The encoding is decorrelated using the specified threshold.\n \"\"\"\n\n encoding: SubstanceEncoding = field(\n default=SubstanceEncoding.MORDRED, converter=SubstanceEncoding\n )\n # See base class.\n\n @encoding.validator\n def _validate_encoding(self, _: Any, value: str) -> None: # noqa: DOC101, DOC103\n \"\"\"Validate that the chosen encoding can be used.\n\n This validation is necessary since certain encodings are only usable when\n additional dependencies, in particular the ``chem`` dependency, have been\n installed.\n\n Raises:\n ImportError: If the ``chem``dependency was not installed but an encoding\n requiring this dependency is requested.\n \"\"\"\n if value is SubstanceEncoding.MORDRED and not (\n _MORDRED_INSTALLED and _RDKIT_INSTALLED\n ):\n raise ImportError(\n \"The mordred/rdkit packages are not installed, a SubstanceParameter \"\n \"with MORDRED encoding cannot be used. Consider installing baybe with \"\n \"'chem' dependency like 'pip install baybe[chem]'\"\n )\n if (\n value in [SubstanceEncoding.RDKIT, SubstanceEncoding.MORGAN_FP]\n and not _RDKIT_INSTALLED\n ):\n raise ImportError(\n \"The rdkit package is not installed, a SubstanceParameter with \"\n \"RDKIT or MORGAN_FP encoding cannot be used. 
Consider installing baybe \"\n \"with 'chem' dependency like 'pip install baybe[chem]'\"\n )\n\n @data.validator\n def _validate_substance_data( # noqa: DOC101, DOC103\n self, _: Any, data: Dict[str, Smiles]\n ) -> None:\n \"\"\"Validate that the substance data, provided as SMILES, is valid.\n\n Raises:\n ValueError: If one or more of the SMILES are invalid.\n ValueError: If the several entries represent the same substance.\n \"\"\"\n # Check for invalid SMILES\n canonical_smiles = {}\n exceptions = []\n for name, smiles in data.items():\n try:\n canonical_smiles[name] = get_canonical_smiles(smiles)\n except ValueError:\n exceptions.append(\n ValueError(\n f\"The SMILES '{smiles}' for molecule '{name}' does \"\n f\"not appear to be valid.\"\n )\n )\n if exceptions:\n raise ExceptionGroup(\"invalid SMILES\", exceptions)\n\n # Check for duplicate substances\n if groups := group_duplicate_values(canonical_smiles):\n exceptions = []\n for group, substances in groups.items():\n group_data = {s: data[s] for s in substances}\n exceptions.append(\n ValueError(\n f\"The following entries all represent the same substance \"\n f\"'{group}': {group_data}.\"\n )\n )\n raise ExceptionGroup(\"duplicate substances\", exceptions)\n\n @property\n def values(self) -> tuple:\n \"\"\"Returns the labels of the given set of molecules.\"\"\"\n # Since the order of dictionary keys is important here, this will only work\n # for Python 3.7 or higher\n return tuple(self.data.keys())\n\n @cached_property\n def comp_df(self) -> pd.DataFrame: # noqa: D102\n # See base class.\n vals = list(self.data.values())\n pref = self.name + \"_\"\n\n # Get the raw descriptors\n if self.encoding is SubstanceEncoding.MORDRED:\n comp_df = smiles_to_mordred_features(vals, prefix=pref)\n elif self.encoding is SubstanceEncoding.RDKIT:\n comp_df = smiles_to_rdkit_features(vals, prefix=pref)\n elif self.encoding is SubstanceEncoding.MORGAN_FP:\n comp_df = smiles_to_fp_features(vals, prefix=pref)\n else:\n raise ValueError(\n f\"Unknown parameter encoding {self.encoding} for parameter {self.name}.\"\n )\n\n # Drop NaN and constant columns\n comp_df = comp_df.loc[:, ~comp_df.isna().any(axis=0)]\n comp_df = df_drop_single_value_columns(comp_df)\n\n # If there are bool columns, convert them to int (possible for Mordred)\n comp_df.loc[:, comp_df.dtypes == bool] = comp_df.loc[\n :, comp_df.dtypes == bool\n ].astype(int)\n\n # Label the rows with the molecule names\n comp_df.index = pd.Index(self.values)\n\n # Get a decorrelated subset of the descriptors\n if self.decorrelate:\n if isinstance(self.decorrelate, bool):\n comp_df = df_uncorrelated_features(comp_df)\n else:\n comp_df = df_uncorrelated_features(comp_df, threshold=self.decorrelate)\n\n return comp_df" }, { "identifier": "RandomRecommender", "path": "baybe/recommenders/sampling.py", "snippet": "class RandomRecommender(NonPredictiveRecommender):\n \"\"\"Recommends experiments randomly.\"\"\"\n\n # Class variables\n compatibility: ClassVar[SearchSpaceType] = SearchSpaceType.HYBRID\n # See base class.\n\n def _recommend_hybrid(\n self,\n searchspace: SearchSpace,\n batch_quantity: int,\n candidates_comp: Optional[pd.DataFrame] = None,\n ) -> pd.DataFrame:\n # See base class.\n\n if searchspace.type == SearchSpaceType.DISCRETE:\n if candidates_comp is None:\n raise TypeError(\n \"\"\"You did not provide a dataframe of candidates when applying the\n random recommender to a purely discrete space. 
Please ensure that\n this dataframe is not None.\"\"\"\n )\n return candidates_comp.sample(batch_quantity)\n cont_random = searchspace.continuous.samples_random(n_points=batch_quantity)\n if searchspace.type == SearchSpaceType.CONTINUOUS:\n return cont_random\n disc_candidates, _ = searchspace.discrete.get_candidates(True, True)\n\n # TODO decide mechanism if number of possible discrete candidates is smaller\n # than batch size\n disc_random = disc_candidates.sample(\n n=batch_quantity,\n replace=len(disc_candidates) < batch_quantity,\n )\n\n cont_random.reset_index(drop=True)\n cont_random.index = disc_random.index\n return pd.concat([disc_random, cont_random], axis=1)" }, { "identifier": "SearchSpace", "path": "baybe/searchspace/core.py", "snippet": "class SearchSpace(SerialMixin):\n \"\"\"Class for managing the overall search space.\n\n The search space might be purely discrete, purely continuous, or hybrid.\n Note that created objects related to the computational representations of parameters\n (e.g., parameter bounds, computational dataframes, etc.) may use a different\n parameter order than what is specified through the constructor: While the\n passed parameter list can contain parameters in arbitrary order, the\n aforementioned objects (by convention) list discrete parameters first, followed\n by continuous ones.\n \"\"\"\n\n discrete: SubspaceDiscrete = field(factory=SubspaceDiscrete.empty)\n \"\"\"The (potentially empty) discrete subspace of the overall search space.\"\"\"\n\n continuous: SubspaceContinuous = field(factory=SubspaceContinuous.empty)\n \"\"\"The (potentially empty) continuous subspace of the overall search space.\"\"\"\n\n def __attrs_post_init__(self):\n \"\"\"Perform validation and record telemetry values.\"\"\"\n validate_parameters(self.parameters)\n validate_constraints(self.constraints, self.parameters)\n\n # Telemetry\n telemetry_record_value(TELEM_LABELS[\"COUNT_SEARCHSPACE_CREATION\"], 1)\n telemetry_record_value(TELEM_LABELS[\"NUM_PARAMETERS\"], len(self.parameters))\n telemetry_record_value(\n TELEM_LABELS[\"NUM_CONSTRAINTS\"],\n len(self.constraints) if self.constraints else 0,\n )\n\n @classmethod\n def from_product(\n cls,\n parameters: List[Parameter],\n constraints: Optional[List[Constraint]] = None,\n empty_encoding: bool = False,\n ) -> SearchSpace:\n \"\"\"Create a search space from a cartesian product.\n\n In the search space, optional subsequent constraints are applied.\n That is, the discrete subspace becomes the (filtered) cartesian product\n containing all discrete parameter combinations while, analogously, the\n continuous subspace represents the (filtered) cartesian product of all\n continuous parameters.\n\n Args:\n parameters: The parameters spanning the search space.\n constraints: An optional set of constraints restricting the valid parameter\n space.\n empty_encoding: If ``True``, uses an \"empty\" encoding for all parameters.\n This is useful, for instance, in combination with random search\n strategies that do not read the actual parameter values, since it avoids\n the (potentially costly) transformation of the parameter values to their\n computational representation.\n\n Returns:\n The constructed search space.\n \"\"\"\n # IMPROVE: The arguments get pre-validated here to avoid the potentially costly\n # creation of the subspaces. 
Perhaps there is an elegant way to bypass the\n # default validation in the initializer (which is required for other\n # ways of object creation) in this particular case.\n validate_parameters(parameters)\n if constraints:\n validate_constraints(constraints, parameters)\n else:\n constraints = []\n\n discrete: SubspaceDiscrete = SubspaceDiscrete.from_product(\n parameters=[\n cast(DiscreteParameter, p) for p in parameters if p.is_discrete\n ],\n constraints=[\n cast(DiscreteConstraint, c) for c in constraints if c.is_discrete\n ],\n empty_encoding=empty_encoding,\n )\n continuous: SubspaceContinuous = SubspaceContinuous(\n parameters=[\n cast(NumericalContinuousParameter, p)\n for p in parameters\n if not p.is_discrete\n ],\n constraints_lin_eq=[\n cast(ContinuousLinearEqualityConstraint, c)\n for c in constraints\n if isinstance(c, ContinuousLinearEqualityConstraint)\n ],\n constraints_lin_ineq=[\n cast(ContinuousLinearInequalityConstraint, c)\n for c in constraints\n if isinstance(c, ContinuousLinearInequalityConstraint)\n ],\n )\n\n return SearchSpace(discrete=discrete, continuous=continuous)\n\n @property\n def parameters(self) -> List[Parameter]:\n \"\"\"Return the list of parameters of the search space.\"\"\"\n return self.discrete.parameters + self.continuous.parameters\n\n @property\n def constraints(self) -> List[Constraint]:\n \"\"\"Return the constraints of the search space.\"\"\"\n return (\n self.discrete.constraints\n + self.continuous.constraints_lin_eq\n + self.continuous.constraints_lin_ineq\n )\n\n @property\n def type(self) -> SearchSpaceType:\n \"\"\"Return the type of the search space.\"\"\"\n if self.discrete.is_empty and not self.continuous.is_empty:\n return SearchSpaceType.CONTINUOUS\n if not self.discrete.is_empty and self.continuous.is_empty:\n return SearchSpaceType.DISCRETE\n if not self.discrete.is_empty and not self.continuous.is_empty:\n return SearchSpaceType.HYBRID\n raise RuntimeError(\"This line should be impossible to reach.\")\n\n @property\n def contains_mordred(self) -> bool:\n \"\"\"Indicates if any of the discrete parameters uses ``MORDRED`` encoding.\"\"\"\n return any(\n p.encoding is SubstanceEncoding.MORDRED for p in self.discrete.parameters\n )\n\n @property\n def contains_rdkit(self) -> bool:\n \"\"\"Indicates if any of the discrete parameters uses ``RDKIT`` encoding.\"\"\"\n return any(\n p.encoding is SubstanceEncoding.RDKIT for p in self.discrete.parameters\n )\n\n @property\n def param_bounds_comp(self) -> torch.Tensor:\n \"\"\"Return bounds as tensor.\"\"\"\n return torch.hstack(\n [self.discrete.param_bounds_comp, self.continuous.param_bounds_comp]\n )\n\n @property\n def task_idx(self) -> Optional[int]:\n \"\"\"The column index of the task parameter in computational representation.\"\"\"\n try:\n # TODO [16932]: Redesign metadata handling\n task_param = next(\n p for p in self.parameters if isinstance(p, TaskParameter)\n )\n except StopIteration:\n return None\n # TODO[11611]: The current approach has two limitations:\n # 1. It matches by column name and thus assumes that the parameter name\n # is used as the column name.\n # 2. It relies on the current implementation detail that discrete parameters\n # appear first in the computational dataframe.\n # --> Fix this when refactoring the data\n return self.discrete.comp_rep.columns.get_loc(task_param.name)\n\n @property\n def n_tasks(self) -> int:\n \"\"\"The number of tasks encoded in the search space.\"\"\"\n # TODO [16932]: This approach only works for a single task parameter. 
For\n # multiple task parameters, we need to align what the output should even\n # represent (e.g. number of combinatorial task combinations, number of\n # tasks per task parameter, etc).\n try:\n task_param = next(\n p for p in self.parameters if isinstance(p, TaskParameter)\n )\n return len(task_param.values)\n\n # When there are no task parameters, we effectively have a single task\n except StopIteration:\n return 1\n\n def transform(\n self,\n data: pd.DataFrame,\n ) -> pd.DataFrame:\n \"\"\"Transform data from experimental to computational representation.\n\n This function can e.g. be used to transform data obtained from measurements.\n Continuous parameters are not transformed but included.\n\n Args:\n data: The data to be transformed. Must contain all specified parameters, can\n contain more columns.\n\n Returns:\n A dataframe with the parameters in computational representation.\n \"\"\"\n # Transform subspaces separately\n df_discrete = self.discrete.transform(data)\n df_continuous = self.continuous.transform(data)\n\n # Combine Subspaces\n comp_rep = pd.concat([df_discrete, df_continuous], axis=1)\n\n return comp_rep" }, { "identifier": "simulate_scenarios", "path": "baybe/simulation.py", "snippet": "def simulate_scenarios(\n scenarios: Dict[Any, Campaign],\n lookup: Optional[Union[pd.DataFrame, Callable]] = None,\n /,\n *,\n batch_quantity: int = 1,\n n_doe_iterations: Optional[int] = None,\n initial_data: Optional[List[pd.DataFrame]] = None,\n groupby: Optional[List[str]] = None,\n n_mc_iterations: int = 1,\n impute_mode: Literal[\n \"error\", \"worst\", \"best\", \"mean\", \"random\", \"ignore\"\n ] = \"error\",\n noise_percent: Optional[float] = None,\n) -> pd.DataFrame:\n \"\"\"Simulate multiple Bayesian optimization scenarios.\n\n A wrapper function around :func:`baybe.simulation.simulate_experiment` that\n allows to specify multiple simulation settings at once.\n\n Args:\n scenarios: A dictionary mapping scenario identifiers to DOE specifications.\n lookup: See :func:`baybe.simulation.simulate_experiment`.\n batch_quantity: See :func:`baybe.simulation.simulate_experiment`.\n n_doe_iterations: See :func:`baybe.simulation.simulate_experiment`.\n initial_data: A list of initial data sets for which the scenarios should be\n simulated.\n groupby: The names of the parameters to be used to partition the search space.\n A separate simulation will be conducted for each partition, with the search\n restricted to that partition.\n n_mc_iterations: The number of Monte Carlo simulations to be used.\n impute_mode: See :func:`baybe.simulation.simulate_experiment`.\n noise_percent: See :func:`baybe.simulation.simulate_experiment`.\n\n Returns:\n A dataframe like returned from :func:`baybe.simulation.simulate_experiment` but\n with additional columns. 
See the ``Note`` for details.\n\n Note:\n The following additional columns are contained in the dataframe returned by this\n function:\n\n * ``Scenario``: Specifies the scenario identifier of the respective simulation.\n * ``Random_Seed``: Specifies the random seed used for the respective simulation.\n * Optional, if ``initial_data`` is provided: A column ``Initial_Data`` that\n specifies the index of the initial data set used for the respective\n simulation.\n * Optional, if ``groupby`` is provided: A column for each ``groupby`` parameter\n that specifies the search space partition considered for the respective\n simulation.\n \"\"\"\n _RESULT_VARIABLE = \"simulation_result\"\n\n @dataclass\n class SimulationResult:\n \"\"\"A thin wrapper to enable dataframe-valued return values with xyzpy.\n\n Args:\n result: The result of the simulation.\n \"\"\"\n\n result: pd.DataFrame\n\n @xyz.label(var_names=[_RESULT_VARIABLE])\n def simulate(\n Scenario: str,\n Random_Seed=None,\n Initial_Data=None,\n ):\n \"\"\"Callable for xyzpy simulation.\"\"\"\n data = None if initial_data is None else initial_data[Initial_Data]\n return SimulationResult(\n _simulate_groupby(\n scenarios[Scenario],\n lookup,\n batch_quantity=batch_quantity,\n n_doe_iterations=n_doe_iterations,\n initial_data=data,\n groupby=groupby,\n random_seed=Random_Seed,\n impute_mode=impute_mode,\n noise_percent=noise_percent,\n )\n )\n\n def unpack_simulation_results(array: DataArray) -> pd.DataFrame:\n \"\"\"Turn the xyzpy simulation results into a flat dataframe.\"\"\"\n # Convert to dataframe and remove the wrapper layer\n series = array.to_series()\n series = series.apply(lambda x: x.result)\n\n # Un-nest all simulation results\n dfs = []\n for setting, df_result in series.items():\n df_setting = pd.DataFrame(\n [setting], columns=series.index.names, index=df_result.index\n )\n dfs.append(pd.concat([df_setting, df_result], axis=1))\n\n # Concatenate all results into a single dataframe\n return pd.concat(dfs, ignore_index=True)\n\n # Collect the settings to be simulated\n combos = {\"Scenario\": scenarios.keys()}\n combos[\"Random_Seed\"] = range(_DEFAULT_SEED, _DEFAULT_SEED + n_mc_iterations)\n if initial_data:\n combos[\"Initial_Data\"] = range(len(initial_data))\n\n # Simulate and unpack\n da_results = simulate.run_combos(combos)[_RESULT_VARIABLE]\n df_results = unpack_simulation_results(da_results)\n\n return df_results" }, { "identifier": "TwoPhaseStrategy", "path": "baybe/strategies/composite.py", "snippet": "class TwoPhaseStrategy(Strategy):\n \"\"\"A two-phased strategy that switches the recommender at a certain specified point.\n\n The recommender is switched when a new (batch) recommendation is requested and\n the training data set size (i.e., the total number of collected measurements\n including those gathered before the strategy was active) is equal to or greater\n than the number specified via the ``switch_after`` parameter.\n\n Note:\n Throughout each phase, the strategy reuses the **same** recommender object,\n that is, no new instances are created. 
Therefore, special attention is required\n when using the strategy with stateful recommenders.\n \"\"\"\n\n initial_recommender: Recommender = field(factory=RandomRecommender)\n \"\"\"The initial recommender used by the strategy.\"\"\"\n\n recommender: Recommender = field(factory=SequentialGreedyRecommender)\n \"\"\"The recommender used by the strategy after the switch.\"\"\"\n\n switch_after: int = field(default=1)\n \"\"\"The number of experiments after which the recommender is switched for the next\n requested batch.\"\"\"\n\n def select_recommender( # noqa: D102\n self,\n searchspace: SearchSpace,\n batch_quantity: int = 1,\n train_x: Optional[pd.DataFrame] = None,\n train_y: Optional[pd.DataFrame] = None,\n ) -> Recommender:\n # See base class.\n\n # FIXME: enable predictive recommenders for empty training data\n if (train_x is None or len(train_x) == 0) and not isinstance(\n self.initial_recommender, NonPredictiveRecommender\n ):\n raise _unsupported_recommender_error\n\n return (\n self.recommender\n if len(train_x) >= self.switch_after\n else self.initial_recommender\n )" }, { "identifier": "NumericalTarget", "path": "baybe/targets/numerical.py", "snippet": "class NumericalTarget(Target, SerialMixin):\n \"\"\"Class for numerical targets.\"\"\"\n\n # NOTE: The type annotations of `bounds` are correctly overridden by the attrs\n # converter. Nonetheless, PyCharm's linter might incorrectly raise a type warning\n # when calling the constructor. This is a known issue:\n # https://youtrack.jetbrains.com/issue/PY-34243\n # Quote from attrs docs:\n # If a converter’s first argument has a type annotation, that type will\n # appear in the signature for __init__. A converter will override an explicit\n # type annotation or type argument.\n\n mode: TargetMode = field(converter=TargetMode)\n \"\"\"The target mode.\"\"\"\n\n bounds: Interval = field(default=None, converter=convert_bounds)\n \"\"\"Optional target bounds.\"\"\"\n\n transformation: Optional[TargetTransformation] = field(\n converter=lambda x: None if x is None else TargetTransformation(x)\n )\n \"\"\"An optional target transformation.\"\"\"\n\n @transformation.default\n def _default_transformation(self) -> Optional[TargetTransformation]:\n \"\"\"Provide the default transformation for bounded targets.\"\"\"\n if self.bounds.is_bounded:\n fun = _VALID_TRANSFORMATIONS[self.mode][0]\n warnings.warn(\n f\"The transformation for target '{self.name}' \"\n f\"in '{self.mode.name}' mode has not been specified. 
\"\n f\"Setting the transformation to '{fun.name}'.\",\n UserWarning,\n )\n return fun\n return None\n\n @bounds.validator\n def _validate_bounds(self, _: Any, bounds: Interval) -> None: # noqa: DOC101, DOC103\n \"\"\"Validate the bounds.\n\n Raises:\n ValueError: If the target is defined on a half-bounded interval.\n ValueError: If the target is in ``MATCH`` mode but the provided bounds\n are infinite.\n \"\"\"\n # IMPROVE: We could also include half-way bounds, which however don't work\n # for the desirability approach\n if bounds.is_half_bounded:\n raise ValueError(\"Targets on half-bounded intervals are not supported.\")\n if self.mode is TargetMode.MATCH and not bounds.is_bounded:\n raise ValueError(\n f\"Target '{self.name}' is in {TargetMode.MATCH.name} mode,\"\n f\"which requires finite bounds.\"\n )\n\n @transformation.validator\n def _validate_transformation( # noqa: DOC101, DOC103\n self, _: Any, value: Optional[TargetTransformation]\n ) -> None:\n \"\"\"Validate that the given transformation is compatible with the specified mode.\n\n Raises:\n ValueError: If the target transformation and mode are not compatible.\n \"\"\"\n if (value is not None) and (value not in _VALID_TRANSFORMATIONS[self.mode]):\n raise ValueError(\n f\"You specified bounds for target '{self.name}', but your \"\n f\"specified transformation '{value}' is not compatible \"\n f\"with the target mode {self.mode}'. It must be one \"\n f\"of {_VALID_TRANSFORMATIONS[self.mode]}.\"\n )\n\n def transform(self, data: pd.DataFrame) -> pd.DataFrame: # noqa: D102\n # See base class.\n\n # When bounds are given, apply the respective transformation\n if self.bounds.is_bounded:\n func = _get_target_transformation(\n # TODO[typing]: For bounded targets (see if clause), the attrs default\n # ensures there is always a transformation specified.\n # Use function overloads to make this explicit.\n self.mode,\n cast(TargetTransformation, self.transformation),\n )\n transformed = pd.DataFrame(\n func(data, *self.bounds.to_tuple()), index=data.index\n )\n\n # If no bounds are given, simply negate all target values for ``MIN`` mode.\n # For ``MAX`` mode, nothing needs to be done.\n # For ``MATCH`` mode, the validators avoid a situation without specified bounds.\n elif self.mode is TargetMode.MIN:\n transformed = -data\n\n else:\n transformed = data.copy()\n\n return transformed" } ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from baybe import Campaign from baybe.objective import Objective from baybe.parameters import NumericalDiscreteParameter, SubstanceParameter from baybe.recommenders import RandomRecommender from baybe.searchspace import SearchSpace from baybe.simulation import simulate_scenarios from baybe.strategies import TwoPhaseStrategy from baybe.targets import NumericalTarget
11,199
### Example for full simulation loop using a table-based lookup mechanism with incomplete data # This example shows a simulation for a direct arylation where not all combinations were measured. # This allows us to access information about previously conducted experiments from .xlsx-files. # This examples assumes some basic familiarity with using BayBE and the lookup mechanism. # We refer to [`campaign`](./../Basics/campaign.md) for a more basic example resp. # to [`full_lookup`](./full_lookup.md) for details on the lookup mechanism. #### Necessary imports for this example #### Parameters for a full simulation loop # For the full simulation, we need to define some additional parameters. # These are the number of Monte Carlo runs and the number of experiments to be conducted per run. N_MC_ITERATIONS = 2 N_DOE_ITERATIONS = 5 #### Lookup functionality and data creation # See [`full_lookup`](./full_lookup.md) for details. try: lookup = pd.read_excel("./lookup_withmissing.xlsx") except FileNotFoundError: try: lookup = pd.read_excel("examples/Backtesting/lookup_withmissing.xlsx") except FileNotFoundError as e: print(e) # As usual, we set up some experiment. # Note that we now need to ensure that the names fit the names in the provided .xlsx file! dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } #### Creating the searchspace and the objective # Here, we create the parameter objects, the searchspace and the objective. solvent = SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED") base = SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED") ligand = SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED")
temperature = NumericalDiscreteParameter(
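For orientation, the following is a hedged sketch of how a script like the cropped example above typically continues, assembled only from the imports and context snippets shown in this row (SearchSpace.from_product, NumericalTarget, TwoPhaseStrategy, simulate_scenarios). The concrete parameter grids, the target name "yield", and the result-column names are assumptions for illustration, not the dataset's ground-truth continuation.

temperature = NumericalDiscreteParameter(  # completes the next_line above; grid values are assumed
    name="Temp_C", values=[90, 105, 120], tolerance=2
)
concentration = NumericalDiscreteParameter(
    name="Concentration", values=[0.057, 0.1, 0.153], tolerance=0.005  # assumed values
)
parameters = [solvent, base, ligand, temperature, concentration]

searchspace = SearchSpace.from_product(parameters=parameters)
objective = Objective(
    mode="SINGLE", targets=[NumericalTarget(name="yield", mode="MAX")]
)

# Compare the default strategy against a purely random baseline.
scenarios = {
    "Default Strategy": Campaign(searchspace=searchspace, objective=objective),
    "Random Baseline": Campaign(
        searchspace=searchspace,
        objective=objective,
        strategy=TwoPhaseStrategy(recommender=RandomRecommender()),
    ),
}

# impute_mode="best" fills in lookup values for parameter combinations that were never measured.
results = simulate_scenarios(
    scenarios,
    lookup,
    batch_quantity=2,
    n_doe_iterations=N_DOE_ITERATIONS,
    n_mc_iterations=N_MC_ITERATIONS,
    impute_mode="best",
)

# Column names below are assumed from BayBE's simulation output format.
sns.lineplot(data=results, x="Num_Experiments", y="yield_CumBest", hue="Scenario")
plt.gcf().set_size_inches(24, 8)
plt.savefig("./simulation_missing_data.png")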
2
2023-11-27 17:02:40+00:00
16k
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n\n self.config.use_mm_proj = True\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n # self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))" }, { "identifier": "LlavaMetaForCausalLM", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # 
st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x 
in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = 
mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n # print(\"Emb length:\", len(self.get_input_embeddings().weight.data))\n # if len(self.get_input_embeddings().weight.data) > 0:\n # if len(self.get_input_embeddings().weight.data) > 0:\n # self.get_input_embeddings().weight.data[-num_new_tokens:] = inp_embs\n # self.get_output_embeddings().weight.data[-num_new_tokens:] = out_embs\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True" }, { "identifier": "LlavaMetaForCausalLM_gd", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] 
for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n 
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = 
output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" }, { "identifier": "LlavaMetaForCausalLM_gd_interactive", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd_interactive(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images,obj_feats=None,num_it=0\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = 
torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = 
cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if batch_idx >= len(input_ids) - num_it:\n obj_idx = cur_input_ids == 1273\n idx_in_inter=batch_idx-(len(input_ids)-num_it)\n cur_new_input_embeds[-1][obj_idx] = obj_feats[idx_in_inter].to(cur_new_input_embeds[-1].dtype)\n if labels is not None:\n cur_labels[cur_labels==1273]=IGNORE_INDEX\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n def prepare_inputs_labels_for_multimodal_NoInter(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and 
images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = 
cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = 
self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def initialize_interactive_modules(self, cfg):\n from .semsam.BaseModel import BaseModel as SemSamBaseModel\n from .semsam import build_model as build_semsam_model\n\n seg_model = SemSamBaseModel(cfg, build_semsam_model(cfg))\n if not (cfg.MODEL.WEIGHTS == \"None\"):\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.interactive_model = seg_model\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" } ]
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
    LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive
import torch
import torch.nn as nn
import transformers
12,155
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


IGNORE_INDEX=-100

# @dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""
    # tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances,tokenizer):
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :tokenizer.model_max_length]
        labels = labels[:, :tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(tokenizer.pad_token_id),
        )

        if 'image_clip' in instances[0]:
            images = [instance['image_clip'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch


class LlavaConfig(LlamaConfig):
    model_type = "llava"
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


IGNORE_INDEX=-100

# @dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""
    # tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances,tokenizer):
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :tokenizer.model_max_length]
        labels = labels[:, :tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(tokenizer.pad_token_id),
        )

        if 'image_clip' in instances[0]:
            images = [instance['image_clip'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch


class LlavaConfig(LlamaConfig):
    model_type = "llava"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
0
2023-12-04 10:59:21+00:00
16k
daveredrum/SceneTex
models/pipeline/texture_pipeline.py
[ { "identifier": "TextureMesh", "path": "models/modules/meshes.py", "snippet": "class TextureMesh(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.num_instances = 0\n\n self._init_mesh()\n\n def apply_texture_to_mesh(self, mesh, faces, aux, texture_tensor, sampling_mode=\"bilinear\"):\n new_mesh = mesh.clone() # in-place operation - DANGER!!!\n new_mesh.textures = TexturesUV(\n maps=texture_tensor, # B, H, W, C\n faces_uvs=faces.textures_idx[None, ...],\n verts_uvs=aux.verts_uvs[None, ...],\n sampling_mode=sampling_mode,\n # align_corners=False\n )\n\n return new_mesh\n \n def repeat_meshes_as_batch(self, mesh, batch_size):\n return join_meshes_as_batch(\n [mesh for _ in range(batch_size)],\n include_textures=True\n )\n\n def _init_mesh(self):\n cache_dir = self.config.log_dir\n\n self.mesh_dict = init_multiple_meshes_as_scene(\n json.load(open(self.config.scene_config_path)), \n str(cache_dir), \n self.device, \n subdivide_factor=self.config.subdivide_factor,\n return_dict=True\n )\n\n self.mesh, self.texture = self._init_texture(self.mesh_dict)\n\n if self.config.use_background:\n self.background_mesh_dict = init_background(\n self.config.background,\n self.mesh.get_bounding_boxes().cpu().numpy()[0],\n str(cache_dir),\n self.device,\n return_dict=True\n )\n\n self.background_mesh, self.background_texture = self._init_texture(self.background_mesh_dict)\n\n def _init_texture(self, mesh_dict):\n texture = torch.randn((\n 1, \n self.config.latent_texture_size, \n self.config.latent_texture_size, \n self.config.latent_channels\n ), requires_grad=True, device=self.device)\n\n mesh = self.apply_texture_to_mesh(\n mesh_dict[\"mesh\"],\n mesh_dict[\"faces\"],\n mesh_dict[\"aux\"],\n texture\n )\n\n if self.config.texture_type == \"hashgrid\":\n texture = HashGrid(\n 2,\n self.config.hashgrid_config.otype,\n self.config.hashgrid_config.n_levels,\n self.config.hashgrid_config.n_features_per_level,\n self.config.hashgrid_config.log2_hashmap_size,\n self.config.hashgrid_config.base_resolution,\n self.config.hashgrid_config.max_resolution,\n torch.float16 if self.config.hashgrid_config.dtype == \"half\" else torch.float32 # full precision to avoid NaN\n )\n \n elif self.config.texture_type == \"hashgrid_mlp\":\n texture = HashGridMLP(\n 2,\n self.config.hashgrid_config,\n self.config.mlp_config\n )\n\n else:\n texture = torch.randn((\n 1, \n self.config.latent_texture_size, \n self.config.latent_texture_size, \n self.config.latent_channels\n ), requires_grad=True, device=self.device)\n\n mesh = self.apply_texture_to_mesh(\n mesh_dict[\"mesh\"],\n mesh_dict[\"faces\"],\n mesh_dict[\"aux\"],\n texture\n )\n\n return mesh, texture\n \n def sort_rand_gpu(self, pop_size, num_samples):\n \"\"\"Generate a random torch.Tensor (GPU) and sort it to generate indices.\"\"\"\n return torch.argsort(torch.rand(pop_size, device=self.device))[:num_samples]\n\n def build_instance_map(self, studio, cache_dir):\n # build instance masks\n instance_map = build_instance_map(studio, \n cache_dir, cache_dir,\n self.config.dummy_texture_path, \n self.device, self.config.texture_size, self.config.render_size, 500).to(self.device)\n\n assert len(instance_map.shape) == 2, \"instance map should be in shape (W, H)\"\n\n # replace the dummy texture with the instance map\n self.mesh = self.apply_texture_to_mesh(\n self.mesh_dict[\"mesh\"],\n self.mesh_dict[\"faces\"],\n self.mesh_dict[\"aux\"],\n instance_map[None, :, :, None].repeat(1, 1, 1, 
3),\n \"nearest\"\n )\n \n self.instance_map = instance_map\n \n def sample_instance_anchors(self, cache_dir):\n cache_path = Path(cache_dir) / \"anchors.pth\"\n\n if cache_path.exists():\n print(\"=> loading instance anchors from {}...\".format(str(cache_path)))\n self.instance_anchors = torch.load(str(cache_path))\n self.num_instances = self.instance_anchors.shape[0]\n else:\n print(\"=> sampling instance anchors...\")\n instance_labels = torch.unique(self.instance_map)\n assert instance_labels.shape[0] > 1\n instance_labels = instance_labels[instance_labels != 0]\n\n instance_anchors = []\n for instance_id in instance_labels:\n instance_mask = self.instance_map == instance_id\n uv_coords = torch.nonzero(instance_mask) # NumInsTex, 2\n sampled_ids = self.sort_rand_gpu(uv_coords.shape[0], self.config.num_anchors)\n sampled_uv_coords = uv_coords[sampled_ids, :]\n instance_anchors.append(sampled_uv_coords)\n\n instance_anchors = torch.stack(instance_anchors) # M, NumAnchor, 2\n instance_anchors = instance_anchors.float() / self.config.texture_size\n\n assert instance_anchors.min() >= 0 and instance_anchors.max() <= 1\n\n print(\"=> saving anchors to {}\".format(str(cache_path)))\n torch.save(instance_anchors, str(cache_path))\n\n self.instance_anchors = instance_anchors\n self.num_instances = self.instance_anchors.shape[0]" }, { "identifier": "Studio", "path": "models/modules/studio.py", "snippet": "class Studio(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n # render function\n self.render_func = self._init_render_func()\n\n self._init_camera_settings()\n\n def _init_camera_settings(self):\n if self.config.use_sphere_cameras and not self.config.use_blenderproc_cameras: # use random cameras\n\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n \n dist_linspace = np.linspace(\n self.sphere_cameras.dist.min,\n self.sphere_cameras.dist.max,\n 1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,\n )\n elev_linspace = np.linspace(\n self.sphere_cameras.elev.min,\n self.sphere_cameras.elev.max,\n 1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,\n )\n azim_linspace = np.linspace(\n self.sphere_cameras.azim.min,\n self.sphere_cameras.azim.max,\n 1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,\n )\n fov_linspace = np.linspace(\n self.sphere_cameras.fov.min,\n self.sphere_cameras.fov.max,\n 1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,\n )\n at = np.array(self.sphere_cameras.at)\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n dist_list = combinations[:, 0].tolist()\n elev_list = combinations[:, 1].tolist()\n azim_list = combinations[:, 2].tolist()\n\n self.Rs, self.Ts = init_trajectory(dist_list, elev_list, azim_list, at)\n self.fov_list = combinations[:, 3].tolist()\n\n self.num_cameras = len(self.Rs)\n\n print(\"=> using {} spherical cameras for training\".format(self.num_cameras))\n\n elif not self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:\n\n poses = json.load(open(self.config.blenderproc_cameras))\n self.Rs, self.Ts = init_blenderproc_trajectory(poses, self.device)\n\n self.num_cameras = len(self.Rs)\n self.fov_list = [self.config.fov] * self.num_cameras\n\n 
print(\"=> using {} blenderproc cameras for training\".format(self.num_cameras))\n\n elif self.config.use_sphere_cameras and self.config.use_blenderproc_cameras:\n\n # spherical cameras\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n \n dist_linspace = np.linspace(\n self.sphere_cameras.dist.min,\n self.sphere_cameras.dist.max,\n 1 if self.sphere_cameras.dist.min == self.sphere_cameras.dist.max else self.sphere_cameras.dist.num_linspace,\n )\n elev_linspace = np.linspace(\n self.sphere_cameras.elev.min,\n self.sphere_cameras.elev.max,\n 1 if self.sphere_cameras.elev.min == self.sphere_cameras.elev.max else self.sphere_cameras.elev.num_linspace,\n )\n azim_linspace = np.linspace(\n self.sphere_cameras.azim.min,\n self.sphere_cameras.azim.max,\n 1 if self.sphere_cameras.azim.min == self.sphere_cameras.azim.max else self.sphere_cameras.azim.num_linspace,\n )\n fov_linspace = np.linspace(\n self.sphere_cameras.fov.min,\n self.sphere_cameras.fov.max,\n 1 if self.sphere_cameras.fov.min == self.sphere_cameras.fov.max else self.sphere_cameras.fov.num_linspace,\n )\n at = np.array(self.sphere_cameras.at)\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n dist_list = combinations[:, 0].tolist()\n elev_list = combinations[:, 1].tolist()\n azim_list = combinations[:, 2].tolist()\n\n sphere_Rs, sphere_Ts = init_trajectory(dist_list, elev_list, azim_list, at)\n sphere_fov_list = combinations[:, 3].tolist()\n\n # blenderproc cameras\n poses = json.load(open(self.config.blenderproc_cameras))\n blenderproc_Rs, blenderproc_Ts = init_blenderproc_trajectory(poses, self.device)\n blenderproc_fov_list = [self.config.fov] * len(blenderproc_Rs)\n \n self.Rs = sphere_Rs + blenderproc_Rs\n self.Ts = sphere_Ts + blenderproc_Ts\n self.fov_list = sphere_fov_list + blenderproc_fov_list\n self.num_cameras = len(self.Rs)\n\n print(\"=> using {} spherical cameras and {} blenderproc cameras for training\".format(len(sphere_Rs), len(blenderproc_Rs)))\n\n # self.sphere_Rs = sphere_Rs\n # self.sphere_Ts = sphere_Ts\n # self.sphere_fov_list = sphere_fov_list\n # self.num_sphere_cameras = len(self.sphere_Rs)\n\n # self.Rs = sphere_Rs + blenderproc_Rs\n # self.Ts = sphere_Ts + blenderproc_Ts\n # self.fov_list = sphere_fov_list + blenderproc_fov_list\n # self.num_cameras = len(self.Rs)\n\n # print(\"=> using {} spherical cameras and {} blenderproc cameras for training\".format(len(sphere_Rs), len(blenderproc_Rs)))\n # print(\"=> using {} cameras before annealing and {} cameras afterwards\".format(self.num_sphere_cameras, self.num_cameras))\n\n else: # use fixed cameras\n raise NotImplementedError\n\n # for inference \n # FIXME only support spherical cameras for now\n # spherical cameras\n self.sphere_cameras = OmegaConf.load(self.config.sphere_cameras)\n\n dist_linspace = [self.sphere_cameras.dist.min] # always take the min dist from spherical cameras\n elev_linspace = [self.config.elev]\n azim_linspace = np.linspace(\n self.config.azim[0],\n self.config.azim[1],\n self.config.log_latents_views,\n )\n fov_linspace = [self.config.fov]\n at = np.array(self.sphere_cameras.at) # always take the cameras center from spherical cameras\n\n combinations = np.array(np.meshgrid(dist_linspace, elev_linspace, azim_linspace, fov_linspace)).T.reshape(-1, 4)\n self.inference_dist_list = combinations[:, 0].tolist()\n self.inference_elev_list = combinations[:, 1].tolist()\n self.inference_azim_list = combinations[:, 2].tolist()\n self.inference_fov_list = 
combinations[:, 3].tolist()\n self.inference_at = at\n\n self.num_inference_cameras = len(self.inference_dist_list)\n\n print(\"=> using {} cameras for training, {} cameras for inference.\".format(self.num_cameras, self.num_inference_cameras))\n\n def _init_render_func(self):\n if self.config.render_func_type == \"mlp\":\n if self.config.texture_type == \"hashgrid\":\n in_channels = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level\n elif self.config.texture_type == \"hashgrid_mlp\":\n in_channels = self.config.mlp_config.out_channels\n else:\n in_channels = self.config.latent_channels\n\n render_func = MLP(\n in_channels,\n self.config.render_channels,\n self.config.view_embedding_hidden_dim,\n self.config.num_view_embedding_layers,\n dtype=torch.float32\n ).to(self.device)\n \n elif self.config.render_func_type == \"none\":\n render_func = nn.Identity()\n\n else:\n raise NotImplementedError(\"not supported render function type: {}\".format(self.config.render_func_type))\n\n return render_func\n \n def init_anchor_func(self, num_instances):\n if self.config.texture_type == \"hashgrid\":\n anchor_dim = self.config.hashgrid_config.n_levels * self.config.hashgrid_config.n_features_per_level\n elif self.config.texture_type == \"hashgrid_mlp\":\n anchor_dim = self.config.mlp_config.out_channels\n else:\n anchor_dim = self.config.latent_channels\n\n anchor_func = AnchorTransformer(self.config, self.device, anchor_dim=anchor_dim, num_instances=num_instances).to(self.device)\n\n self.anchor_func = anchor_func\n\n def set_cameras(self, R, T, fov, image_size):\n return init_camera_R_T(R, T, image_size, self.device, fov)\n \n def set_renderer(self, camera, image_size):\n return init_renderer(camera,\n shader=init_flat_texel_shader(\n camera=camera,\n device=self.device\n ),\n image_size=image_size, \n faces_per_pixel=self.config.faces_per_pixel\n )\n\n def _sample_one_camera(self, step, random_cameras=False, inference=False):\n R, T, fov, idx = None, None, None, None\n if inference:\n idx = step % self.num_inference_cameras\n dist = self.inference_dist_list[idx]\n elev = self.inference_elev_list[idx]\n azim = self.inference_azim_list[idx]\n fov = self.inference_fov_list[idx]\n at = self.inference_at\n R, T = look_at_view_transform(dist, elev, azim, at=at)\n else:\n\n if random_cameras:\n idx = random.choice(range(self.num_cameras))\n else:\n idx = step % self.num_cameras\n\n R, T, fov = self.Rs[idx], self.Ts[idx], self.fov_list[idx]\n\n # if self.config.use_sphere_cameras and self.config.use_blenderproc_cameras and step < self.config.num_anneal_steps:\n \n # if random_cameras:\n # idx = random.choice(range(self.num_sphere_cameras))\n # else:\n # idx = step % self.num_sphere_cameras\n\n # R, T, fov = self.sphere_Rs[idx], self.sphere_Ts[idx], self.sphere_fov_list[idx]\n\n # else:\n\n # if random_cameras:\n # idx = random.choice(range(self.num_cameras))\n # else:\n # idx = step % self.num_cameras\n\n # R, T, fov = self.Rs[idx], self.Ts[idx], self.fov_list[idx]\n\n return R, T, fov, idx\n \n def sample_cameras(self, step, num_samples, random_cameras=False, inference=False):\n if num_samples == 1:\n return self._sample_one_camera(step, random_cameras, inference)\n else:\n Rs, Ts, fovs, ids = [], [], [], []\n cur_step = step % self.num_cameras\n \n if random_cameras:\n pool = [e for e in range(self.num_cameras) if e != cur_step]\n next_steps = random.sample(pool, k=num_samples-1)\n else:\n next_steps = [(cur_step+s+1) % self.num_cameras for s in 
range(num_samples-1)]\n\n steps = [cur_step] + next_steps\n for s in steps:\n R, T, fov, idx = self._sample_one_camera(s)\n Rs.append(R)\n Ts.append(T)\n fovs.append(fov)\n ids.append(idx)\n\n Rs = torch.cat(Rs, dim=0)\n Ts = torch.cat(Ts, dim=0)\n\n return Rs, Ts, fovs, ids\n\n def get_uv_coordinates(self, mesh, fragments):\n xyzs = mesh.verts_padded() # (N, V, 3)\n faces = mesh.faces_padded() # (N, F, 3)\n\n faces_uvs = mesh.textures.faces_uvs_padded()\n verts_uvs = mesh.textures.verts_uvs_padded()\n\n # NOTE Meshes are replicated in batch. Taking the first one is enough.\n batch_size, _, _ = xyzs.shape\n xyzs, faces, faces_uvs, verts_uvs = xyzs[0], faces[0], faces_uvs[0], verts_uvs[0]\n faces_coords = verts_uvs[faces_uvs] # (F, 3, 2)\n\n # replicate the coordinates as batch\n faces_coords = faces_coords.repeat(batch_size, 1, 1)\n\n invalid_mask = fragments.pix_to_face == -1\n target_coords = interpolate_face_attributes(\n fragments.pix_to_face, fragments.bary_coords, faces_coords\n ) # (N, H, W, 1, 3)\n _, H, W, K, _ = target_coords.shape\n target_coords[invalid_mask] = 0\n assert K == 1 # pixel_per_faces should be 1\n target_coords = target_coords.squeeze(3) # (N, H, W, 2)\n\n return target_coords\n\n def get_relative_depth_map(self, zbuf, pad_value=10):\n absolute_depth = zbuf[..., 0] # B, H, W\n no_depth = -1\n\n depth_min, depth_max = absolute_depth[absolute_depth != no_depth].min(), absolute_depth[absolute_depth != no_depth].max()\n target_min, target_max = 50, 255\n\n depth_value = absolute_depth[absolute_depth != no_depth]\n depth_value = depth_max - depth_value # reverse values\n\n depth_value /= (depth_max - depth_min)\n depth_value = depth_value * (target_max - target_min) + target_min\n\n relative_depth = absolute_depth.clone()\n relative_depth[absolute_depth != no_depth] = depth_value\n relative_depth[absolute_depth == no_depth] = pad_value # not completely black\n\n return absolute_depth, relative_depth\n\n def query_texture(self, coords, texture, encode=True):\n assert \"hashgrid\" in self.config.texture_type\n\n if encode:\n B, H, W, C = coords.shape\n inputs = coords.reshape(-1, C)\n outputs = texture(inputs)\n outputs = outputs.reshape(B, H, W, -1)\n else:\n outputs = coords\n\n return outputs.to(torch.float32)\n \n def query_anchor_features(self, anchors, texture, features, instances_in_view, is_background=False):\n if is_background:\n anchor_features = features\n else:\n # with torch.no_grad():\n # anchors = self.query_texture(anchors.unsqueeze(2), texture).squeeze(2) # M, NumAnchor, C\n # if self.config.detach_anchors:\n # anchors = anchors.detach() # the original UV features won't be updated\n\n anchors = self.query_texture(anchors.unsqueeze(2), texture).squeeze(2) # M, NumAnchor, C\n if self.config.detach_anchors:\n anchors = anchors.detach() # the original UV features won't be updated\n \n anchor_features = self.anchor_func(anchors, features, instances_in_view) # M, C\n\n return anchor_features\n\n def render_features(self, renderer, mesh, texture, is_direct=False, is_background=False, anchors=None):\n # if enable_anchor_embedding is True\n # latents will be the rendered instance map\n latents, fragments = renderer(mesh) # image: (N, H, W, C)\n\n if is_direct:\n features = latents\n else:\n uv_coords = self.get_uv_coordinates(mesh, fragments)\n features = self.query_texture(uv_coords, texture)\n\n if self.config.enable_anchor_embedding:\n features = self.query_anchor_features(anchors, texture, features, latents[..., 0], is_background)\n\n features = 
self.render_func(features)\n\n absolute_depth, relative_depth = self.get_relative_depth_map(fragments.zbuf)\n\n return features, fragments, absolute_depth, relative_depth # (N, H, W, C)\n \n def render(self, renderer, mesh, texture, background=None, background_texture=None, anchors=None, is_direct=False):\n features, fragments, absolute_depth, relative_depth = self.render_features(renderer, mesh, texture, is_direct=is_direct, is_background=False, anchors=anchors)\n\n # blend background\n # NOTE there's no need to render background if no views see the background\n if background is not None and -1 in fragments.zbuf:\n background_features, background_fragments, _, _ = self.render_features(renderer, background, background_texture, is_direct=is_direct, is_background=True, anchors=None)\n\n # blend rendering\n background_mask = fragments.zbuf == -1\n background_mask = background_mask.repeat(1, 1, 1, background_features.shape[-1])\n features[background_mask] = background_features[background_mask]\n\n # blend depth\n background_mask = fragments.zbuf == -1\n blend_zbuf = fragments.zbuf\n blend_zbuf[background_mask] = background_fragments.zbuf[background_mask]\n absolute_depth, relative_depth = self.get_relative_depth_map(blend_zbuf)\n\n return features, absolute_depth, relative_depth" }, { "identifier": "Guidance", "path": "models/modules/guidance.py", "snippet": "class Guidance(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.prompt = config.prompt + \", \" + config.a_prompt if config.a_prompt else config.prompt\n self.n_prompt = config.n_prompt\n \n self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32\n\n self._init_guidance()\n\n def _init_guidance(self):\n self._init_backbone()\n self._init_t_schedule()\n\n def _init_backbone(self):\n if self.config.diffusion_type == \"t2i\":\n from diffusers import StableDiffusionPipeline as DiffusionPipeline\n checkpoint_name = \"stabilityai/stable-diffusion-2-1-base\"\n # diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n # checkpoint_name = \"runwayml/stable-diffusion-v1-5\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n elif self.config.diffusion_type == \"d2i\":\n from diffusers import StableDiffusionDepth2ImgPipeline as DiffusionPipeline\n checkpoint_name = \"stabilityai/stable-diffusion-2-depth\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name).to(self.device)\n elif self.config.diffusion_type == \"d2i_controlnet\":\n from diffusers import StableDiffusionControlNetPipeline as DiffusionPipeline\n controlnet_name = \"lllyasviel/control_v11f1p_sd15_depth\"\n controlnet = ControlNetModel.from_pretrained(controlnet_name)\n checkpoint_name = \"runwayml/stable-diffusion-v1-5\"\n diffusion_model = DiffusionPipeline.from_pretrained(checkpoint_name, controlnet=controlnet).to(self.device)\n\n # freeze controlnet\n self.controlnet = diffusion_model.controlnet.to(self.weights_dtype)\n self.controlnet.requires_grad_(False)\n else:\n raise ValueError(\"invalid diffusion type.\")\n\n if self.config.enable_memory_efficient_attention:\n print(\"=> Enable memory efficient attention.\")\n diffusion_model.enable_xformers_memory_efficient_attention()\n\n # pretrained diffusion model\n self.tokenizer = diffusion_model.tokenizer\n self.text_encoder = diffusion_model.text_encoder\n self.vae = diffusion_model.vae\n self.unet = 
diffusion_model.unet.to(self.weights_dtype)\n\n self.text_encoder.requires_grad_(False)\n self.vae.requires_grad_(False)\n self.unet.requires_grad_(False)\n\n # use DDIMScheduler by default\n self.scheduler = DDIMScheduler.from_pretrained(checkpoint_name, subfolder=\"scheduler\")\n self.scheduler.betas = self.scheduler.betas.to(self.device)\n self.scheduler.alphas = self.scheduler.alphas.to(self.device)\n self.scheduler.alphas_cumprod = self.scheduler.alphas_cumprod.to(self.device)\n\n self.num_train_timesteps = len(self.scheduler.betas)\n\n if self.config.generation_mode == \"t2i\":\n self.scheduler.set_timesteps(self.config.num_steps)\n raise NotImplementedError\n else:\n self.scheduler.set_timesteps(self.num_train_timesteps)\n\n # phi\n # unet_phi is the same instance as unet that has been modified in-place\n # unet_phi not grad -> only train unet_phi_layers\n if self.config.loss_type == \"vsd\":\n self.unet_phi, self.unet_phi_layers = extract_lora_diffusers(self.unet, self.device)\n\n # load pretrained lora\n if len(self.config.load_lora_weights) > 0 and os.path.exists(self.config.load_lora_weights):\n print(\"=> loading pretrained LoRA weights from: {}\".format(self.config.load_lora_weights))\n self.unet_phi.load_attn_procs(self.config.load_lora_weights)\n\n # loss weights\n self.loss_weights = self._init_loss_weights(self.scheduler.betas)\n\n self.avg_loss_vsd = []\n self.avg_loss_phi = []\n self.avg_loss_rgb = []\n\n if self.config.loss_type == \"l2\": \n self.label = torchvision.io.read_image(self.config.label_path).float().to(self.device) / 255.\n self.label = self.label * 2 - 1 # -1 to 1\n self.label = self.label.unsqueeze(0)\n\n max_memory_allocated = torch.cuda.max_memory_allocated()\n print(f\"=> Maximum GPU memory allocated by PyTorch: {max_memory_allocated / 1024**3:.2f} GB\")\n\n def _init_loss_weights(self, betas): \n num_train_timesteps = len(betas)\n betas = torch.tensor(betas).to(torch.float32) if not torch.is_tensor(betas) else betas\n alphas = 1.0 - betas\n alphas_cumprod = torch.cumprod(alphas, axis=0)\n sqrt_1m_alphas_cumprod = torch.sqrt(1. 
- alphas_cumprod)\n \n weights = []\n for i in range(num_train_timesteps):\n weights.append(sqrt_1m_alphas_cumprod[i]**2)\n \n return weights\n \n def _init_t_schedule(self, t_start=0.02, t_end=0.98):\n # Create a list of time steps from 0 to num_train_timesteps\n ts = list(range(self.num_train_timesteps))\n # set ts to U[0.02,0.98] as least\n t_start = int(t_start * self.num_train_timesteps)\n t_end = int(t_end * self.num_train_timesteps)\n ts = ts[t_start:t_end]\n\n # If the scheduling strategy is \"random\", choose args.num_steps random time steps without replacement\n if self.config.t_schedule == \"random\":\n chosen_ts = np.random.choice(ts, self.config.num_steps, replace=True)\n\n # If the scheduling strategy is \"t_stages\", the total number of time steps are divided into several stages.\n # In each stage, a decreasing portion of the total time steps is considered for selection.\n # For each stage, time steps are randomly selected with replacement from the respective portion.\n # The final list of chosen time steps is a concatenation of the time steps selected in all stages.\n # Note: The total number of time steps should be evenly divisible by the number of stages.\n elif \"t_stages\" in self.config.t_schedule:\n # Parse the number of stages from the scheduling strategy string\n num_stages = int(self.config.t_schedule[8:]) if len(self.config.t_schedule[8:]) > 0 else 2\n chosen_ts = []\n for i in range(num_stages):\n # Define the portion of ts to be considered in this stage\n portion = ts[:int((num_stages-i)*len(ts)//num_stages)]\n selected_ts = np.random.choice(portion, self.config.num_steps//num_stages, replace=True).tolist()\n chosen_ts += selected_ts\n \n elif \"anneal\" in self.config.t_schedule:\n print(\"=> time step annealing after {} steps\".format(self.config.num_anneal_steps))\n\n ts_before_anneal = np.random.choice(ts, self.config.num_anneal_steps, replace=True).tolist()\n ts_after_anneal = np.random.choice(ts[:len(ts)//2], self.config.num_steps-self.config.num_anneal_steps, replace=True).tolist()\n chosen_ts = ts_before_anneal + ts_after_anneal\n \n else:\n raise ValueError(f\"Unknown scheduling strategy: {self.config.t_schedule}\")\n\n # Return the list of chosen time steps\n self.chosen_ts = chosen_ts\n\n def init_text_embeddings(self, batch_size):\n ### get text embedding\n text_input = self.tokenizer(\n [self.prompt], \n padding=\"max_length\", \n max_length=self.tokenizer.model_max_length, \n truncation=True, \n return_tensors=\"pt\"\n ).input_ids.to(self.device)\n\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input)[0].repeat(batch_size, 1, 1)\n\n max_length = text_input.shape[-1]\n uncond_input = self.tokenizer(\n [self.n_prompt], \n padding=\"max_length\", \n max_length=max_length, \n return_tensors=\"pt\"\n ).input_ids.to(self.device)\n\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input)[0].repeat(batch_size, 1, 1)\n\n self.text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n def prepare_depth_map(self, depth_map):\n assert len(depth_map.shape) == 4\n if \"controlnet\" in self.config.diffusion_type:\n depth_map = depth_map.repeat(1, 3, 1, 1).float()\n depth_map = F.interpolate(depth_map, (self.config.render_size, self.config.render_size), mode=\"bilinear\", align_corners=False)\n \n # expected range [0,1]\n depth_map /= 255.0\n else:\n # down-sample and normalize\n depth_map = F.interpolate(depth_map, (self.config.latent_size, self.config.latent_size), mode=\"bilinear\", align_corners=False)\n\n # 
expected range [-1,1]\n depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_map = 2.0 * (depth_map - depth_min) / (depth_max - depth_min) - 1.0\n # depth_map /= 255.0\n # depth_map = 2.0 * depth_map - 1.0\n\n depth_map = depth_map.to(torch.float32)\n\n return depth_map\n \n @torch.no_grad()\n def decode_latent_texture(self, inputs, use_patches=False):\n outputs = 1 / self.vae.config.scaling_factor * inputs\n\n if use_patches:\n assert self.config.latent_texture_size % self.config.decode_texture_size == 0\n batch_size = inputs.shape[0]\n num_iter_x = self.config.latent_texture_size // self.config.decode_texture_size\n num_iter_y = self.config.latent_texture_size // self.config.decode_texture_size\n patch_stride = self.config.decode_texture_size\n decoded_stride = self.config.decode_texture_size * 8\n decoded_size = self.config.latent_texture_size * 8\n decoded_texture = torch.zeros(batch_size, 3, decoded_size, decoded_size).to(self.device)\n\n for x in range(num_iter_x):\n for y in range(num_iter_y):\n patch = outputs[:, :, x*patch_stride:(x+1)*patch_stride, y*patch_stride:(y+1)*patch_stride]\n patch = self.vae.decode(patch.contiguous()).sample # B, 3, H, W\n\n decoded_texture[:, :, x*decoded_stride:(x+1)*decoded_stride, y*decoded_stride:(y+1)*decoded_stride] = patch\n \n outputs = (decoded_texture / 2 + 0.5).clamp(0, 1)\n\n else:\n outputs = self.vae.decode(outputs.contiguous()).sample # B, 3, H, W\n outputs = (outputs / 2 + 0.5).clamp(0, 1)\n\n return outputs\n \n def encode_latent_texture(self, inputs, deterministic=False):\n inputs = inputs.clamp(-1, 1)\n \n h = self.vae.encoder(inputs)\n moments = self.vae.quant_conv(h)\n mean, logvar = torch.chunk(moments, 2, dim=1)\n std = torch.zeros_like(mean) if deterministic else torch.exp(0.5 * logvar)\n sample = mean + std * torch.randn_like(mean)\n \n return self.vae.config.scaling_factor * sample\n\n def normalize_latent_texture(self, inputs):\n outputs = (inputs / 2 + 0.5).clamp(0, 1)\n\n return outputs\n \n def prepare_one_latent(self, latents, t):\n noise = torch.randn_like(latents).to(self.device)\n noisy_latents = self.scheduler.add_noise(latents, noise, t)\n clean_latents = self.scheduler.step(noise, t, noisy_latents).pred_original_sample\n\n return noise, noisy_latents, clean_latents\n\n def prepare_latents(self, latents, t, batch_size):\n t = torch.tensor([t]).to(self.device)\n noise, noisy_latents, clean_latents = self.prepare_one_latent(latents, t)\n\n return t, noise, noisy_latents, clean_latents\n \n def predict_noise(self, unet, noisy_latents, t, cross_attention_kwargs, guidance_scale, control=None):\n down_block_res_samples, mid_block_res_sample = None, None\n\n if guidance_scale == 1:\n latent_model_input = noisy_latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n \n text_embeddings, _ = self.text_embeddings.chunk(2)\n\n if control is not None: \n if \"controlnet\" in self.config.diffusion_type:\n with torch.no_grad():\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input.to(self.weights_dtype),\n t,\n encoder_hidden_states=text_embeddings.to(self.weights_dtype),\n controlnet_cond=control.to(self.weights_dtype),\n conditioning_scale=1.0,\n guess_mode=False,\n return_dict=False,\n )\n\n down_block_res_samples = [e.to(self.weights_dtype) for e in down_block_res_samples]\n mid_block_res_sample = mid_block_res_sample.to(self.weights_dtype)\n else:\n latent_model_input = 
torch.cat([latent_model_input, control], dim=1)\n\n # if self.config.verbose_mode: start = time.time()\n noise_pred = unet(\n latent_model_input.to(self.weights_dtype), \n t, \n encoder_hidden_states=text_embeddings.to(self.weights_dtype), \n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample\n ).sample.to(torch.float32)\n # if self.config.verbose_mode: print(\"=> UNet forward: {}s\".format(time.time() - start))\n else:\n latent_model_input = torch.cat([noisy_latents] * 2)\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n \n if control is not None: \n if \"controlnet\" in self.config.diffusion_type:\n with torch.no_grad():\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n latent_model_input.to(self.weights_dtype),\n t,\n encoder_hidden_states=self.text_embeddings.to(self.weights_dtype),\n controlnet_cond=torch.cat([control]*2).to(self.weights_dtype),\n conditioning_scale=1.0,\n guess_mode=False,\n return_dict=False,\n )\n\n down_block_res_samples = [e.to(self.weights_dtype) for e in down_block_res_samples]\n mid_block_res_sample = mid_block_res_sample.to(self.weights_dtype)\n else:\n latent_model_input = torch.cat([latent_model_input, torch.cat([control]*2)], dim=1)\n\n # if self.config.verbose_mode: start = time.time()\n noise_pred = unet(\n latent_model_input.to(self.weights_dtype), \n t, \n encoder_hidden_states=self.text_embeddings.to(self.weights_dtype), \n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample\n ).sample.to(torch.float32)\n # if self.config.verbose_mode: print(\"=> UNet forward: {}s\".format(time.time() - start))\n\n # perform guidance\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n return noise_pred\n\n def compute_sds_loss(self, latents, noisy_latents, noise, t, control=None):\n with torch.no_grad():\n noise_pred = self.predict_noise(\n self.unet, \n noisy_latents, \n t, \n cross_attention_kwargs={},\n guidance_scale=self.config.guidance_scale,\n control=control\n )\n\n grad = self.config.grad_scale * (noise_pred - noise)\n grad = torch.nan_to_num(grad)\n\n grad *= self.loss_weights[int(t)]\n \n # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad\n target = (latents - grad).detach()\n loss = 0.5 * F.mse_loss(latents, target, reduction=\"mean\")\n\n return loss\n \n def compute_vsd_loss(self, latents, noisy_latents, noise, t, cross_attention_kwargs, control=None): \n with torch.no_grad():\n # predict the noise residual with unet\n # set cross_attention_kwargs={\"scale\": 0} to use the pre-trained model\n if self.config.verbose_mode: start = time.time()\n noise_pred = self.predict_noise(\n self.unet, \n noisy_latents, \n t, \n cross_attention_kwargs={\"scale\": 0},\n guidance_scale=self.config.guidance_scale,\n control=control\n )\n if self.config.verbose_mode: print(\"=> VSD pretrained forward: {}s\".format(time.time() - start))\n\n if self.config.verbose_mode: start = time.time()\n noise_pred_phi = self.predict_noise(\n self.unet_phi, \n noisy_latents, \n t, \n cross_attention_kwargs=cross_attention_kwargs,\n guidance_scale=self.config.guidance_scale_phi,\n control=control\n )\n if self.config.verbose_mode: print(\"=> VSD lora forward: {}s\".format(time.time() - start))\n\n grad = 
self.config.grad_scale * (noise_pred - noise_pred_phi.detach())\n grad = torch.nan_to_num(grad)\n\n grad *= self.loss_weights[int(t)]\n \n # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad\n target = (latents - grad).detach()\n loss = 0.5 * F.mse_loss(latents, target, reduction=\"none\")\n\n return loss, loss.mean()\n \n def compute_vsd_phi_loss(self, noisy_latents, clean_latents, noise, t, cross_attention_kwargs, control=None):\n if self.config.verbose_mode: start = time.time()\n noise_pred_phi = self.predict_noise(\n self.unet_phi, \n noisy_latents, \n t, \n cross_attention_kwargs=cross_attention_kwargs,\n guidance_scale=self.config.guidance_scale_phi,\n control=control\n )\n\n if self.config.verbose_mode: print(\"=> phi lora forward: {}s\".format(time.time() - start))\n\n target = noise\n\n loss = self.config.grad_scale * F.mse_loss(noise_pred_phi, target, reduction=\"none\")\n\n return loss, loss.mean()" } ]
import random
import wandb
import json
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
import pytorch_lightning as pl
import matplotlib.pyplot as plt
import sys
import open_clip

from torch.optim import Adam, AdamW
from torch.optim.lr_scheduler import LinearLR
from omegaconf import OmegaConf
from tqdm import tqdm
from omegaconf import OmegaConf
from PIL import Image
from copy import deepcopy
from pathlib import Path
from pytorch3d.io import (
    load_obj,
    load_objs_as_meshes
)
from pytorch3d.renderer import TexturesUV
from pytorch3d.ops import interpolate_face_attributes
from models.modules import TextureMesh, Studio, Guidance
10,858
# mat
# customized
sys.path.append("./lib")


class TexturePipeline(nn.Module):
    def __init__(self,
        config,
        stamp,
        device
    ):
        super().__init__()

        self.config = config
        self.stamp = stamp

        self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt
        self.n_prompt = config.n_prompt

        self.device = device

        self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32
        print("=> Use precision: {}".format(self.weights_dtype))

        pl.seed_everything(self.config.seed)

    """call this after to(device)"""
    def configure(self, inference_mode=False):
        if not inference_mode:
            self.log_name = "_".join(self.config.prompt.split(' '))
            self.log_stamp = self.stamp
            self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp)

            # override config
            self.config.log_name = self.log_name
            self.config.log_stamp = self.log_stamp
            self.config.log_dir = self.log_dir

        # 3D assets
        self._init_mesh()

        # studio
        self._init_studio()

        # instances
        self._init_anchors()

        if not inference_mode:
            # diffusion
            self._init_guidance()

            # optimization
            self._configure_optimizers()

            self._init_logger()

            if self.config.enable_clip_benchmark:
                self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
                self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')

    def _init_studio(self):
        self.studio = Studio(self.config, self.device)

    def _init_mesh(self):
        self.texture_mesh = TextureMesh(self.config, self.device)

    def _init_guidance(self):
# mat
# customized
sys.path.append("./lib")


class TexturePipeline(nn.Module):
    def __init__(self,
        config,
        stamp,
        device
    ):
        super().__init__()

        self.config = config
        self.stamp = stamp

        self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt
        self.n_prompt = config.n_prompt

        self.device = device

        self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32
        print("=> Use precision: {}".format(self.weights_dtype))

        pl.seed_everything(self.config.seed)

    """call this after to(device)"""
    def configure(self, inference_mode=False):
        if not inference_mode:
            self.log_name = "_".join(self.config.prompt.split(' '))
            self.log_stamp = self.stamp
            self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp)

            # override config
            self.config.log_name = self.log_name
            self.config.log_stamp = self.log_stamp
            self.config.log_dir = self.log_dir

        # 3D assets
        self._init_mesh()

        # studio
        self._init_studio()

        # instances
        self._init_anchors()

        if not inference_mode:
            # diffusion
            self._init_guidance()

            # optimization
            self._configure_optimizers()

            self._init_logger()

            if self.config.enable_clip_benchmark:
                self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
                self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')

    def _init_studio(self):
        self.studio = Studio(self.config, self.device)

    def _init_mesh(self):
        self.texture_mesh = TextureMesh(self.config, self.device)

    def _init_guidance(self):
self.guidance = Guidance(self.config, self.device)
2
2023-11-28 15:38:40+00:00
16k
Vchitect/VBench
vbench/third_party/umt/datasets/build.py
[ { "identifier": "TubeMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * self.width\n self.total_patches = self.frames * self.num_patches_per_frame \n self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)\n self.total_masks = self.frames * self.num_masks_per_frame\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.total_patches, self.total_masks\n )\n return repr_str\n\n def __call__(self):\n mask_per_frame = np.hstack([\n np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),\n np.ones(self.num_masks_per_frame),\n ])\n np.random.shuffle(mask_per_frame)\n mask = np.tile(mask_per_frame, (self.frames, 1)).flatten()\n return mask " }, { "identifier": "RandomMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class RandomMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n if not isinstance(input_size, tuple):\n input_size = (input_size, ) * 3\n\n self.frames, self.height, self.width = input_size\n\n self.num_patches = self.frames * self.height * self.width # 8x14x14\n self.num_mask = int(mask_ratio * self.num_patches)\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.num_patches, self.num_mask)\n return repr_str\n\n def __call__(self):\n mask = np.hstack([\n np.zeros(self.num_patches - self.num_mask),\n np.ones(self.num_mask),\n ])\n np.random.shuffle(mask)\n return mask # [196*8]" }, { "identifier": "VideoMAE", "path": "vbench/third_party/umt/datasets/mae.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own video classification dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are three items in each line: (1) video path; (2) video length and (3) video label.\n prefix : str, required.\n The prefix for loading data.\n split : str, required.\n The split character for metadata.\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default None.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. 
Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n video_loader : bool, default False.\n Whether to use video loader to load data.\n use_decord : bool, default True.\n Whether to use Decord video loader to load data. Otherwise load image.\n transform : function, default None.\n A function that takes data and label and transforms them.\n data_aug : str, default 'v1'.\n Different types of data augmentation auto. Supports v1, v2, v3 and v4.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n \"\"\"\n def __init__(self,\n root,\n setting,\n prefix='',\n split=' ',\n train=True,\n test_mode=False,\n name_pattern='img_%05d.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n video_loader=False,\n use_decord=True,\n lazy_init=False,\n num_sample=1,\n ):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.prefix = prefix\n self.split = split\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_loader = video_loader\n self.video_ext = video_ext\n self.use_decord = use_decord\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n # sparse sampling, num_segments != 1\n if self.num_segments != 1:\n print('Use sparse sampling, change frame and stride')\n self.new_length = self.num_segments\n self.skip_length = 1\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise(RuntimeError(\"Found 0 video clips in subfolders of: \" + root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n while True:\n try:\n images = None\n if self.use_decord:\n directory, target = self.clips[index]\n if self.video_loader:\n if '.' 
in directory.split('/')[-1]:\n # data in the \"setting\" file already have extension, e.g., demo.mp4\n video_name = directory\n else:\n # data in the \"setting\" file do not have extension, e.g., demo\n # So we need to provide extension (i.e., .mp4) to complete the file name.\n video_name = '{}.{}'.format(directory, self.video_ext)\n\n video_name = os.path.join(self.prefix, video_name)\n if video_name.startswith('s3'):\n video_bytes = self.client.get(video_name)\n decord_vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n decord_vr = decord.VideoReader(video_name, num_threads=1, ctx=cpu(0))\n duration = len(decord_vr)\n \n segment_indices, skip_offsets = self._sample_train_indices(duration)\n images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)\n \n else:\n video_name, total_frame, target = self.clips[index]\n video_name = os.path.join(self.prefix, video_name)\n\n segment_indices, skip_offsets = self._sample_train_indices(total_frame)\n frame_id_list = self._get_frame_id_list(total_frame, segment_indices, skip_offsets)\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(video_name, self.name_pattern.format(idx))\n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n images.append(Image.fromarray(img)) \n if images is not None:\n break\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n \n if self.num_sample > 1:\n process_data_list = []\n mask_list = []\n for _ in range(self.num_sample):\n process_data, mask = self.transform((images, None))\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0, 1)\n process_data_list.append(process_data)\n mask_list.append(mask)\n return process_data_list, mask_list\n else:\n process_data, mask = self.transform((images, None)) # T*C,H,W\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0, 1) # T*C,H,W -> T,C,H,W -> C,T,H,W\n return (process_data, mask)\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, directory, setting):\n if not os.path.exists(setting):\n raise(RuntimeError(\"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \" % (setting)))\n clips = []\n\n print(f'Load dataset using decord: {self.use_decord}')\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(self.split)\n if len(line_info) < 2:\n raise(RuntimeError('Video input format is not correct, missing one or more element. 
%s' % line))\n if self.use_decord:\n # line format: video_path, video_label\n clip_path = os.path.join(line_info[0])\n target = int(line_info[1])\n item = (clip_path, target)\n else:\n # line format: video_path, video_duration, video_label\n clip_path = os.path.join(line_info[0])\n total_frame = int(line_info[1])\n target = int(line_info[2])\n item = (clip_path, total_frame, target)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)),\n average_duration)\n offsets = offsets + np.random.randint(average_duration,\n size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(np.random.randint(\n num_frames - self.skip_length + 1,\n size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def _get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list\n\n def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):\n sampled_list = []\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n try:\n video_data = video_reader.get_batch(frame_id_list).asnumpy()\n sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)]\n except:\n raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration))\n return sampled_list" }, { "identifier": "VideoClsDataset", "path": "vbench/third_party/umt/datasets/kinetics.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,\n args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n assert num_segment == 1\n if self.mode in ['train']:\n self.aug = 
True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) / 2\n spatial_start 
= int(spatial_step)\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[:, spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[:, :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1, chunk_nb=0):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = len(vr) // self.num_segment\n\n if self.mode == 'test':\n temporal_step = max(1.0 * (len(vr) - converted_len) / (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n\n bound = min(temporal_start + converted_len, len(vr))\n all_index = [x for x in range(temporal_start, bound, self.frame_sample_rate)]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n 
else:\n if self.mode == 'validation':\n end_idx = (seg_len - converted_len) // 2\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i*seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoClsDataset_sparse", "path": "vbench/third_party/umt/datasets/kinetics_sparse.py", "snippet": "class VideoClsDataset_sparse(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,\n args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n assert num_segment == 1\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=-1) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = 
self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=-1)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=0)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=0)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) / 2\n spatial_start = int(spatial_step)\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[:, spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[:, :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n 
num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def _get_seq_frames(self, video_size, num_frames, clip_idx=-1):\n seg_size = max(0., float(video_size - 1) / num_frames)\n max_frame = int(video_size) - 1\n seq = []\n # index from 1, must add 1\n if clip_idx == -1:\n for i in range(num_frames):\n start = int(np.round(seg_size * i))\n end = int(np.round(seg_size * (i + 1)))\n idx = min(random.randint(start, end), max_frame)\n seq.append(idx)\n else:\n num_segment = 1\n if self.mode == 'test':\n num_segment = self.test_num_segment\n duration = seg_size / (num_segment + 1)\n for i in range(num_frames):\n start = int(np.round(seg_size * i))\n frame_index = start + int(duration * (clip_idx + 1))\n idx = min(frame_index, max_frame)\n seq.append(idx)\n return seq\n\n def loadvideo_decord(self, sample, chunk_nb=0):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n\n all_index = self._get_seq_frames(len(vr), self.clip_len, clip_idx=chunk_nb)\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "SSVideoClsDataset", "path": "vbench/third_party/umt/datasets/ssv2.py", "snippet": "class SSVideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n \n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, 
interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = 
create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n if self.mode == 'test':\n tick = len(vr) / float(self.num_segment)\n all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = np.sort(np.array(all_index))\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n elif self.mode == 'validation':\n tick = len(vr) / float(self.num_segment)\n all_index = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n if average_duration > 0:\n all_index = list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index = list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index = list(np.zeros((self.num_segment,)))\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "SSRawFrameClsDataset", "path": "vbench/third_party/umt/datasets/ssv2.py", "snippet": "class SSRawFrameClsDataset(Dataset):\n \"\"\"Load your own raw frame classification dataset.\"\"\"\n\n def __init__(self, 
anno_path, prefix='', split=' ', mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256, new_width=340,\n keep_aspect_ratio=True, num_segment=1, num_crop=1, test_num_segment=10,\n test_num_crop=3, filename_tmpl='img_{:05}.jpg', args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.filename_tmpl = filename_tmpl\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\n \"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.total_frames = list(cleaned.values[:, 1])\n self.label_array = list(cleaned.values[:, -1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size,\n interpolation='bilinear'),\n CenterCrop(size=(self.crop_size,\n self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size),\n interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_total_frames = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n self.test_seg.append((ck, cp))\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_total_frames.append(self.total_frames[idx])\n self.test_label_array.append(self.label_array[idx])\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample,\n total_frame,\n sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample,\n total_frame,\n sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample, total_frame)\n if len(buffer) == 0:\n 
while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_frame(sample, total_frame)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def load_frame(self, sample, num_frames, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n if self.mode == 'test':\n tick = num_frames / float(self.num_segment)\n all_index = []\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index)))\n imgs = []\n 
for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1)) \n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n # handle temporal segments\n average_duration = num_frames // self.num_segment\n all_index = []\n if average_duration > 0:\n if self.mode == 'validation':\n all_index = list(\n np.multiply(list(range(self.num_segment)),\n average_duration) +\n np.ones(self.num_segment, dtype=int) *\n (average_duration // 2))\n else:\n all_index = list(\n np.multiply(list(range(self.num_segment)),\n average_duration) +\n np.random.randint(average_duration, size=self.num_segment))\n elif num_frames > self.num_segment:\n if self.mode == 'validation':\n all_index = list(range(self.num_segment))\n else:\n all_index = list(\n np.sort(\n np.random.randint(num_frames, size=self.num_segment)))\n else:\n all_index = [0] * (self.num_segment - num_frames) + list(\n range(num_frames))\n all_index = list(np.array(all_index))\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1))\n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" } ]
import os
from torchvision import transforms
from .transforms import *
from .masking_generator import TubeMaskingGenerator, RandomMaskingGenerator
from .mae import VideoMAE
from .kinetics import VideoClsDataset
from .kinetics_sparse import VideoClsDataset_sparse
from .ssv2 import SSVideoClsDataset, SSRawFrameClsDataset
14384
class DataAugmentationForVideoMAE(object):
    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        if args.color_jitter > 0:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupColorJitter(args.color_jitter),
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        else:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        if args.mask_type == 'tube':
            self.masked_position_generator = TubeMaskingGenerator(
                args.window_size, args.mask_ratio
            )
        elif args.mask_type == 'random':
class DataAugmentationForVideoMAE(object):
    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        if args.color_jitter > 0:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupColorJitter(args.color_jitter),
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        else:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        if args.mask_type == 'tube':
            self.masked_position_generator = TubeMaskingGenerator(
                args.window_size, args.mask_ratio
            )
        elif args.mask_type == 'random':
self.masked_position_generator = RandomMaskingGenerator(
1
2023-11-27 12:41:46+00:00
16k
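The record above ends with its `level` field; the next record begins with its `repo_name` below. As a minimal sketch of how rows in this schema could be consumed, the snippet below assumes the dump is stored as JSON Lines with the field names shown in the records (`import_statement`, `cropped_code`, `next_line`); the path `data.jsonl` and the `predict_next_line` stub are hypothetical placeholders, not part of the dataset.

```python
import json


def predict_next_line(import_statement: str, cropped_code: str) -> str:
    """Hypothetical stand-in for a code-completion model."""
    # A real system would prompt a model with the imports and the cropped code;
    # here an empty string is returned as a placeholder.
    return ""


def evaluate(path: str = "data.jsonl") -> float:
    """Exact-match accuracy of predicted lines against the `next_line` field."""
    total, correct = 0, 0
    with open(path, "r", encoding="utf-8") as f:
        for raw in f:
            row = json.loads(raw)
            pred = predict_next_line(row["import_statement"], row["cropped_code"])
            correct += int(pred.strip() == row["next_line"].strip())
            total += 1
    return correct / max(total, 1)


if __name__ == "__main__":
    print(f"exact-match accuracy: {evaluate():.3f}")
```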
HyeonHo99/Video-Motion-Customization
showone/models/unet_3d_condition.py
[ { "identifier": "TransformerTemporalModel", "path": "showone/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to use in feed-forward.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlock` attention should contain a bias parameter.\n double_self_attention (`bool`, *optional*):\n Configure if each `TransformerBlock` should contain two self-attention layers.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states,\n encoder_hidden_states=None,\n timestep=None,\n class_labels=None,\n num_frames=1,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`TransformerTemporal`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.long`, *optional*):\n Used to indicate denoising step. 
Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is\n returned, otherwise a `tuple` where the first element is the sample tensor.\n \"\"\"\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, channel, num_frames)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" }, { "identifier": "CrossAttnDownBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n downsample_padding=1,\n add_downsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n attentions = []\n temp_attentions = []\n temp_convs = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n 
cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n output_states = ()\n\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, **ckpt_kwargs,\n ).sample\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "CrossAttnUpBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class CrossAttnUpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n 
transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n cross_attention_dim=1280,\n output_scale_factor=1.0,\n add_upsample=True,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n attentions = []\n temp_attentions = []\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n out_channels // num_attention_heads,\n in_channels=out_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n num_frames: int = 1,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n for resnet, temp_conv, attn, temp_attn in zip(\n self.resnets, self.temp_convs, self.attentions, self.temp_attentions\n ):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), 
hidden_states, temb, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, **ckpt_kwargs,)\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(attn, return_dict=False),\n hidden_states,\n encoder_hidden_states,\n None, # timestep\n None, # class_labels\n cross_attention_kwargs,\n attention_mask,\n encoder_attention_mask,\n **ckpt_kwargs,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "DownBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class DownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_downsample=True,\n downsample_padding=1,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else out_channels\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_downsample:\n self.downsamplers = nn.ModuleList(\n [\n Downsample2D(\n out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name=\"op\"\n )\n ]\n )\n else:\n self.downsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, temb=None, num_frames=1):\n output_states = ()\n\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n output_states = output_states + (hidden_states,)\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = 
output_states + (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "UNetMidBlock3DCrossAttn", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n transformer_layers_per_block: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n num_attention_heads=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n dual_cross_attention=False,\n use_linear_projection=False,\n upcast_attention=False,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n self.num_attention_heads = num_attention_heads\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n attentions.append(\n Transformer2DModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=transformer_layers_per_block,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n num_attention_heads,\n in_channels // num_attention_heads,\n in_channels=in_channels,\n num_layers=1, #todo: transformer_layers_per_block?\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n 
encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UNetMidBlock3DSimpleCrossAttn", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UNetMidBlock3DSimpleCrossAttn(nn.Module):\n def __init__(\n self,\n in_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n attention_head_dim=1,\n output_scale_factor=1.0,\n cross_attention_dim=1280,\n skip_time_act=False,\n only_cross_attention=False,\n cross_attention_norm=None,\n ):\n super().__init__()\n\n self.has_cross_attention = True\n\n self.attention_head_dim = attention_head_dim\n resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)\n\n self.num_heads = in_channels // self.attention_head_dim\n\n # there is always at least one resnet\n resnets = [\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n ]\n temp_convs = [\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n ]\n attentions = []\n temp_attentions = []\n\n for _ in range(num_layers):\n processor = (\n AttnAddedKVProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") else AttnAddedKVProcessor()\n )\n\n attentions.append(\n Attention(\n query_dim=in_channels,\n cross_attention_dim=in_channels,\n heads=self.num_heads,\n dim_head=self.attention_head_dim,\n added_kv_proj_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n bias=True,\n upcast_softmax=True,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n processor=processor,\n )\n )\n temp_attentions.append(\n TransformerTemporalModel(\n self.attention_head_dim,\n in_channels // self.attention_head_dim,\n in_channels=in_channels,\n num_layers=1,\n cross_attention_dim=cross_attention_dim,\n norm_num_groups=resnet_groups,\n )\n )\n resnets.append(\n ResnetBlock2D(\n in_channels=in_channels,\n out_channels=in_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n skip_time_act=skip_time_act,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n in_channels,\n in_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n self.attentions = nn.ModuleList(attentions)\n self.temp_attentions = nn.ModuleList(temp_attentions)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n 
):\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n\n if attention_mask is None:\n # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.\n mask = None if encoder_hidden_states is None else encoder_attention_mask\n else:\n # when attention_mask is defined: we don't even check for encoder_attention_mask.\n # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.\n # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.\n # then we can simplify this whole if/else block to:\n # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask\n mask = attention_mask\n\n hidden_states = self.resnets[0](hidden_states, temb)\n hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)\n for attn, temp_attn, resnet, temp_conv in zip(\n self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]\n ):\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=mask,\n **cross_attention_kwargs,\n )\n hidden_states = temp_attn(\n hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs\n ).sample\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n return hidden_states" }, { "identifier": "UpBlock3D", "path": "showone/models/unet_3d_blocks.py", "snippet": "class UpBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n prev_output_channel: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers: int = 1,\n resnet_eps: float = 1e-6,\n resnet_time_scale_shift: str = \"default\",\n resnet_act_fn: str = \"swish\",\n resnet_groups: int = 32,\n resnet_pre_norm: bool = True,\n output_scale_factor=1.0,\n add_upsample=True,\n ):\n super().__init__()\n resnets = []\n temp_convs = []\n\n for i in range(num_layers):\n res_skip_channels = in_channels if (i == num_layers - 1) else out_channels\n resnet_in_channels = prev_output_channel if i == 0 else out_channels\n\n resnets.append(\n ResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=resnet_eps,\n groups=resnet_groups,\n dropout=dropout,\n time_embedding_norm=resnet_time_scale_shift,\n non_linearity=resnet_act_fn,\n output_scale_factor=output_scale_factor,\n pre_norm=resnet_pre_norm,\n )\n )\n temp_convs.append(\n TemporalConvLayer(\n out_channels,\n out_channels,\n dropout=0.1,\n )\n )\n\n self.resnets = nn.ModuleList(resnets)\n self.temp_convs = nn.ModuleList(temp_convs)\n\n if add_upsample:\n self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])\n else:\n self.upsamplers = None\n\n self.gradient_checkpointing = False\n\n def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):\n for resnet, temp_conv in zip(self.resnets, self.temp_convs):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = 
torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)\n hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temp_conv), hidden_states, num_frames, use_reentrant=False)\n else:\n hidden_states = resnet(hidden_states, temb)\n hidden_states = temp_conv(hidden_states, num_frames=num_frames)\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states" }, { "identifier": "get_down_block", "path": "showone/models/unet_3d_blocks.py", "snippet": "def get_down_block(\n down_block_type,\n num_layers,\n in_channels,\n out_channels,\n temb_channels,\n add_downsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n downsample_padding=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n downsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n if down_block_type == \"DownBlock3D\":\n return DownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"CrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnDownBlock3D\")\n return CrossAttnDownBlock3D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n downsample_padding=downsample_padding,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif down_block_type == \"SimpleCrossAttnDownBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnDownBlock3D\")\n return SimpleCrossAttnDownBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif down_block_type == 
\"ResnetDownsampleBlock3D\":\n return ResnetDownsampleBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n add_downsample=add_downsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n raise ValueError(f\"{down_block_type} does not exist.\")" }, { "identifier": "get_up_block", "path": "showone/models/unet_3d_blocks.py", "snippet": "def get_up_block(\n up_block_type,\n num_layers,\n in_channels,\n out_channels,\n prev_output_channel,\n temb_channels,\n add_upsample,\n resnet_eps,\n resnet_act_fn,\n transformer_layers_per_block=1,\n num_attention_heads=None,\n resnet_groups=None,\n cross_attention_dim=None,\n dual_cross_attention=False,\n use_linear_projection=False,\n only_cross_attention=False,\n upcast_attention=False,\n resnet_time_scale_shift=\"default\",\n resnet_skip_time_act=False,\n resnet_out_scale_factor=1.0,\n cross_attention_norm=None,\n attention_head_dim=None,\n upsample_type=None,\n):\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n if up_block_type == \"UpBlock3D\":\n return UpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for CrossAttnUpBlock3D\")\n return CrossAttnUpBlock3D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"SimpleCrossAttnUpBlock3D\":\n if cross_attention_dim is None:\n raise ValueError(\"cross_attention_dim must be specified for SimpleCrossAttnUpBlock3D\")\n return SimpleCrossAttnUpBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n attention_head_dim=attention_head_dim,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n only_cross_attention=only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif up_block_type == 
\"ResnetUpsampleBlock3D\":\n return ResnetUpsampleBlock3D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n output_scale_factor=resnet_out_scale_factor,\n )\n raise ValueError(f\"{up_block_type} does not exist.\")" } ]
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.activations import get_activation
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor
from diffusers.models.embeddings import (
    GaussianFourierProjection,
    ImageHintTimeEmbedding,
    ImageProjection,
    ImageTimeEmbedding,
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin

from .transformer_temporal import TransformerTemporalModel
from .unet_3d_blocks import (
    CrossAttnDownBlock3D,
    CrossAttnUpBlock3D,
    DownBlock3D,
    UNetMidBlock3DCrossAttn,
    UNetMidBlock3DSimpleCrossAttn,
    UpBlock3D,
    get_down_block,
    get_up_block,
)
from diffusers.utils import WEIGHTS_NAME

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
import os, json
13,182
for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ # count = len(self.attn_processors.keys()) # ignore temporal attention count = len({k: v for k, v in self.attn_processors.items() if "temp_" not in k}.keys()) # Show-1 original line #count = len(self.attn_processors.keys()) # --> If BoxDiff: use this line if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor") and "temp_" not in name: if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # Copyright 2023 The ModelScope Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from diffusers.models.transformer_temporal import TransformerTemporalModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, it will skip the normalization and activation layers in post-processing norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: Optional[str] = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transfromer_in_opt: bool =False, ): super().__init__() self.sample_size = sample_size self.transformer_in_opt = transfromer_in_opt if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. 
`block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if self.transformer_in_opt: self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=64, in_channels=block_out_channels[0], num_layers=1, ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn": self.mid_block = UNetMidBlock3DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) elif mid_block_type == "UNetMidBlock3DSimpleCrossAttn": self.mid_block = UNetMidBlock3DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not 
is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ # count = len(self.attn_processors.keys()) # ignore temporal attention count = len({k: v for k, v in self.attn_processors.items() if "temp_" not in k}.keys()) # Show-1 original line #count = len(self.attn_processors.keys()) # --> If BoxDiff: use this line if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." 
) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor") and "temp_" not in name: if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
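# The isinstance(...) line above is the continuation of _set_gradient_checkpointing: it
# simply flips a gradient_checkpointing flag on the block classes that support it, and
# UpBlock3D.forward (see the context snippet earlier) then routes resnets and temporal
# convs through torch.utils.checkpoint when that flag is set during training. A minimal
# sketch of the same toggle pattern with placeholder block classes, not the real 3D blocks:
import torch.nn as nn


class DummyCheckpointableBlock(nn.Module):
    """Stand-in for blocks like UpBlock3D that expose a gradient_checkpointing flag."""

    def __init__(self):
        super().__init__()
        self.gradient_checkpointing = False


class DummyPlainBlock(nn.Module):
    """Stand-in for modules without checkpointing support."""


def set_gradient_checkpointing(model: nn.Module, value: bool = False) -> None:
    # Walk all submodules and enable/disable checkpointing where supported,
    # analogous to the isinstance(...) check in _set_gradient_checkpointing.
    for module in model.modules():
        if isinstance(module, DummyCheckpointableBlock):
            module.gradient_checkpointing = value


model = nn.Sequential(DummyCheckpointableBlock(), DummyPlainBlock())
set_gradient_checkpointing(model, True)
print(model[0].gradient_checkpointing)  # True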
6
2023-11-29 17:23:45+00:00
16k
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n self.validate = validate\n self.gif = gif\n self.aspect = FLAGS.train_res[1] / FLAGS.train_res[0]\n self.fovy_range_min = np.deg2rad(FLAGS.fovy_range[0])\n self.fovy_range_max = np.deg2rad(FLAGS.fovy_range[1])\n self.elevation_range_min= np.deg2rad(FLAGS.elevation_range[0])\n self.elevation_range_max= np.deg2rad(FLAGS.elevation_range[1])\n self.angle_front = np.deg2rad(FLAGS.front_threshold)\n \n\n def _gif_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.display_res[1] / self.FLAGS.display_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 100) * np.pi * 2\n rotate_x = np.deg2rad(20)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(-rotate_x) @ util.rotate_y(ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n \n \n\n def _validate_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 4) * np.pi * 2\n rotate_x = np.random.uniform(-np.pi/4,np.pi/18)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(rotate_x) @ util.rotate_y( ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n\n def _train_scene(self, itr):\n fovy = np.random.uniform(self.fovy_range_min, self.fovy_range_max)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n if self.FLAGS.gpu_number == 8: # All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0,4]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1,5]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2,6]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3,7]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n elif self.FLAGS.gpu_number == 4: #All the results in the paper were generated using 8 3090 GPUs. 
We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n else:\n rotate_y = np.random.uniform(np.deg2rad(-180), np.deg2rad(180)) #All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n \n rotate_x = -np.random.uniform(self.elevation_range_min, self.elevation_range_max)\n # angle_front = np.deg2rad(45)\n prompt_index = get_view_direction(thetas= rotate_x, phis = rotate_y, front= self.angle_front)\n cam_radius = 3\n x = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n y = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n mv = util.translate(x, y, -cam_radius) @ (util.rotate_x(rotate_x) @ util.rotate_y(rotate_y))\n if ((itr+1)/self.FLAGS.batch) <=self.FLAGS.coarse_iter:\n rotate_y1 = np.random.uniform(0,np.pi*2) \n rotate_x1 = np.random.uniform(-np.pi,np.pi)\n normal_rotate = util.rotate_y_1(rotate_y1 )@ util.rotate_x_1(rotate_x1) \n else:\n normal_rotate = util.rotate_y_1(0)@util.rotate_x_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(rotate_y), torch.tensor([fovy])\n\n def __len__(self):\n if self.gif == True:\n return 100\n else:\n return 4 if self.validate else (self.FLAGS.iter + 1) * self.FLAGS.batch\n\n def __getitem__(self, itr):\n if self.gif:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._gif_scene(itr)\n elif self.validate:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._validate_scene(itr)\n else:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._train_scene(itr)\n\n return {\n 'mv' : mv,\n 'mvp' : mvp,\n 'campos' : campos,\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate': normal_rotate,\n 'prompt_index' : prompt_index,\n 'elev': elev,\n 'azim': azim,\n 'fov': fov\n }\n def collate(self, batch):\n iter_res, iter_spp = batch[0]['resolution'], batch[0]['spp']\n return {\n 'mv' : torch.cat(list([item['mv'] for item in batch]), dim=0),\n 'mvp' : torch.cat(list([item['mvp'] for item in batch]), dim=0),\n 'campos' : torch.cat(list([item['campos'] for item in batch]), dim=0),\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate' : torch.cat(list([item['normal_rotate'] for item in batch]), dim=0),\n # 'prompt_index' : torch.cat(list([item['prompt_index'] for item in batch]), dim=0),\n 'prompt_index' : np.array([item['prompt_index'] for item in batch], dtype=np.int32),\n 'elev' : np.array([item['elev'] for item in batch], dtype=np.float16),\n 'azim' : np.array([item['azim'] for item in batch], dtype=np.float16),\n 'fov' : torch.cat(list([item['fov'] for item in batch]), dim=0),\n }" }, { "identifier": "get_camera_params", "path": "dataset/dataset_mesh.py", "snippet": "def get_camera_params(resolution= 512, fov=45, 
elev_angle=-20, azim_angle=0):\n fovy = np.deg2rad(fov) \n elev = np.radians( elev_angle )\n azim = np.radians( azim_angle ) \n proj_mtx = util.perspective(fovy, resolution /resolution, 1, 50)\n mv = util.translate(0, 0, -3) @ (util.rotate_x(elev) @ util.rotate_y(azim))\n normal_rotate = util.rotate_y_1(-azim ) @ util.rotate_x_1(-elev) \n # nomral_rotate = util.rotate_y_1(0) @ util.rotate_x_1(0) \n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n bkgs = torch.ones(1, resolution, resolution, 3, dtype=torch.float32, device='cuda')\n return {\n 'mvp' : mvp[None, ...].cuda(),\n 'mv' : mv[None, ...].cuda(),\n 'campos' : campos[None, ...].cuda(),\n 'resolution' : [resolution, resolution], \n 'spp' : 1,\n 'background' : bkgs,\n 'normal_rotate' : normal_rotate[None,...].cuda(),\n 'elev_angle' : torch.tensor(elev_angle).cuda(),\n 'azim_angle' : torch.tensor(azim_angle).cuda(),\n 'fov' : torch.tensor(fovy).cuda(),\n }" }, { "identifier": "DMTetGeometry", "path": "geometry/dmtet_x_dreamer.py", "snippet": "class DMTetGeometry(torch.nn.Module):\n def __init__(self, grid_res, scale, FLAGS):\n super(DMTetGeometry, self).__init__()\n\n self.FLAGS = FLAGS\n self.grid_res = grid_res\n self.marching_tets = DMTet()\n \n tets = np.load('data/tets/{}_tets.npz'.format(self.grid_res))\n self.verts = torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * scale\n print(\"tet grid min/max\", torch.min(self.verts).item(), torch.max(self.verts).item())\n self.decoder = Decoder(multires=0 , AABB= self.getAABB(), mesh_scale= scale)\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.generate_edges()\n self.pos_encoder = CameraEncoder().to(self.verts.device)\n\n def generate_edges(self):\n with torch.no_grad():\n edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype = torch.long, device = \"cuda\")\n all_edges = self.indices[:,edges].reshape(-1,2) \n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n @torch.no_grad()\n def getAABB(self):\n return torch.min(self.verts, dim=0).values, torch.max(self.verts, dim=0).values\n\n def getMesh(self, material):\n pred= self.decoder(self.verts)\n \n self.sdf , self.deform = pred[:, 0], pred[:, 1:] \n v_deformed = self.verts + 1 / (self.grid_res ) * torch.tanh(self.deform)\n verts, faces = self.marching_tets(v_deformed, self.sdf, self.indices)\n \n imesh = mesh.Mesh(verts, faces, material=material)\n imesh = mesh.auto_normals(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None, if_normal=False, mode = 'geometry_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material) \n return render.render_mesh(glctx, \n opt_mesh, \n target['mvp'], \n target['campos'], \n lgt, \n target['resolution'], \n spp=target['spp'], \n msaa= True,\n background= target['background'],\n bsdf= bsdf,\n if_normal= if_normal,\n normal_rotate= target['normal_rotate'],\n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n \n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal= if_normal, mode = 
mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]]) # [B*2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z]) # [B * 2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n \n if iteration <=self.FLAGS.coarse_iter:\n t = torch.randint( guidance.min_step_early, guidance.max_step_early + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n pred_rgb_512 = buffers['shaded'][..., 0:4].permute(0, 3, 1, 2).contiguous() # [B, 4, 64, 64]\n latents = F.interpolate(pred_rgb_512, (64, 64), mode='bilinear', align_corners=False)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n \n else:\n t = torch.randint(guidance.min_step_late, guidance.max_step_late + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda')\n srgb = buffers['shaded'][...,0:3] #* buffers['shaded'][..., 3:4] # normal * mask\n # \n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [B, 3, 512, 512]\n latents = guidance.encode_imgs(pred_rgb_512)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n\n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings, index=indexs, came_posfeat=came_posfeat)\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred =noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond) # [B, 4, 64, 64]\n if iteration <= self.FLAGS.coarse_iter:\n w = (1 - guidance.alphas[t]) # [B]\n else:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w * (noise_pred - noise ) #*w1\n grad = torch.nan_to_num(grad)\n \n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n\n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), 
(32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask_sizes[i], mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "DLMesh", "path": "geometry/dlmesh_x_dreamer.py", "snippet": "class DLMesh(torch.nn.Module):\n def __init__(self, initial_guess, FLAGS):\n super(DLMesh, self).__init__()\n self.FLAGS = FLAGS\n self.initial_guess = initial_guess\n self.mesh = initial_guess.clone()\n self.pos_encoder = CameraEncoder().cuda()\n print(\"Base mesh has %d triangles and %d vertices.\" % (self.mesh.t_pos_idx.shape[0], self.mesh.v_pos.shape[0]))\n \n @torch.no_grad()\n def getAABB(self):\n return mesh.aabb(self.mesh)\n\n def getMesh(self, material):\n self.mesh.material = material\n\n imesh = mesh.Mesh(base=self.mesh)\n # Compute normals and tangent space\n imesh = mesh.auto_normals(imesh)\n imesh = mesh.compute_tangents(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None,if_normal=False, mode = 'appearance_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material)\n return render.render_mesh(glctx, \n opt_mesh,\n target['mvp'],\n target['campos'],\n lgt,\n target['resolution'], \n spp=target['spp'], \n msaa=True,\n background= target['background'] ,\n bsdf= bsdf,\n if_normal=if_normal,\n normal_rotate=target['normal_rotate'], \n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal = if_normal, mode = mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]])\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z])\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n\n if iteration <= self.FLAGS.coarse_iter:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_early, guidance.max_step_early+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n else:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 
2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_late, guidance.max_step_late+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n\n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [1, 3, H, W]\n latents = guidance.encode_imgs(pred_rgb_512)\n \n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states= text_embeddings, index=indexs, came_posfeat=came_posfeat)#.sample######################\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond)\n \n if guidance.sds_weight_strategy == 0:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 1:\n w = 1 / (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 2:\n if iteration <= self.FLAGS.coarse_iter:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n else:\n w = 1 / (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w* (noise_pred -noise) \n grad = torch.nan_to_num(grad)\n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n \n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), (32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask2.shape, mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "obj", "path": "render/obj.py", "snippet": "def _find_mat(materials, name):\ndef load_obj(filename, clear_ks=True, mtl_override=None):\ndef 
write_obj(folder, mesh, save_material=True):" }, { "identifier": "material", "path": "render/material.py", "snippet": "class Material(torch.nn.Module):\n def __init__(self, mat_dict):\n def __contains__(self, key):\n def __getitem__(self, key):\n def __setitem__(self, key, val):\n def __delitem__(self, key):\n def keys(self):\ndef load_mtl(fn, clear_ks=True):\ndef save_mtl(fn, material):\ndef _upscale_replicate(x, full_res):\ndef merge_materials(materials, texcoords, tfaces, mfaces):" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B = y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "mesh", "path": "render/mesh.py", "snippet": "class Mesh:\n def __init__(self, v_pos=None, t_pos_idx=None, v_nrm=None, t_nrm_idx=None, v_tex=None, t_tex_idx=None, v_tng=None, t_tng_idx=None, material=None, base=None):\n def copy_none(self, other):\n def clone(self):\ndef load_mesh(filename, mtl_override=None):\ndef aabb(mesh):\ndef compute_edges(attr_idx, 
return_inverse=False):\ndef compute_edge_to_face_mapping(attr_idx, return_inverse=False):\ndef unit_size(mesh):\ndef center_by_reference(base_mesh, ref_aabb, scale):\ndef auto_normals(imesh):\ndef compute_tangents(imesh):" }, { "identifier": "texture", "path": "render/texture.py", "snippet": "class texture2d_mip(torch.autograd.Function):\nclass Texture2D(torch.nn.Module):\n def forward(ctx, texture):\n def backward(ctx, dout):\n def __init__(self, init, min_max=None):\n def sample(self, texc, texc_deriv, filter_mode='linear-mipmap-linear'):\n def getRes(self):\n def getChannels(self):\n def getMips(self):\n def clamp_(self):\n def normalize_(self):\ndef create_trainable(init, res=None, auto_mipmaps=True, min_max=None):\ndef srgb_to_rgb(texture):\ndef rgb_to_srgb(texture):\ndef _load_mip2D(fn, lambda_fn=None, channels=None):\ndef load_texture2D(fn, lambda_fn=None, channels=None):\ndef _save_mip2D(fn, mip, mipidx, lambda_fn):\ndef save_texture2D(fn, tex, lambda_fn=None):" }, { "identifier": "mlptexture", "path": "render/mlptexture.py", "snippet": "class _MLP(torch.nn.Module):\nclass MLPTexture3D(torch.nn.Module):\n def __init__(self, cfg, loss_scale=1.0):\n def forward(self, x):\n def _init_weights(m):\n def __init__(self, AABB, channels = 3, internal_dims = 32, hidden = 1, min_max = None):\n def sample(self, texc):\n def clamp_(self):\n def cleanup(self):" }, { "identifier": "light", "path": "render/light.py", "snippet": "class cubemap_mip(torch.autograd.Function):\nclass EnvironmentLight(torch.nn.Module):\n def forward(ctx, cubemap):\n def backward(ctx, dout):\n def __init__(self, base):\n def xfm(self, mtx):\n def clone(self):\n def clamp_(self, min=None, max=None):\n def get_mip(self, roughness):\n def build_mips(self, cutoff=0.99):\n def regularizer(self):\n def shade(self, gb_pos, gb_normal, kd, ks, view_pos, specular=True):\ndef _load_env_hdr(fn, scale=1.0):\ndef load_env(fn, scale=1.0):\ndef save_env_map(fn, light):\ndef create_trainable_env_rnd(base_res, scale=0.5, bias=0.25):\n LIGHT_MIN_RES = 16\n MIN_ROUGHNESS = 0.08\n MAX_ROUGHNESS = 0.5" }, { "identifier": "render", "path": "render/render.py", "snippet": "def interpolate(attr, rast, attr_idx, rast_db=None):\ndef shade(\n gb_pos,\n gb_geometric_normal,\n gb_normal,\n gb_tangent,\n gb_texc,\n gb_texc_deriv,\n view_pos,\n lgt,\n material,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_layer(\n rast,\n rast_deriv,\n mesh,\n view_pos,\n lgt,\n resolution,\n spp,\n msaa,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_mesh(\n ctx,\n mesh,\n mtx_in,\n view_pos,\n lgt,\n resolution,\n spp = 1,\n num_layers = 1,\n msaa = False,\n background = None, \n bsdf = None,\n if_normal = False,\n normal_rotate = None,\n mode = 'geometry_modeling',\n if_flip_the_normal = False,\n if_use_bump = False\n ):\n def prepare_input_vector(x):\n def composite_buffer(key, layers, background, antialias):\ndef render_uv(ctx, mesh, resolution, mlp_texture):\ndef uv_padding(image, hole_mask, padding = 2, uv_padding_block = 4):\ndef render_uv1(ctx, mesh, resolution, mlp_texture, uv_padding_block):" }, { "identifier": "StableDiffusion", "path": "sd_cglora.py", "snippet": "class StableDiffusion(nn.Module):\n def __init__(self, \n device, \n mode='geometry', \n text= '', \n add_directional_text= False, \n batch = 1, \n guidance_weight = 100, \n sds_weight_strategy = 0,\n early_time_step_range = [0.02, 0.5],\n late_time_step_range = [0.02, 0.5]):\n 
super().__init__()\n\n self.device = device\n self.mode = mode\n self.text= text\n self.add_directional_text = add_directional_text\n self.batch = batch \n print(f'[INFO] loading stable diffusion...')\n model_key = \"stabilityai/stable-diffusion-2-1-base\"\n self.vae = AutoencoderKL.from_pretrained(model_key, subfolder=\"vae\",torch_dtype=torch.float16).to(self.device)\n self.tokenizer = CLIPTokenizer.from_pretrained(model_key, subfolder=\"tokenizer\",torch_dtype=torch.float16)\n self.text_encoder = CLIPTextModel.from_pretrained(model_key, subfolder=\"text_encoder\",torch_dtype=torch.float16).to(self.device)\n self.unet = UNet2DConditionModel.from_pretrained(model_key, subfolder=\"unet\",torch_dtype=torch.float16).to(self.device)\n if is_xformers_available():\n self.unet.enable_xformers_memory_efficient_attention()\n self.negative_text = ''\n if add_directional_text:\n self.text_z = []\n self.uncond_z = []\n self.index = []\n self.uncond_index = []\n for d in ['front', 'side', 'back', 'side']:\n text = f\"{self.text}, {d} view\"\n # text = f\"{d} view of {self.text}\"\n negative_text = f\"{self.negative_text}\"\n # if d == 'back': negative_text += \"face\"\n text_z, index = self.get_text_embeds([text], batch = 1)\n uncond_z, uncond_index =self.get_uncond_embeds([negative_text], batch = 1)\n self.text_z.append(text_z)\n self.uncond_z.append(uncond_z)\n self.index.append(index)\n self.uncond_index.append(uncond_index)\n self.text_z = torch.cat(self.text_z)\n self.uncond_z = torch.cat(self.uncond_z)\n self.index = torch.cat(self.index)\n self.uncond_index = torch.cat(self.uncond_index)\n else: \n self.text_z, self.index = self.get_text_embeds([self.text], batch = self.batch)\n self.uncond_z =self.get_uncond_embeds([self.negative_text], batch = self.batch)\n # del self.text_encoder\n self.scheduler = DPMSolverMultistepScheduler.from_pretrained(model_key, subfolder=\"scheduler\", torch_dtype=torch.float16)\n self.num_train_timesteps = self.scheduler.config.num_train_timesteps\n self.min_step_early = int(self.num_train_timesteps * early_time_step_range[0])\n self.max_step_early = int(self.num_train_timesteps * early_time_step_range[1])\n self.min_step_late = int(self.num_train_timesteps * late_time_step_range[0])\n self.max_step_late = int(self.num_train_timesteps * late_time_step_range[1])\n self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience\n self.guidance_weight = guidance_weight\n self.sds_weight_strategy = sds_weight_strategy\n print(f'[INFO] loaded stable diffusion!')\n\n for p in self.parameters():\n p.requires_grad_(False)\n self.unet_lora_params, self.names = inject_trainable_cglora(self.unet) # This will\n\n\n def get_text_embeds_global(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n \n global_embedding = text_embeddings[:,text_input['input_ids'].argmax(dim=-1),:].squeeze()\n \n return global_embedding\n\n\n def get_text_embeds(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n 
###################################################################\n index = text_input['input_ids'].argmax(dim=-1)\n #global_embedding = text_embeddings[:, index, :].squeeze()\n ##################################################################\n \n return text_embeddings, index\n \n def get_uncond_embeds(self, negative_prompt, batch):\n uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n \n if batch > 1:\n uncond_embeddings = uncond_embeddings.repeat(batch, 1, 1)\n ###################################################################\n index = uncond_input['input_ids'].argmax(dim=-1)\n # global_embedding = uncond_embeddings[:, index, :].squeeze()\n ##################################################################\n return uncond_embeddings,index\n\n def encode_imgs(self, imgs):\n # imgs: [B, 3, H, W]\n if self.mode == 'appearance_modeling':\n \n imgs = 2 * imgs - 1\n\n posterior = self.vae.encode(imgs).latent_dist\n latents = posterior.sample() * 0.18215\n\n return latents" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B 
= y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "Video", "path": "render/video.py", "snippet": "class Video():\n def __init__(self, path, name='video_log.mp4', mode='I', fps=30, codec='libx264', bitrate='16M') -> None:\n \n if path[-1] != \"/\":\n path += \"/\"\n \n self.writer = imageio.get_writer(path+name, mode=mode, fps=fps, codec=codec, bitrate=bitrate)\n \n def ready_image(self, image, write_video=True):\n # assuming channels last - as renderer returns it\n if len(image.shape) == 4: \n image = image.squeeze(0)[..., :3].detach().cpu().numpy()\n else:\n image = image[..., :3].detach().cpu().numpy()\n\n image = np.clip(np.rint(image*255.0), 0, 255).astype(np.uint8)\n\n if write_video:\n self.writer.append_data(image)\n\n return image\n\n def close(self):\n self.writer.close()" } ]
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
token_num: 12,095
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'],
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'],
next_line: 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]),
gold_snippet_index: 8
created_at: 2023-11-27 13:44:01+00:00
level: 16k
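Each record appears to pair the truncated source (cropped_code) and its retrieved context snippets with a ground-truth next_line and a gold_snippet_index into the context list. As a rough illustration of how a record like the one above could be consumed for next-line prediction, the following is a minimal sketch only: the records.jsonl path, the JSON Lines export format, the predict_next_line stub, and the reading of gold_snippet_index as an index into context are assumptions for illustration, not part of the dataset.

import json


def load_records(path):
    """Yield one record dict per line, assuming the dump is exported as JSON Lines."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)


def exact_match_accuracy(records, predict_next_line):
    """Score a next-line predictor against the next_line field of each record.

    predict_next_line(cropped_code, context) is a user-supplied stub and is
    not part of the dataset itself.
    """
    hits, total = 0, 0
    for rec in records:
        pred = predict_next_line(rec["cropped_code"], rec["context"])
        hits += int(pred.strip() == rec["next_line"].strip())
        total += 1
        # gold_snippet_index presumably points at the context entry whose
        # snippet contains the ground-truth line; surface it for inspection.
        gold = rec["context"][rec["gold_snippet_index"]]
        _ = gold.get("identifier"), gold.get("path")
    return hits / max(total, 1)


if __name__ == "__main__":
    # Hypothetical usage with a trivial baseline that always predicts "pass".
    acc = exact_match_accuracy(load_records("records.jsonl"),
                               lambda code, ctx: "pass")
    print(f"exact-match accuracy: {acc:.3f}")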
repo_name: zhenzhiwang/intercontrol
file_path: sample/global_joint_control.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = th.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = th.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = th.add(th.mul(data, std), mean)\n return output\n \n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. #- model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def global_joint_bfgs_optimize(self, x, model_kwargs=None):\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.humanml_to_global_joint(x)\n cond_joint = model_kwargs['y']['global_joint']\n mask = model_kwargs['y']['global_joint_mask']\n pred_joint = th.masked_select(pred_joint, mask.bool())\n cond_joint = th.masked_select(cond_joint, mask.bool())\n assert pred_joint.shape == cond_joint.shape, f\"pred_joint: {pred_joint.shape}, cond_joint: {cond_joint.shape}\"\n loss = self.mse_loss(pred_joint, cond_joint)\n return loss\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert pred_joint.shape[1] == 1\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n return pred_joint\n \n def global_joint_position_conditioning(self, x, model_kwargs=None):\n n_joints = 22 if x.shape[1] == 263 else 21\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n #pred_joint.requires_grad = True\n assert pred_joint.shape == model_kwargs['y']['global_joint'].shape == model_kwargs['y']['global_joint_mask'].shape, f\"pred_joint: {pred_joint.shape}, global_joint: {model_kwargs['y']['global_joint'].shape}, global_joint_mask: {model_kwargs['y']['global_joint_mask'].shape}\"\n loss = self.global_joint_condition_loss(pred_joint, model_kwargs['y']['global_joint'], model_kwargs['y']['global_joint_mask'])\n diff_scale = ((pred_joint.clamp(min=1e-4) - model_kwargs['y']['global_joint'].clamp(min=1e-4)).abs() / model_kwargs['y']['global_joint'].clamp(min=1e-4).abs()).mean().item()\n #loss.requires_grad = True\n gradient = th.autograd.grad(loss, x, \n grad_outputs=th.ones_like(loss),\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradient.clone().detach(), loss.item(), diff_scale\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n use_posterior=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n #assert 
use_posterior == False\n p_mean_variance_func = self.p_mean_variance_bfgs_posterior if use_posterior else self.p_mean_variance_bfgs_x0\n out = p_mean_variance_func(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n k_first = self.bfgs_times_first,\n k_last = self.bfgs_times_last,\n )\n \n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n \n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n \n def condition_mean_with_grad(self, cond_fn, x_mean, x_var, t, strength, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n with th.enable_grad():\n x_mean = x_mean.clone().detach().requires_grad_(True)\n gradient, loss_value, diff_scale = cond_fn(x_mean, model_kwargs) # p_mean_var[\"mean\"]\n gradient_guidance = - strength * gradient.float() # x_var.clamp(min = 0.01) \n new_mean = (x_mean + gradient_guidance).clone().detach()\n return new_mean, loss_value, gradient_guidance.clone().detach().abs().cpu(), x_mean.clone().detach().abs().cpu(), diff_scale\n\n\n def condition_mean_bfgs(self, x_mean, num_condition, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n \n with th.enable_grad():\n x_mean = x_mean.clone().detach().contiguous().requires_grad_(True)\n def closure():\n lbfgs.zero_grad()\n objective = self.global_joint_bfgs_optimize(x_mean, model_kwargs)\n objective.backward()\n return objective\n lbfgs = optim.LBFGS([x_mean],\n history_size=10, \n max_iter=4, \n line_search_fn=\"strong_wolfe\")\n for _ in range(num_condition):\n lbfgs.step(closure)\n #loss_value = self.global_joint_bfgs_optimize(x_mean, model_kwargs).item()\n return x_mean #, loss_value\n\n def p_mean_variance_bfgs_x0(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_output = self.condition_mean_bfgs(model_output, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n \n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def p_mean_variance_bfgs_posterior(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] 
tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_mean = self.condition_mean_bfgs(model_mean, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def training_losses(self, model, x_start, 
t, model_kwargs=None, noise=None, dataset=None,\n use_posterior = True,\n k_first = 1,\n k_last = 10,\n t_threshold = 10,):\n \"\"\"\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key \"loss\" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n \"\"\"\n\n # enc = model.model._modules['module']\n model = self._wrap_model(model)\n \n enc = model.model\n mask = model_kwargs['y']['mask']\n get_xyz = lambda sample: enc.rot2xyz(sample, mask=None, pose_rep=enc.pose_rep, translation=enc.translation,\n glob=enc.glob,\n # jointstype='vertices', # 3.4 iter/sec # USED ALSO IN MotionCLIP\n jointstype='smpl', # 3.4 iter/sec\n vertstrans=False)\n\n if model_kwargs is None:\n model_kwargs = {}\n if noise is None:\n noise = th.randn_like(x_start)\n x_t = self.q_sample(x_start, t, noise=noise, model_kwargs=model_kwargs)\n \n #assert k_first == 1, \"k_first must be 1, {}\".format(k_first)\n #assert k_last == 10, \"k_last must be 10, {}\".format(k_last)\n assert use_posterior == True, \"use_posterior must be True, {}\".format(use_posterior)\n if use_posterior:\n '''\n # loss-guided condition in training time\n if t[0] >= t_threshold:\n assert (t >= t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n num_condition = k_first # else k_last\n else:\n num_condition = k_last\n assert (t < t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n '''\n num_condition = k_first\n x_t = self.condition_mean_bfgs(x_t, num_condition, model_kwargs=model_kwargs)\n\n terms = {}\n if self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:\n model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)\n\n target = {\n ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )[0],\n ModelMeanType.START_X: x_start,\n ModelMeanType.EPSILON: noise,\n }[self.model_mean_type]\n\n assert model_output.shape == target.shape == x_start.shape, \"model_output {}, target {}, x_start {}\".format(model_output.shape ,target.shape ,x_start.shape) # [bs, njoints, nfeats, nframes]\n\n terms[\"rot_mse\"] = self.masked_l2(target, model_output, mask) # mean_flat(rot_mse)\n\n terms[\"loss\"] = terms[\"rot_mse\"] + terms.get('vb', 0.) 
+\\\n (self.lambda_vel * terms.get('vel_mse', 0.)) +\\\n (self.lambda_rcxyz * terms.get('rcxyz_mse', 0.)) + \\\n (self.lambda_fc * terms.get('fc', 0.))\n else:\n raise NotImplementedError(self.loss_type)\n\n return terms" }, { "identifier": "SpacedDiffusion", "path": "diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "edit_control_args", "path": "utils/parser_util.py", "snippet": "def edit_control_args():\n parser = ArgumentParser()\n # args specified by the user: (all other will be loaded from the model)\n add_base_options(parser)\n add_sampling_options(parser)\n add_edit_inpainting_options(parser)\n return parse_and_load_from_model(parser)" }, { "identifier": "load_controlmdm_and_diffusion", "path": "utils/model_util.py", "snippet": "def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): \n model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass)\n model_path = args.model_path\n print(f\"Loading checkpoints from [{model_path}]...\")\n state_dict = torch.load(model_path, map_location='cpu')\n load_model_wo_clip(model, state_dict)\n model.mean = data.dataset.t2m_dataset.mean\n model.std = data.dataset.t2m_dataset.std\n\n model.to(device)\n model.eval() # disable random masking\n model = wrap_model(model, args)\n return model, diffusion" }, { "identifier": "dist_util", "path": "utils/dist_util.py", "snippet": "GPUS_PER_NODE = 8\nSETUP_RETRY_COUNT = 3\ndef setup_dist(device=0):\ndef dev():\ndef load_state_dict(path, 
**kwargs):\ndef sync_params(params):\ndef _find_free_port():" }, { "identifier": "wrap_model", "path": "model/cfg_sampler.py", "snippet": "def wrap_model(model, args):\n if args.guidance_param not in [0., 1.]:\n return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler\n elif args.guidance_param == 0:\n return UnconditionedModel(model)\n else:\n return model" }, { "identifier": "get_dataset_loader", "path": "data_loaders/get_data.py", "snippet": "def get_dataset_loader(name, batch_size, num_frames, split='train', load_mode='train', opt=None, short_db=False, cropping_sampler=False, size=None):\n if load_mode == 'text_only':\n load_mode = 'train'\n dataset = get_dataset(name, num_frames, split, load_mode, batch_size, opt, short_db, cropping_sampler, size)\n collate = get_collate_fn(name, load_mode)\n\n n_workers = 1 if load_mode in ['movement_train', 'evaluator_train'] else 8\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True,\n num_workers=n_workers, drop_last=True, collate_fn=collate\n )\n\n return loader" }, { "identifier": "recover_from_ric", "path": "data_loaders/humanml/scripts/motion_process.py", "snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions" }, { "identifier": "get_control_mask", "path": "data_loaders/humanml_utils.py", "snippet": "def get_control_mask(mask_name, shape, **kwargs):\n assert mask_name == \"global_joint\", \"mask_name must be 'global_joint', got {}\".format(mask_name)\n mask = np.zeros(shape)\n mask = np.maximum(mask, get_global_joint_mask(shape, **kwargs))\n return mask" }, { "identifier": "HML_JOINT_NAMES", "path": "data_loaders/humanml_utils.py", "snippet": "HML_JOINT_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',\n 'spine3',\n 'left_foot',\n 'right_foot',\n 'neck',\n 'left_collar',\n 'right_collar',\n 'head',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',\n 'right_wrist',\n]" }, { "identifier": "plot_3d_motion", "path": "data_loaders/humanml/utils/plot_script.py", "snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=(8, 8), fps=120, radius=4,\n vis_mode='default', gt_frames=[], handshake_size=0, blend_size=0, step_sizes=[], lengths = [], joints2=None, painting_features=[], guidance=None):\n matplotlib.use('Agg')\n \"\"\"\n A wrapper around explicit_plot_3d_motion that \n uses gt_frames to determine the colors of the frames\n \"\"\"\n data = joints.copy().reshape(len(joints), -1, 3)\n frames_number = data.shape[0]\n frame_colors = ['blue' if index in gt_frames else 'orange' for index in range(frames_number)]\n if vis_mode == 'unfold':\n frame_colors = ['purple'] *handshake_size + ['blue']*blend_size + ['orange'] *(120-handshake_size*2-blend_size*2) +['orange']*blend_size\n frame_colors = ['orange'] *(120-handshake_size-blend_size) + ['orange']*blend_size + frame_colors*1024\n elif vis_mode == 'unfold_arb_len':\n for 
ii, step_size in enumerate(step_sizes):\n if ii == 0:\n frame_colors = ['orange']*(step_size - handshake_size - blend_size) + ['orange']*blend_size + ['purple'] * (handshake_size//2)\n continue\n if ii == len(step_sizes)-1:\n frame_colors += ['purple'] * (handshake_size//2) + ['orange'] * blend_size + ['orange'] * (lengths[ii] - handshake_size - blend_size)\n continue\n frame_colors += ['purple'] * (handshake_size // 2) + ['orange'] * blend_size + ['orange'] * (\n lengths[ii] - 2 * handshake_size - 2 * blend_size) + ['orange'] * blend_size + \\\n ['purple'] * (handshake_size // 2)\n elif vis_mode == 'gt':\n frame_colors = ['blue'] * frames_number\n explicit_plot_3d_motion(save_path, kinematic_tree, joints, title, dataset, figsize=figsize, fps=fps, radius=radius, \n vis_mode=vis_mode, frame_colors=frame_colors, joints2=joints2, painting_features=painting_features, guidance=guidance)" }, { "identifier": "ControlMDM", "path": "model/ControlMDM.py", "snippet": "class ControlMDM(MDM):\n\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, args=None, **kargs):\n\n super(ControlMDM, self).__init__(modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim, ff_size, num_layers, num_heads, dropout,\n ablation, activation, legacy, data_rep, dataset, clip_dim,\n arch, emb_trans_dec, clip_version, **kargs)\n self.args = args\n self.num_layers = num_layers\n self.multi_person = args.multi_person\n self.upper_orientation_index = [0, 16, 17] # root, l_shoulder, r_shoulder\n self.lower_orientation_index = [0, 1, 2] # root, l_hip, r_hip\n\n # linear layers init with zeros\n if self.dataset == 'kit':\n self.first_zero_linear = nn.Linear(21*3*2 + 2*3, self.latent_dim)\n elif self.dataset == 'humanml':\n self.first_zero_linear = nn.Linear(22*3*2 + 2*3, self.latent_dim)\n else:\n raise NotImplementedError('Supporting only kit and humanml dataset, got {}'.format(self.dataset))\n \n nn.init.zeros_(self.first_zero_linear.weight)\n nn.init.zeros_(self.first_zero_linear.bias)\n self.mid_zero_linear = nn.ModuleList(\n [nn.Linear(self.latent_dim, self.latent_dim) for _ in range(self.num_layers)])\n for m in self.mid_zero_linear:\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)\n\n if self.arch == 'trans_enc':\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n del self.seqTransEncoder\n self.seqTransEncoder_mdm = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n self.seqTransEncoder_control = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n else:\n raise ValueError('Supporting only trans_enc arch.')\n\n self.freeze_block(self.input_process)\n self.freeze_block(self.sequence_pos_encoder)\n self.freeze_block(self.seqTransEncoder_mdm)\n self.freeze_block(self.embed_timestep)\n if 'text' in self.cond_mode:\n self.freeze_block(self.embed_text)\n self.freeze_block(self.output_process)\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = torch.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = torch.tensor(self.mean, 
dtype=data.dtype, device=data.device, requires_grad=False)\n output = torch.add(torch.mul(data, std), mean)\n return output\n \n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:,:, 1] - triangles[:, :,0]\n v2 = triangles[:,:, 2] - triangles[:,:,0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = torch.cross(v2, v1, dim=-1)\n\n # Normalize the normal vectors to unit length\n normals = nn.functional.normalize(normals, dim=-1)\n return normals\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n curr_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert curr_joint.shape[1] == 1\n curr_joint = recover_from_ric(curr_joint, n_joints)\n curr_joint = curr_joint.view(-1, *curr_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n if self.multi_person:\n curr_joint[1::2, :,2,:] *= -1\n curr_joint[1::2, :,0,:] *= -1\n curr_joint[1::2, :,2,:] += 2\n\n # more than 3 people\n #curr_joint[1, :,2,:] *= -1\n #curr_joint[1, :,0,:] *= -1\n #curr_joint[1, :,2,:] += 2\n #curr_joint[2, :,0,:] += 1\n return curr_joint\n\n def forward(self, x, timesteps, y=None):\n bs, njoints, nfeats, seqlen = x.shape\n control_bs, n_global_joints, xyz_dim, control_frames = y['global_joint'].shape\n assert bs == control_bs and seqlen == control_frames, \"bs {} != {} or seqlen {} != {}\".format(bs, control_bs, seqlen, control_frames)\n assert xyz_dim ==3, \"xyz_dim {} != 3\".format(xyz_dim)\n # prepare global joints for controlmdm\n curr_joint = self.humanml_to_global_joint(x).clone().detach() # [bs, njoints, 3, seqlen]\n curr_joint.requires_grad = False\n\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n\n # controlmdm\n # orientation\n upper_triangles = curr_joint[:,self.upper_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n lower_triangles = curr_joint[:,self.lower_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n upper_orientation = self.compute_triangle_normals(upper_triangles) # [seqlen, bs, 3]\n lower_orientation = self.compute_triangle_normals(lower_triangles) # [seqlen, bs, 3]\n\n # relative position to joint\n '''\n relative_position = torch.zeros_like(curr_joint, device = xseq.device, dtype=torch.float32) # [bs, njoints, 3, seqlen]\n relative_position[1::2,:,:,:] = ((y['global_joint'][::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,1::2,:,:].unsqueeze(2))*y['global_joint_mask'][::2,:,:,:].bool().float()).float().sum(1)\n relative_position[::2,:,:,:] = ((y['global_joint'][1::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,::2,:,:].unsqueeze(2))*y['global_joint_mask'][1::2,:,:,:].bool().float()).float().sum(1)\n '''\n relative_position = ((y['global_joint'].float() - curr_joint)*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_position = 
relative_position.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n\n # relative position to root\n relative_root = ((y['global_joint'].float() - curr_joint[:,[0],:,:])*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_root = relative_root.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n global_joint_feat = torch.cat((relative_position, relative_root, upper_orientation, lower_orientation), axis=-1) # [seqlen, bs, 22*3 *2 +3 +3]\n \n global_joint_feat = self.first_zero_linear(global_joint_feat) # [seqlen, bs, d]\n control_input = xseq + torch.cat((torch.zeros_like(emb, device = xseq.device, dtype=torch.float32), global_joint_feat), axis=0) # [seqlen+1, bs, d]\n control_output_list = self.seqTransEncoder_control.return_all_layers(control_input) # [seqlen+1, bs, d]\n for i in range(self.num_layers):\n control_output_list[i] = self.mid_zero_linear[i](control_output_list[i])\n \n output = self.seqTransEncoder_mdm.forward_with_condition(xseq, control_output_list)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output\n\n def trainable_parameters(self):\n return [p for name, p in self.named_parameters() if p.requires_grad]\n # return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n \n def trainable_parameter_names(self):\n return [name for name, p in self.named_parameters() if p.requires_grad]\n\n def freeze_block(self, block):\n block.eval()\n for p in block.parameters():\n p.requires_grad = False\n\n def unfreeze_block(self, block):\n block.train()\n for p in block.parameters():\n p.requires_grad = True\n \n def forward_without_control(self, x, timesteps, y=None): #\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder_mdm(xseq)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output" } ]
from diffusion.control_diffusion import ControlGaussianDiffusion
from diffusion.respace import SpacedDiffusion
from utils.fixseed import fixseed
from utils.parser_util import edit_control_args
from utils.model_util import load_controlmdm_and_diffusion
from utils import dist_util
from model.cfg_sampler import wrap_model
from data_loaders.get_data import get_dataset_loader
from data_loaders.humanml.scripts.motion_process import recover_from_ric
from data_loaders.humanml_utils import get_control_mask, HML_JOINT_NAMES
from data_loaders.humanml.utils.plot_script import plot_3d_motion
from model.ControlMDM import ControlMDM
import os
import numpy as np
import torch
import data_loaders.humanml.utils.paramUtil as paramUtil
import shutil
10812
# This code is based on https://github.com/openai/guided-diffusion """ Generate a large batch of image samples from a model and save them as a large numpy array. This can be used to produce samples for FID evaluation. """ def main(): args = edit_control_args() assert args.multi_person == False, 'multi-person is not supported for this script' fixseed(args.seed) out_path = args.output_dir name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 fps = 12.5 if args.dataset == 'kit' else 20 dist_util.setup_dist(args.device) if out_path == '': out_path = os.path.join(os.path.dirname(args.model_path), 'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed)) if args.text_condition != '': out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') print('Loading dataset...') assert args.num_samples <= args.batch_size, \ f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' # So why do we need this check? In order to protect GPU from a memory overload in the following line. # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. # If it doesn't, and you still want to sample more prompts, run this script with different seeds # (specify through the --seed flag) args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples data = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=max_frames, split='test', load_mode='train', size=args.num_samples) # in train mode, you get both text and motion. # data.fixed_length = n_frames total_num_samples = args.num_samples * args.num_repetitions print("Creating model and diffusion...")
# This code is based on https://github.com/openai/guided-diffusion """ Generate a large batch of image samples from a model and save them as a large numpy array. This can be used to produce samples for FID evaluation. """ def main(): args = edit_control_args() assert args.multi_person == False, 'multi-person is not supported for this script' fixseed(args.seed) out_path = args.output_dir name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 fps = 12.5 if args.dataset == 'kit' else 20 dist_util.setup_dist(args.device) if out_path == '': out_path = os.path.join(os.path.dirname(args.model_path), 'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed)) if args.text_condition != '': out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') print('Loading dataset...') assert args.num_samples <= args.batch_size, \ f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' # So why do we need this check? In order to protect GPU from a memory overload in the following line. # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. # If it doesn't, and you still want to sample more prompts, run this script with different seeds # (specify through the --seed flag) args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples data = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=max_frames, split='test', load_mode='train', size=args.num_samples) # in train mode, you get both text and motion. # data.fixed_length = n_frames total_num_samples = args.num_samples * args.num_repetitions print("Creating model and diffusion...")
DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion
0
2023-11-27 05:28:02+00:00
16k
moonbow721/DPoser
run/completion.py
[ { "identifier": "DistributedEvalSampler", "path": "lib/dataset/EvaSampler.py", "snippet": "class DistributedEvalSampler(Sampler):\n r\"\"\"\n DistributedEvalSampler is different from DistributedSampler.\n It does NOT add extra samples to make it evenly divisible.\n DistributedEvalSampler should NOT be used for training. The distributed processes could hang forever.\n See this issue for details: https://github.com/pytorch/pytorch/issues/22584\n shuffle is disabled by default\n\n DistributedEvalSampler is for evaluation purpose where synchronization does not happen every epoch.\n Synchronization should be done outside the dataloader loop.\n\n Sampler that restricts data loading to a subset of the dataset.\n\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each\n process can pass a :class`~torch.utils.data.DistributedSampler` instance as a\n :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the\n original dataset that is exclusive to it.\n\n .. note::\n Dataset is assumed to be of constant size.\n\n Arguments:\n dataset: Dataset used for sampling.\n num_replicas (int, optional): Number of processes participating in\n distributed training. By default, :attr:`rank` is retrieved from the\n current distributed group.\n rank (int, optional): Rank of the current process within :attr:`num_replicas`.\n By default, :attr:`rank` is retrieved from the current distributed\n group.\n shuffle (bool, optional): If ``True`` (default), sampler will shuffle the\n indices.\n seed (int, optional): random seed used to shuffle the sampler if\n :attr:`shuffle=True`. This number should be identical across all\n processes in the distributed group. Default: ``0``.\n\n .. warning::\n In distributed mode, calling the :meth`set_epoch(epoch) <set_epoch>` method at\n the beginning of each epoch **before** creating the :class:`DataLoader` iterator\n is necessary to make shuffling work properly across multiple epochs. Otherwise,\n the same ordering will be always used.\n\n Example::\n\n >>> sampler = DistributedSampler(dataset) if is_distributed else None\n >>> loader = DataLoader(dataset, shuffle=(sampler is None),\n ... sampler=sampler)\n >>> for epoch in range(start_epoch, n_epochs):\n ... if is_distributed:\n ... sampler.set_epoch(epoch)\n ... 
train(loader)\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=False, seed=0):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.total_size = len(self.dataset) # true value without extra samples\n # indices = list(range(self.total_size))\n # indices = indices[self.rank:self.total_size:self.num_replicas]\n # self.num_samples = len(indices) # true value without extra samples\n\n # split dataset in order [c1] [c2] [c3] ..\n self.base_num = self.total_size // num_replicas\n self.mod_num = self.total_size % num_replicas\n if rank < self.mod_num:\n self.num_samples = self.base_num + 1\n else:\n self.num_samples = self.base_num\n\n self.shuffle = shuffle\n self.seed = seed\n\n def __iter__(self):\n if self.shuffle:\n # deterministically shuffle based on epoch and seed\n g = torch.Generator()\n g.manual_seed(self.seed + self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n assert 0, 'not supported'\n else:\n indices = list(range(len(self.dataset)))\n\n # subsample\n if self.rank <= self.mod_num:\n # preceding batches have larger size\n start_idx = self.rank * (self.base_num + 1)\n indices = indices[start_idx:start_idx + self.num_samples]\n else:\n start_idx = self.mod_num * (self.base_num + 1) + \\\n (self.rank - self.mod_num) * self.base_num\n indices = indices[start_idx:start_idx + self.num_samples]\n\n # indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n yield from iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n r\"\"\"\n Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas\n use a different random ordering for each epoch. 
Otherwise, the next iteration of this\n sampler will yield the same ordering.\n\n Arguments:\n epoch (int): _epoch number.\n \"\"\"\n self.epoch = epoch" }, { "identifier": "create_mask", "path": "lib/utils/misc.py", "snippet": "def create_mask(body_poses, part='legs', observation_type='noise'):\r\n assert len(body_poses.shape) == 2 and body_poses.shape[1] % N_POSES == 0\r\n rot_N = body_poses.shape[1] // N_POSES\r\n assert rot_N in [3, 6]\r\n # for axis-angle or rot6d\r\n mask_joints = getattr(BodyPartIndices, part)\r\n mask = body_poses.new_ones(body_poses.shape)\r\n mask_indices = torch.tensor(mask_joints).view(-1, 1) * rot_N + torch.arange(rot_N).view(1, -1)\r\n mask_indices = mask_indices.flatten()\r\n mask[:, mask_indices] = 0\r\n\r\n # masked data as Gaussian noise\r\n observation = body_poses.clone()\r\n if observation_type == 'noise':\r\n observation[:, mask_indices] = torch.randn_like(observation[:, mask_indices])\r\n # load the mean pose as observation\r\n else:\r\n batch_size = body_poses.shape[0]\r\n smpl_mean_params = np.load(constants.SMPL_MEAN_PATH)\r\n rot6d_body_poses = torch.tensor(smpl_mean_params['pose'][6:,], dtype=torch.float32, device=body_poses.device) # [138]\r\n axis_body_pose = rot6d_to_axis_angle(rot6d_body_poses.reshape(-1, 6)).reshape(-1) # [69]\r\n if rot_N == 3:\r\n observation[:, mask_indices] = axis_body_pose[None, mask_indices].repeat(batch_size, 1)\r\n elif rot_N == 6:\r\n observation[:, mask_indices] = rot6d_body_poses[None, mask_indices].repeat(batch_size, 1)\r\n else:\r\n raise NotImplementedError\r\n\r\n return mask, observation\r" }, { "identifier": "linear_interpolation", "path": "lib/utils/misc.py", "snippet": "def linear_interpolation(A, B, frames):\r\n alpha = torch.linspace(0, 1, frames, device=A.device)[:, None]\r\n interpolated = (1 - alpha) * A + alpha * B\r\n return interpolated\r" }, { "identifier": "ScoreModelFC", "path": "lib/algorithms/advanced/model.py", "snippet": "class ScoreModelFC(nn.Module):\n \"\"\"\n Independent condition feature projection layers for each block\n \"\"\"\n\n def __init__(self, config, n_poses=21, pose_dim=6, hidden_dim=64,\n embed_dim=32, n_blocks=2):\n super(ScoreModelFC, self).__init__()\n\n self.config = config\n self.n_poses = n_poses\n self.joint_dim = pose_dim\n self.n_blocks = n_blocks\n\n self.act = get_act(config)\n\n self.pre_dense = nn.Linear(n_poses * pose_dim, hidden_dim)\n self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)\n self.pre_dense_cond = nn.Linear(hidden_dim, hidden_dim)\n self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)\n self.dropout = nn.Dropout(p=config.model.dropout)\n\n # time embedding\n self.time_embedding_type = config.model.embedding_type.lower()\n if self.time_embedding_type == 'fourier':\n self.gauss_proj = GaussianFourierProjection(embed_dim=embed_dim, scale=config.model.fourier_scale)\n elif self.time_embedding_type == 'positional':\n self.posit_proj = functools.partial(get_timestep_embedding, embedding_dim=embed_dim)\n else:\n assert 0\n\n self.shared_time_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n self.act,\n )\n self.register_buffer('sigmas', torch.tensor(get_sigmas(config), dtype=torch.float))\n\n for idx in range(n_blocks):\n setattr(self, f'b{idx + 1}_dense1', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_dense1_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))\n\n setattr(self, f'b{idx + 1}_dense2', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, 
f'b{idx + 1}_dense2_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx + 1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))\n\n self.post_dense = nn.Linear(hidden_dim, n_poses * pose_dim)\n\n def forward(self, batch, t, condition=None, mask=None):\n \"\"\"\n batch: [B, j*3] or [B, j*6]\n t: [B]\n Return: [B, j*3] or [B, j*6] same dim as batch\n \"\"\"\n bs = batch.shape[0]\n\n # batch = batch.view(bs, -1) # [B, j*3]\n\n # time embedding\n if self.time_embedding_type == 'fourier':\n # Gaussian Fourier features embeddings.\n used_sigmas = t\n temb = self.gauss_proj(torch.log(used_sigmas))\n elif self.time_embedding_type == 'positional':\n # Sinusoidal positional embeddings.\n timesteps = t\n used_sigmas = self.sigmas[t.long()]\n temb = self.posit_proj(timesteps)\n else:\n raise ValueError(f'time embedding type {self.time_embedding_type} unknown.')\n\n temb = self.shared_time_embed(temb)\n\n h = self.pre_dense(batch)\n h += self.pre_dense_t(temb)\n h = self.pre_gnorm(h)\n h = self.act(h)\n h = self.dropout(h)\n\n for idx in range(self.n_blocks):\n h1 = getattr(self, f'b{idx + 1}_dense1')(h)\n h1 += getattr(self, f'b{idx + 1}_dense1_t')(temb)\n h1 = getattr(self, f'b{idx + 1}_gnorm1')(h1)\n h1 = self.act(h1)\n # dropout, maybe\n h1 = self.dropout(h1)\n\n h2 = getattr(self, f'b{idx + 1}_dense2')(h1)\n h2 += getattr(self, f'b{idx + 1}_dense2_t')(temb)\n h2 = getattr(self, f'b{idx + 1}_gnorm2')(h2)\n h2 = self.act(h2)\n # dropout, maybe\n h2 = self.dropout(h2)\n\n h = h + h2\n\n res = self.post_dense(h) # [B, j*3]\n\n ''' normalize the output '''\n if self.config.model.scale_by_sigma:\n used_sigmas = used_sigmas.reshape((bs, 1))\n res = res / used_sigmas\n\n return res" }, { "identifier": "sde_lib", "path": "lib/algorithms/advanced/sde_lib.py", "snippet": "class SDE(abc.ABC):\n class RSDE(self.__class__):\nclass VPSDE(SDE):\nclass subVPSDE(SDE):\nclass VESDE(SDE):\n def __init__(self, N):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def reverse(self, score_fn, probability_flow=False):\n def __init__(self):\n def T(self):\n def sde(self, x, t, condition=None, mask=None, guide=False):\n def discretize(self, x, t, condition=None, mask=None):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n def __init__(self, beta_min=0.1, beta_max=20, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def return_alpha_sigma(self, t):\n def __init__(self, sigma_min=0.01, sigma_max=50, N=1000, T=1):\n def T(self):\n def sde(self, x, t):\n def marginal_prob(self, x, t):\n def prior_sampling(self, shape):\n def prior_logp(self, z):\n def discretize(self, x, t):\n def return_alpha_sigma(self, t):\n G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))\n N = self.N\n T = self.T\n N = np.prod(shape[1:])\n G = sqrt_beta\n N = np.prod(shape[1:])\n N = np.prod(shape[1:])\n G = torch.sqrt(sigma ** 2 - adjacent_sigma ** 2)" }, { "identifier": "sampling", "path": "lib/algorithms/advanced/sampling.py", "snippet": "_CORRECTORS = {}\n_PREDICTORS = {}\ndef register_predictor(cls=None, *, name=None):\n def _register(cls):\ndef 
register_corrector(cls=None, *, name=None):\n def _register(cls):\ndef get_predictor(name):\ndef get_corrector(name):\ndef get_sampling_fn(config, sde, shape, inverse_scaler, eps, device=None):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def update_fn_guide(self, x_t, t, observation, mask, condition=None, grad_step=1.0):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def vesde_update_fn(self, x, t):\n def vpsde_update_fn(self, x, t):\n def update_fn(self, x, t):\n def __init__(self, sde, score_fn, probability_flow=False):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\n def __init__(self, sde, score_fn, snr, n_steps):\n def update_fn(self, x, t, observation, mask):\ndef shared_predictor_update_fn(x, t, observation, mask, sde, model, predictor, probability_flow, continuous):\ndef shared_corrector_update_fn(x, t, observation, mask, sde, model, corrector, continuous, snr, n_steps):\ndef get_pc_sampler(sde, shape, predictor, corrector, inverse_scaler, snr,\n n_steps=1, probability_flow=False, continuous=False,\n denoise=True, eps=1e-3, device='cuda'):\n def get_imputation_update_fn(update_fn):\n def imputation_update_fn(x, vec_t, observation, mask, model, args):\n def pc_sampler(model, observation=None, mask=None, z=None, start_step=0, args=None):\ndef get_ode_sampler(sde, shape, inverse_scaler,\n denoise=False, rtol=1e-5, atol=1e-5,\n method='RK45', eps=1e-3, device='cuda'):\n def denoise_update_fn(model, x):\n def drift_fn(model, x, t):\n def ode_sampler(model, z=None):\n def ode_func(t, x):\nclass Predictor(abc.ABC):\nclass Corrector(abc.ABC):\nclass EulerMaruyamaPredictor(Predictor):\nclass ReverseDiffusionPredictor(Predictor):\nclass AncestralSamplingPredictor(Predictor):\nclass NonePredictor(Predictor):\nclass LangevinCorrector(Corrector):\nclass AnnealedLangevinDynamics(Corrector):\nclass NoneCorrector(Corrector):" }, { "identifier": "ExponentialMovingAverage", "path": "lib/algorithms/ema.py", "snippet": "class ExponentialMovingAverage:\n \"\"\"\n Maintains (exponential) moving average of a set of parameters.\n \"\"\"\n\n def __init__(self, parameters, decay=0.999, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently maintained parameters.\n\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = 
self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param))\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = state_dict['shadow_params']" }, { "identifier": "utils", "path": "lib/algorithms/advanced/utils.py", "snippet": "_MODELS = {}\ndef register_model(cls=None, *, name=None):\n def _register(cls):\ndef get_model(name):\ndef get_sigmas(config):\ndef get_ddpm_params(config):\ndef create_model(config):\ndef get_model_fn(model, train=False):\n def model_fn(x, labels, condition, mask):\ndef get_score_fn(sde, model, train=False, continuous=False):\n def score_fn(x, t, condition, mask):\n def score_fn(x, t, condition, mask):\ndef to_flattened_numpy(x):\ndef from_flattened_numpy(x, shape):" }, { "identifier": "AMASSDataset", "path": "lib/dataset/AMASS.py", "snippet": "class AMASSDataset(torch.utils.data.Dataset):\r\n def __init__(self, root_path, version='version0', subset='train',\r\n sample_interval=None, rot_rep='rot6d', return_shape=False,\r\n normalize=True, min_max=True):\r\n\r\n self.root_path = root_path\r\n self.version = version\r\n assert subset in ['train', 'valid', 'test']\r\n self.subset = subset\r\n self.sample_interval = sample_interval\r\n assert rot_rep in ['axis', 'rot6d']\r\n self.rot_rep = rot_rep\r\n self.return_shape = return_shape\r\n self.normalize = normalize\r\n self.min_max = min_max\r\n\r\n self.poses, self.shapes = self.read_data()\r\n\r\n if self.sample_interval:\r\n self._sample(sample_interval)\r\n if self.normalize:\r\n if self.min_max:\r\n self.min_poses, self.max_poses, self.min_shapes, self.max_shapes = self.Normalize()\r\n else:\r\n self.mean_poses, self.std_poses, self.mean_shapes, self.std_shapes = self.Normalize()\r\n\r\n self.real_data_len = len(self.poses)\r\n\r\n def __getitem__(self, idx):\r\n \"\"\"\r\n 
Return:\r\n [21, 3] or [21, 6] for poses including body and root orient\r\n [10] for shapes (betas) [Optimal]\r\n \"\"\"\r\n data_poses = self.poses[idx % self.real_data_len]\r\n data_dict = {'poses': data_poses}\r\n if self.return_shape:\r\n data_dict['shapes'] = self.shapes[idx % self.real_data_len]\r\n return data_dict\r\n\r\n def __len__(self, ):\r\n return len(self.poses)\r\n\r\n def _sample(self, sample_interval):\r\n print(f'Class AMASSDataset({self.subset}): sample dataset every {sample_interval} frame')\r\n self.poses = self.poses[::sample_interval]\r\n\r\n def read_data(self):\r\n data_path = os.path.join(self.root_path, self.version, self.subset)\r\n # root_orient = torch.load(os.path.join(data_path, 'root_orient.pt'))\r\n poses = torch.load(os.path.join(data_path, 'pose_body.pt'))\r\n shapes = torch.load(os.path.join(data_path, 'betas.pt')) if self.return_shape else None\r\n # poses = torch.cat([root_orient, pose_body], dim=1)\r\n data_len = len(poses)\r\n if self.rot_rep == 'rot6d':\r\n poses = axis_angle_to_rot6d(poses.reshape(-1, 3)).reshape(data_len, -1)\r\n\r\n return poses, shapes\r\n\r\n def Normalize(self):\r\n # Use train dataset for normalize computing, Z_score or min-max Normalize\r\n if self.min_max:\r\n normalize_path = os.path.join(self.root_path, self.version, 'train', self.rot_rep + '_normalize1.pt')\r\n else:\r\n normalize_path = os.path.join(self.root_path, self.version, 'train', self.rot_rep + '_normalize2.pt')\r\n\r\n if os.path.exists(normalize_path):\r\n normalize_params = torch.load(normalize_path)\r\n if self.min_max:\r\n min_poses, max_poses, min_shapes, max_shapes = (\r\n normalize_params['min_poses'],\r\n normalize_params['max_poses'],\r\n normalize_params['min_shapes'],\r\n normalize_params['max_shapes']\r\n )\r\n else:\r\n mean_poses, std_poses, mean_shapes, std_shapes = (\r\n normalize_params['mean_poses'],\r\n normalize_params['std_poses'],\r\n normalize_params['mean_shapes'],\r\n normalize_params['std_shapes']\r\n )\r\n else:\r\n if self.min_max:\r\n min_poses = torch.min(self.poses, dim=0)[0]\r\n max_poses = torch.max(self.poses, dim=0)[0]\r\n\r\n min_shapes = torch.min(self.shapes, dim=0)[0] if self.return_shape else None\r\n max_shapes = torch.max(self.shapes, dim=0)[0] if self.return_shape else None\r\n\r\n torch.save({\r\n 'min_poses': min_poses,\r\n 'max_poses': max_poses,\r\n 'min_shapes': min_shapes,\r\n 'max_shapes': max_shapes\r\n }, normalize_path)\r\n else:\r\n mean_poses = torch.mean(self.poses, dim=0)\r\n std_poses = torch.std(self.poses, dim=0)\r\n\r\n mean_shapes = torch.mean(self.shapes, dim=0) if self.return_shape else None\r\n std_shapes = torch.std(self.shapes, dim=0) if self.return_shape else None\r\n\r\n torch.save({\r\n 'mean_poses': mean_poses,\r\n 'std_poses': std_poses,\r\n 'mean_shapes': mean_shapes,\r\n 'std_shapes': std_shapes\r\n }, normalize_path)\r\n\r\n if self.min_max:\r\n self.poses = 2 * (self.poses - min_poses) / (max_poses - min_poses) - 1\r\n if self.return_shape:\r\n self.shapes = 2 * (self.shapes - min_shapes) / (max_shapes - min_shapes) - 1\r\n return min_poses, max_poses, min_shapes, max_shapes\r\n\r\n else:\r\n self.poses = (self.poses - mean_poses) / std_poses\r\n if self.return_shape:\r\n self.shapes = (self.shapes - mean_shapes) / std_shapes\r\n return mean_poses, std_poses, mean_shapes, std_shapes\r\n\r\n\r\n def Denormalize(self, poses, shapes=None):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n\r\n if self.min_max:\r\n min_poses = 
self.min_poses.view(1, -1).to(poses.device)\r\n max_poses = self.max_poses.view(1, -1).to(poses.device)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n normalized_poses = 0.5 * ((poses + 1) * (max_poses - min_poses) + 2 * min_poses)\r\n\r\n if shapes is not None and self.min_shapes is not None:\r\n min_shapes = self.min_shapes.view(1, -1).to(shapes.device)\r\n max_shapes = self.max_shapes.view(1, -1).to(shapes.device)\r\n\r\n if len(shapes.shape) == 3:\r\n min_shapes = min_shapes.unsqueeze(0)\r\n max_shapes = max_shapes.unsqueeze(0)\r\n\r\n normalized_shapes = 0.5 * ((shapes + 1) * (max_shapes - min_shapes) + 2 * min_shapes)\r\n return normalized_poses, normalized_shapes\r\n else:\r\n return normalized_poses\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1).to(poses.device)\r\n std_poses = self.std_poses.view(1, -1).to(poses.device)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n normalized_poses = poses * std_poses + mean_poses\r\n\r\n if shapes is not None and self.mean_shapes is not None:\r\n mean_shapes = self.mean_shapes.view(1, -1)\r\n std_shapes = self.std_shapes.view(1, -1)\r\n\r\n if len(shapes.shape) == 3:\r\n mean_shapes = mean_shapes.unsqueeze(0)\r\n std_shapes = std_shapes.unsqueeze(0)\r\n\r\n normalized_shapes = shapes * std_shapes + mean_shapes\r\n return normalized_poses, normalized_shapes\r\n else:\r\n return normalized_poses\r\n\r\n def eval(self, preds):\r\n pass\r" }, { "identifier": "N_POSES", "path": "lib/dataset/AMASS.py", "snippet": "N_POSES = 21\r" }, { "identifier": "Posenormalizer", "path": "lib/dataset/AMASS.py", "snippet": "class Posenormalizer:\r\n def __init__(self, data_path, device='cuda:0', normalize=True, min_max=True, rot_rep=None):\r\n assert rot_rep in ['rot6d', 'axis']\r\n self.normalize = normalize\r\n self.min_max = min_max\r\n self.rot_rep = rot_rep\r\n normalize_params = torch.load(os.path.join(data_path, '{}_normalize1.pt'.format(rot_rep)))\r\n self.min_poses, self.max_poses = normalize_params['min_poses'].to(device), normalize_params['max_poses'].to(device)\r\n normalize_params = torch.load(os.path.join(data_path, '{}_normalize2.pt'.format(rot_rep)))\r\n self.mean_poses, self.std_poses = normalize_params['mean_poses'].to(device), normalize_params['std_poses'].to(device)\r\n\r\n def offline_normalize(self, poses, from_axis=False):\r\n assert len(poses.shape) == 2 or len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n pose_shape = poses.shape\r\n if from_axis and self.rot_rep == 'rot6d':\r\n poses = axis_angle_to_rot6d(poses.reshape(-1, 3)).reshape(*pose_shape[:-1], -1)\r\n\r\n if not self.normalize:\r\n return poses\r\n\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1)\r\n max_poses = self.max_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n normalized_poses = 2 * (poses - min_poses) / (max_poses - min_poses) - 1\r\n\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1)\r\n std_poses = self.std_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n normalized_poses = (poses - mean_poses) / std_poses\r\n\r\n return normalized_poses\r\n\r\n def offline_denormalize(self, poses, to_axis=False):\r\n assert len(poses.shape) == 2 or 
len(poses.shape) == 3 # [b, data_dim] or [t, b, data_dim]\r\n\r\n if not self.normalize:\r\n denormalized_poses = poses\r\n else:\r\n if self.min_max:\r\n min_poses = self.min_poses.view(1, -1)\r\n max_poses = self.max_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n min_poses = min_poses.unsqueeze(0)\r\n max_poses = max_poses.unsqueeze(0)\r\n\r\n denormalized_poses = 0.5 * ((poses + 1) * (max_poses - min_poses) + 2 * min_poses)\r\n\r\n else:\r\n mean_poses = self.mean_poses.view(1, -1)\r\n std_poses = self.std_poses.view(1, -1)\r\n\r\n if len(poses.shape) == 3: # [t, b, data_dim]\r\n mean_poses = mean_poses.unsqueeze(0)\r\n std_poses = std_poses.unsqueeze(0)\r\n\r\n denormalized_poses = poses * std_poses + mean_poses\r\n\r\n if to_axis and self.rot_rep == 'rot6d':\r\n pose_shape = denormalized_poses.shape\r\n denormalized_poses = rot6d_to_axis_angle(denormalized_poses.reshape(-1, 6)).reshape(*pose_shape[:-1], -1)\r\n\r\n return denormalized_poses\r" }, { "identifier": "BodyModel", "path": "lib/body_model/body_model.py", "snippet": "class BodyModel(nn.Module):\r\n '''\r\n Wrapper around SMPLX body model class.\r\n from https://github.com/davrempe/humor/blob/main/humor/body_model/body_model.py\r\n '''\r\n\r\n def __init__(self,\r\n bm_path,\r\n num_betas=10,\r\n batch_size=1,\r\n num_expressions=10,\r\n model_type='smplx'):\r\n super(BodyModel, self).__init__()\r\n '''\r\n Creates the body model object at the given path.\r\n\r\n :param bm_path: path to the body model pkl file\r\n :param num_expressions: only for smplx\r\n :param model_type: one of [smpl, smplh, smplx]\r\n :param use_vtx_selector: if true, returns additional vertices as joints that correspond to OpenPose joints\r\n '''\r\n\r\n kwargs = {\r\n 'model_type': model_type,\r\n 'num_betas': num_betas,\r\n 'batch_size': batch_size,\r\n 'num_expression_coeffs': num_expressions,\r\n 'use_pca': False,\r\n 'flat_hand_mean': True\r\n }\r\n\r\n assert (model_type in ['smpl', 'smplh', 'smplx'])\r\n if model_type == 'smpl':\r\n self.bm = SMPL(bm_path, **kwargs)\r\n self.num_joints = SMPL.NUM_JOINTS\r\n elif model_type == 'smplh':\r\n # smplx does not support .npz by default, so have to load in manually\r\n smpl_dict = np.load(bm_path, encoding='latin1')\r\n data_struct = Struct(**smpl_dict)\r\n # print(smpl_dict.files)\r\n if model_type == 'smplh':\r\n data_struct.hands_componentsl = np.zeros((0))\r\n data_struct.hands_componentsr = np.zeros((0))\r\n data_struct.hands_meanl = np.zeros((15 * 3))\r\n data_struct.hands_meanr = np.zeros((15 * 3))\r\n V, D, B = data_struct.shapedirs.shape\r\n data_struct.shapedirs = np.concatenate(\r\n [data_struct.shapedirs, np.zeros((V, D, SMPL.SHAPE_SPACE_DIM - B))],\r\n axis=-1) # super hacky way to let smplh use 16-size beta\r\n kwargs['data_struct'] = data_struct\r\n self.bm = SMPLH(bm_path, **kwargs)\r\n self.num_joints = SMPLH.NUM_JOINTS\r\n elif model_type == 'smplx':\r\n self.bm = SMPLX(bm_path, **kwargs)\r\n self.num_joints = SMPLX.NUM_JOINTS\r\n\r\n self.model_type = model_type\r\n self.J_regressor = self.bm.J_regressor.numpy()\r\n self.J_regressor_idx = {'pelvis': 0, 'lwrist': 20, 'rwrist': 21, 'neck': 12}\r\n\r\n def forward(self, root_orient=None, pose_body=None, pose_hand=None, pose_jaw=None, pose_eye=None, betas=None,\r\n trans=None, dmpls=None, expression=None, return_dict=False, **kwargs):\r\n '''\r\n Note dmpls are not supported.\r\n '''\r\n assert (dmpls is None)\r\n # parameters of SMPL should not be updated\r\n out_obj = self.bm(\r\n betas=betas,\r\n 
global_orient=root_orient,\r\n body_pose=pose_body,\r\n left_hand_pose=None if pose_hand is None else pose_hand[:, :(SMPLH.NUM_HAND_JOINTS * 3)],\r\n right_hand_pose=None if pose_hand is None else pose_hand[:, (SMPLH.NUM_HAND_JOINTS * 3):],\r\n transl=trans,\r\n expression=expression,\r\n jaw_pose=pose_jaw,\r\n leye_pose=None if pose_eye is None else pose_eye[:, :3],\r\n reye_pose=None if pose_eye is None else pose_eye[:, 3:],\r\n return_full_pose=True,\r\n **kwargs\r\n )\r\n\r\n out = {\r\n 'v': out_obj.vertices,\r\n 'f': self.bm.faces_tensor,\r\n 'betas': out_obj.betas,\r\n 'Jtr': out_obj.joints,\r\n 'body_joints': out_obj.joints[:22], # only body joints\r\n 'pose_body': out_obj.body_pose,\r\n 'full_pose': out_obj.full_pose\r\n }\r\n if self.model_type in ['smplh', 'smplx']:\r\n out['pose_hand'] = torch.cat([out_obj.left_hand_pose, out_obj.right_hand_pose], dim=-1)\r\n if self.model_type == 'smplx':\r\n out['pose_jaw'] = out_obj.jaw_pose\r\n out['pose_eye'] = pose_eye\r\n\r\n # if not self.use_vtx_selector:\r\n # # don't need extra joints\r\n # out['Jtr'] = out['Jtr'][:, :self.num_joints + 1] # add one for the root\r\n\r\n if not return_dict:\r\n out = Struct(**out)\r\n\r\n return out\r" } ]
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from absl import app
from absl import flags
from absl.flags import argparse_flags
from ml_collections.config_flags import config_flags
from torch.utils.data import DataLoader
from lib.dataset.EvaSampler import DistributedEvalSampler
from lib.utils.misc import create_mask, linear_interpolation
from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter
from lib.algorithms.advanced.model import ScoreModelFC
from lib.algorithms.advanced import sde_lib, sampling
from lib.algorithms.ema import ExponentialMovingAverage
from lib.algorithms.advanced import utils as mutils
from lib.dataset.AMASS import AMASSDataset, N_POSES, Posenormalizer
from lib.body_model.body_model import BodyModel
from lib.dataset.AMASS import Evaler
11704
if weighted: weight = 0.5 * torch.sqrt(1 + SNR) else: weight = 0.5 dposer_loss = torch.mean(weight * self.loss_fn(x_0, denoise_data)) return dposer_loss def get_loss_weights(self): """Set loss weights""" loss_weight = {'data': lambda cst, it: 100 * cst / (1 + it), 'dposer': lambda cst, it: 0.1 * cst * (it + 1)} return loss_weight @staticmethod def backward_step(loss_dict, weight_dict, it): w_loss = dict() for k in loss_dict: w_loss[k] = weight_dict[k](loss_dict[k], it) tot_loss = list(w_loss.values()) tot_loss = torch.stack(tot_loss).sum() return tot_loss def optimize(self, observation, mask, time_strategy='3', lr=0.1, sample_trun=5.0, sample_time=900, iterations=2, steps_per_iter=100): total_steps = iterations * steps_per_iter opti_variable = observation.clone().detach() opti_variable.requires_grad = True optimizer = torch.optim.Adam([opti_variable], lr, betas=(0.9, 0.999)) weight_dict = self.get_loss_weights() loss_dict = dict() eps = 1e-3 timesteps = torch.linspace(self.sde.T, eps, self.sde.N, device=observation.device) for it in range(iterations): for i in range(steps_per_iter): step = it * steps_per_iter + i optimizer.zero_grad() ''' ************* DPoser loss *********** ''' if time_strategy == '1': quan_t = torch.randint(self.sde.N, [1]) elif time_strategy == '2': quan_t = torch.tensor(sample_time) elif time_strategy == '3': quan_t = self.sde.N - math.floor( torch.tensor(total_steps - step - 1) * (self.sde.N / (sample_trun * total_steps))) - 2 else: raise NotImplementedError('unsupported time sampling strategy') t = timesteps[quan_t] vec_t = torch.ones(self.batch_size, device=observation.device) * t loss_dict['dposer'] = self.loss(opti_variable, vec_t, quan_t) loss_dict['data'] = self.data_loss(opti_variable * mask, observation * mask) ''' *********** DPoser loss ************ ''' # Get total loss for backward pass tot_loss = self.backward_step(loss_dict, weight_dict, it) tot_loss.backward() optimizer.step() opti_variable = observation * mask + opti_variable * (1.0 - mask) return opti_variable def inference(rank, args, config): print(f"Running DDP on rank {rank}.") setup(rank, args.gpus, args.port) ## Load the pre-trained checkpoint from disk. 
device = torch.device("cuda", rank) POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) else: raise NotImplementedError('unsupported model') model.to(device) ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate) state = dict(optimizer=None, model=model, ema=ema, step=0) # restore checkpoint map_location = {'cuda:0': 'cuda:%d' % rank} checkpoint = torch.load(args.ckpt_path, map_location=map_location) model.load_state_dict(checkpoint['model_state_dict']) ema.load_state_dict(checkpoint['ema']) state['step'] = checkpoint['step'] print(f"=> loaded checkpoint '{args.ckpt_path}' (step {state['step']})") model.eval() ema.copy_to(model.parameters()) # Setup SDEs if config.training.sde.lower() == 'vpsde': sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps) elif config.training.sde.lower() == 'subvpsde': sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps) elif config.training.sde.lower() == 'vesde': sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=args.steps) else: raise NotImplementedError(f"SDE {config.training.sde} unknown.") # Setup sampling functions compfn = DPoserComp(model, sde, config.training.continuous, batch_size=args.batch_size) Normalizer = Posenormalizer(data_path=f'{args.dataset_folder}/{args.version}/train', normalize=config.data.normalize, min_max=config.data.min_max, rot_rep=config.data.rot_rep, device=device)
try: except ImportError as e: try: except ImportError as e: print('Tensorboard is not Installed') FLAGS = flags.FLAGS config_flags.DEFINE_config_file( "config", None, "Training configuration.", lock_config=False) flags.mark_flags_as_required(["config"]) def parse_args(argv): parser = argparse_flags.ArgumentParser(description='test diffusion model for completion on whole AMASS') parser.add_argument('--ckpt-path', type=str, default='./pretrained_models/axis-zscore-400k.pth') parser.add_argument('--dataset-folder', type=str, default='../data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='path of SMPLX model') parser.add_argument('--hypo', type=int, default=1, help='number of hypotheses to sample') parser.add_argument('--part', type=str, default='left_leg', choices=['left_leg', 'right_leg', 'left_arm', 'right_arm', 'trunk', 'hands', 'legs', 'arms']) # optional parser.add_argument('--steps', type=int, default=1000) parser.add_argument('--sample', type=int, help='sample testset to reduce data for other tasks') parser.add_argument('--batch_size', type=int, default=100, ) parser.add_argument('--gpus', type=int, help='num gpus to inference parallel') parser.add_argument('--port', type=str, default='14600', help='master port of machines') args = parser.parse_args(argv[1:]) return args def get_dataloader(dataset, num_replicas=1, rank=0, batch_size=10000): sampler = DistributedEvalSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=False) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0, sampler=sampler, persistent_workers=False, pin_memory=True, drop_last=True) return dataloader def setup(rank, world_size, port): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = port # initialize the process group dist.init_process_group("gloo", rank=rank, world_size=world_size) def cleanup(): dist.destroy_process_group() class DPoserComp(object): def __init__(self, diffusion_model, sde, continuous, batch_size=1): self.batch_size = batch_size self.sde = sde self.score_fn = mutils.get_score_fn(sde, diffusion_model, train=False, continuous=continuous) self.rsde = sde.reverse(self.score_fn, False) # L2 loss self.loss_fn = nn.MSELoss(reduction='none') self.data_loss = nn.MSELoss(reduction='mean') def one_step_denoise(self, x_t, t): drift, diffusion, alpha, sigma_2, score = self.rsde.sde(x_t, t, guide=True) x_0_hat = (x_t + sigma_2[:, None] * score) / alpha SNR = alpha / torch.sqrt(sigma_2)[:, None] return x_0_hat.detach(), SNR def multi_step_denoise(self, x_t, t, t_end, N=10): time_traj = linear_interpolation(t, t_end, N + 1) x_current = x_t for i in range(N): t_current = time_traj[i] t_before = time_traj[i + 1] alpha_current, sigma_current = self.sde.return_alpha_sigma(t_current) alpha_before, sigma_before = self.sde.return_alpha_sigma(t_before) score = self.score_fn(x_current, t_current, condition=None, mask=None) score = -score * sigma_current[:, None] # score to noise prediction x_current = alpha_before / alpha_current * ( x_current - sigma_current[:, None] * score) + sigma_before[ :, None] * score alpha, sigma = self.sde.return_alpha_sigma(time_traj[0]) SNR = alpha / sigma[:, None] return x_current.detach(), SNR def loss(self, x_0, vec_t, weighted=False, multi_denoise=False): # x_0: [B, j*6], vec_t: [B], z = 
torch.randn_like(x_0) mean, std = self.sde.marginal_prob(x_0, vec_t) perturbed_data = mean + std[:, None] * z # if multi_denoise: # not recommended denoise_data, SNR = self.multi_step_denoise(perturbed_data, vec_t, t_end=vec_t / (2 * 10), N=10) else: denoise_data, SNR = self.one_step_denoise(perturbed_data, vec_t) if weighted: weight = 0.5 * torch.sqrt(1 + SNR) else: weight = 0.5 dposer_loss = torch.mean(weight * self.loss_fn(x_0, denoise_data)) return dposer_loss def get_loss_weights(self): """Set loss weights""" loss_weight = {'data': lambda cst, it: 100 * cst / (1 + it), 'dposer': lambda cst, it: 0.1 * cst * (it + 1)} return loss_weight @staticmethod def backward_step(loss_dict, weight_dict, it): w_loss = dict() for k in loss_dict: w_loss[k] = weight_dict[k](loss_dict[k], it) tot_loss = list(w_loss.values()) tot_loss = torch.stack(tot_loss).sum() return tot_loss def optimize(self, observation, mask, time_strategy='3', lr=0.1, sample_trun=5.0, sample_time=900, iterations=2, steps_per_iter=100): total_steps = iterations * steps_per_iter opti_variable = observation.clone().detach() opti_variable.requires_grad = True optimizer = torch.optim.Adam([opti_variable], lr, betas=(0.9, 0.999)) weight_dict = self.get_loss_weights() loss_dict = dict() eps = 1e-3 timesteps = torch.linspace(self.sde.T, eps, self.sde.N, device=observation.device) for it in range(iterations): for i in range(steps_per_iter): step = it * steps_per_iter + i optimizer.zero_grad() ''' ************* DPoser loss *********** ''' if time_strategy == '1': quan_t = torch.randint(self.sde.N, [1]) elif time_strategy == '2': quan_t = torch.tensor(sample_time) elif time_strategy == '3': quan_t = self.sde.N - math.floor( torch.tensor(total_steps - step - 1) * (self.sde.N / (sample_trun * total_steps))) - 2 else: raise NotImplementedError('unsupported time sampling strategy') t = timesteps[quan_t] vec_t = torch.ones(self.batch_size, device=observation.device) * t loss_dict['dposer'] = self.loss(opti_variable, vec_t, quan_t) loss_dict['data'] = self.data_loss(opti_variable * mask, observation * mask) ''' *********** DPoser loss ************ ''' # Get total loss for backward pass tot_loss = self.backward_step(loss_dict, weight_dict, it) tot_loss.backward() optimizer.step() opti_variable = observation * mask + opti_variable * (1.0 - mask) return opti_variable def inference(rank, args, config): print(f"Running DDP on rank {rank}.") setup(rank, args.gpus, args.port) ## Load the pre-trained checkpoint from disk. 
device = torch.device("cuda", rank) POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) else: raise NotImplementedError('unsupported model') model.to(device) ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate) state = dict(optimizer=None, model=model, ema=ema, step=0) # restore checkpoint map_location = {'cuda:0': 'cuda:%d' % rank} checkpoint = torch.load(args.ckpt_path, map_location=map_location) model.load_state_dict(checkpoint['model_state_dict']) ema.load_state_dict(checkpoint['ema']) state['step'] = checkpoint['step'] print(f"=> loaded checkpoint '{args.ckpt_path}' (step {state['step']})") model.eval() ema.copy_to(model.parameters()) # Setup SDEs if config.training.sde.lower() == 'vpsde': sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps) elif config.training.sde.lower() == 'subvpsde': sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps) elif config.training.sde.lower() == 'vesde': sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=args.steps) else: raise NotImplementedError(f"SDE {config.training.sde} unknown.") # Setup sampling functions compfn = DPoserComp(model, sde, config.training.continuous, batch_size=args.batch_size) Normalizer = Posenormalizer(data_path=f'{args.dataset_folder}/{args.version}/train', normalize=config.data.normalize, min_max=config.data.min_max, rot_rep=config.data.rot_rep, device=device)
test_dataset = AMASSDataset(root_path=args.dataset_folder,
8
2023-11-29 15:55:50+00:00
16k
KylinYee/R2-Talker-code
main.py
[ { "identifier": "NeRFDataset", "path": "nerf/provider.py", "snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n self.downscale = downscale\n self.root_path = opt.path\n self.preload = opt.preload # 0 = disk, 1 = cpu, 2 = gpu\n self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.\n self.offset = opt.offset # camera offset\n self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.\n self.fp16 = opt.fp16\n\n self.start_index = opt.data_range[0]\n self.end_index = opt.data_range[1]\n\n self.training = self.type in ['train', 'all', 'trainval']\n self.num_rays = self.opt.num_rays if self.training else -1\n\n # load nerf-compatible format data.\n \n # load all splits (train/valid/test)\n if type == 'all':\n transform_paths = glob.glob(os.path.join(self.root_path, '*.json'))\n transform = None\n for transform_path in transform_paths:\n with open(transform_path, 'r') as f:\n tmp_transform = json.load(f)\n if transform is None:\n transform = tmp_transform\n else:\n transform['frames'].extend(tmp_transform['frames'])\n # load train and val split\n elif type == 'trainval':\n with open(os.path.join(self.root_path, f'transforms_train.json'), 'r') as f:\n transform = json.load(f)\n with open(os.path.join(self.root_path, f'transforms_val.json'), 'r') as f:\n transform_val = json.load(f)\n transform['frames'].extend(transform_val['frames'])\n # only load one specified split\n else:\n # no test, use val as test\n _split = 'val' if type == 'test' else type\n with open(os.path.join(self.root_path, f'transforms_{_split}.json'), 'r') as f:\n transform = json.load(f)\n\n # load image size\n if 'h' in transform and 'w' in transform:\n self.H = int(transform['h']) // downscale\n self.W = int(transform['w']) // downscale\n else:\n self.H = int(transform['cy']) * 2 // downscale\n self.W = int(transform['cx']) * 2 // downscale\n \n # read images\n frames = transform[\"frames\"]\n\n # use a slice of the dataset\n if self.end_index == -1: # abuse...\n self.end_index = len(frames)\n\n frames = frames[self.start_index:self.end_index]\n\n # use a subset of dataset.\n if type == 'train':\n if self.opt.part:\n frames = frames[::10] # 1/10 frames\n elif self.opt.part2:\n frames = frames[:375] # first 15s\n elif type == 'val':\n frames = frames[:100] # first 100 frames for val\n\n print(f'[INFO] load {len(frames)} {type} frames.')\n\n # only load pre-calculated aud features when not live-streaming\n if not self.opt.asr:\n # empty means the default self-driven extracted features.\n if self.opt.aud == '':\n if self.opt.cond_type == 'eo':\n aud_features = np.load(os.path.join(self.root_path, 'aud_eo.npy'))\n elif self.opt.cond_type == 'ds':\n aud_features = np.load(os.path.join(self.root_path, 'aud_ds.npy'))\n elif self.opt.cond_type == 'idexp':\n aud_features = np.load(os.path.join(self.root_path, 'aud_idexp.npy'))\n else:\n aud_features = np.load(os.path.join(self.root_path, 'aud.npy'))\n # cross-driven extracted features. 
\n else:\n aud_features = np.load(self.opt.aud)\n\n if self.opt.method == 'genefaceDagger':\n video_idexp_lm3d_mean = aud_features.mean(axis=0).reshape([1,68,3])\n video_idexp_lm3d_std = aud_features.std(axis=0).reshape([1,68,3])\n aud_features = (aud_features - video_idexp_lm3d_mean) / video_idexp_lm3d_std\n\n\n aud_features = torch.from_numpy(aud_features)\n\n\n # support both [N, 16] labels and [N, 16, K] logits\n if len(aud_features.shape) == 3:\n # if self.opt.cond_type in ['eo', 'ds']:\n # aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16] \n\n if self.opt.emb:\n print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')\n aud_features = aud_features.argmax(1) # [N, 16]\n \n else:\n assert self.opt.emb, \"aud only provide labels, must use --emb\"\n aud_features = aud_features.long()\n\n\n print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')\n\n self.torso_img = []\n self.images = []\n\n self.poses = []\n self.exps = []\n\n self.auds = []\n self.face_rect = []\n self.lips_rect = []\n self.eye_area = []\n\n for f in tqdm.tqdm(frames, desc=f'Loading {type} data'):\n\n f_path = os.path.join(self.root_path, 'gt_imgs', str(f['img_id']) + '.jpg')\n\n if not os.path.exists(f_path):\n print('[WARN]', f_path, 'NOT FOUND!')\n continue\n \n pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]\n pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)\n self.poses.append(pose)\n\n if self.preload > 0:\n image = cv2.imread(f_path, cv2.IMREAD_UNCHANGED) # [H, W, 3] o [H, W, 4]\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.images.append(image)\n else:\n self.images.append(f_path)\n\n # load frame-wise bg\n \n torso_img_path = os.path.join(self.root_path, 'torso_imgs', str(f['img_id']) + '.png')\n\n if self.preload > 0:\n torso_img = cv2.imread(torso_img_path, cv2.IMREAD_UNCHANGED) # [H, W, 4]\n torso_img = cv2.cvtColor(torso_img, cv2.COLOR_BGRA2RGBA)\n torso_img = torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.torso_img.append(torso_img)\n else:\n self.torso_img.append(torso_img_path)\n\n # find the corresponding audio to the image frame\n if not self.opt.asr and self.opt.aud == '':\n aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...\n self.auds.append(aud)\n\n # load lms and extract face\n lms = np.loadtxt(os.path.join(self.root_path, 'ori_imgs', str(f['img_id']) + '.lms')) # [68, 2]\n\n xmin, xmax = int(lms[31:36, 1].min()), int(lms[:, 1].max())\n ymin, ymax = int(lms[:, 0].min()), int(lms[:, 0].max())\n self.face_rect.append([xmin, xmax, ymin, ymax])\n\n if self.opt.exp_eye:\n eyes_left = slice(36, 42)\n eyes_right = slice(42, 48)\n\n area_left = polygon_area(lms[eyes_left, 0], lms[eyes_left, 1])\n area_right = polygon_area(lms[eyes_right, 0], lms[eyes_right, 1])\n\n # area percentage of two eyes of the whole image...\n area = (area_left + area_right) / (self.H * self.W) * 100\n\n self.eye_area.append(area)\n\n if self.opt.finetune_lips:\n lips = slice(48, 60)\n xmin, xmax = int(lms[lips, 1].min()), int(lms[lips, 1].max())\n ymin, ymax = int(lms[lips, 0].min()), int(lms[lips, 0].max())\n\n # padding to H == W\n cx = (xmin + xmax) // 2\n cy = (ymin + ymax) // 2\n\n l = max(xmax - xmin, ymax - ymin) // 2\n xmin = max(0, cx - l)\n xmax = min(self.H, cx + l)\n ymin = max(0, cy - l)\n ymax = min(self.W, cy + l)\n\n self.lips_rect.append([xmin, xmax, ymin, ymax])\n \n # load pre-extracted 
background image (should be the same size as training image...)\n\n if self.opt.bg_img == 'white': # special\n bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)\n elif self.opt.bg_img == 'black': # special\n bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)\n else: # load from file\n # default bg\n if self.opt.bg_img == '':\n self.opt.bg_img = os.path.join(self.root_path, 'bc.jpg')\n bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]\n if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:\n bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)\n bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)\n bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]\n\n self.bg_img = bg_img\n\n self.poses = np.stack(self.poses, axis=0)\n\n # smooth camera path...\n if self.opt.smooth_path:\n self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)\n \n self.poses = torch.from_numpy(self.poses) # [N, 4, 4]\n\n if self.preload > 0:\n self.images = torch.from_numpy(np.stack(self.images, axis=0)) # [N, H, W, C]\n self.torso_img = torch.from_numpy(np.stack(self.torso_img, axis=0)) # [N, H, W, C]\n else:\n self.images = np.array(self.images)\n self.torso_img = np.array(self.torso_img)\n\n if self.opt.asr:\n # live streaming, no pre-calculated auds\n self.auds = None\n else:\n # auds corresponding to images\n if self.opt.aud == '':\n self.auds = torch.stack(self.auds, dim=0) # [N, 32, 16]\n # auds is novel, may have a different length with images\n else:\n self.auds = aud_features\n \n self.bg_img = torch.from_numpy(self.bg_img)\n\n if self.opt.exp_eye:\n self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]\n print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')\n\n if self.opt.smooth_eye:\n\n # naive 5 window average\n ori_eye = self.eye_area.copy()\n for i in range(ori_eye.shape[0]):\n start = max(0, i - 1)\n end = min(ori_eye.shape[0], i + 2)\n self.eye_area[i] = ori_eye[start:end].mean()\n\n self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]\n\n \n # calculate mean radius of all camera poses\n self.radius = self.poses[:, :3, 3].norm(dim=-1).mean(0).item()\n #print(f'[INFO] dataset camera poses: radius = {self.radius:.4f}, bound = {self.bound}')\n\n \n # [debug] uncomment to view all training poses.\n # visualize_poses(self.poses.numpy())\n\n # [debug] uncomment to view examples of randomly generated poses.\n # visualize_poses(rand_poses(100, self.device, radius=self.radius).cpu().numpy())\n\n if self.preload > 1:\n self.poses = self.poses.to(self.device)\n\n if self.auds is not None:\n self.auds = self.auds.to(self.device)\n\n self.bg_img = self.bg_img.to(torch.half).to(self.device)\n\n self.torso_img = self.torso_img.to(torch.half).to(self.device)\n self.images = self.images.to(torch.half).to(self.device)\n \n if self.opt.exp_eye:\n self.eye_area = self.eye_area.to(self.device)\n\n # load intrinsics\n if 'focal_len' in transform:\n fl_x = fl_y = transform['focal_len']\n elif 'fl_x' in transform or 'fl_y' in transform:\n fl_x = (transform['fl_x'] if 'fl_x' in transform else transform['fl_y']) / downscale\n fl_y = (transform['fl_y'] if 'fl_y' in transform else transform['fl_x']) / downscale\n elif 'camera_angle_x' in transform or 'camera_angle_y' in transform:\n # blender, assert in radians. 
already downscaled since we use H/W\n fl_x = self.W / (2 * np.tan(transform['camera_angle_x'] / 2)) if 'camera_angle_x' in transform else None\n fl_y = self.H / (2 * np.tan(transform['camera_angle_y'] / 2)) if 'camera_angle_y' in transform else None\n if fl_x is None: fl_x = fl_y\n if fl_y is None: fl_y = fl_x\n else:\n raise RuntimeError('Failed to load focal length, please check the transforms.json!')\n\n cx = (transform['cx'] / downscale) if 'cx' in transform else (self.W / 2)\n cy = (transform['cy'] / downscale) if 'cy' in transform else (self.H / 2)\n \n self.intrinsics = np.array([fl_x, fl_y, cx, cy])\n\n # directly build the coordinate meshgrid in [-1, 1]^2\n self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]\n\n\n def mirror_index(self, index):\n size = self.poses.shape[0]\n turn = index // size\n res = index % size\n if turn % 2 == 0:\n return res\n else:\n return size - res - 1\n\n\n def collate(self, index):\n\n B = len(index) # a list of length 1\n # assert B == 1\n\n results = {}\n\n # audio use the original index\n if self.auds is not None:\n if self.opt.cond_type == 'idexp':\n auds = get_audio_features(self.auds, self.opt.att, index[0], smooth_win_size=5).to(self.device)\n else:\n auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device)\n\n results['auds'] = auds\n\n # head pose and bg image may mirror (replay --> <-- --> <--).\n index[0] = self.mirror_index(index[0])\n\n poses = self.poses[index].to(self.device) # [B, 4, 4]\n \n if self.training and self.opt.finetune_lips:\n rect = self.lips_rect[index[0]]\n results['rect'] = rect\n rays = get_rays(poses, self.intrinsics, self.H, self.W, -1, rect=rect)\n else:\n rays = get_rays(poses, self.intrinsics, self.H, self.W, self.num_rays, self.opt.patch_size)\n\n results['index'] = index # for ind. 
code\n results['H'] = self.H\n results['W'] = self.W\n results['rays_o'] = rays['rays_o']\n results['rays_d'] = rays['rays_d']\n\n # get a mask for rays inside rect_face\n if self.training:\n xmin, xmax, ymin, ymax = self.face_rect[index[0]]\n face_mask = (rays['j'] >= xmin) & (rays['j'] < xmax) & (rays['i'] >= ymin) & (rays['i'] < ymax) # [B, N]\n results['face_mask'] = face_mask\n\n if self.opt.exp_eye:\n results['eye'] = self.eye_area[index].to(self.device) # [1]\n else:\n results['eye'] = None\n\n # load bg\n bg_torso_img = self.torso_img[index]\n if self.preload == 0: # on the fly loading\n bg_torso_img = cv2.imread(bg_torso_img[0], cv2.IMREAD_UNCHANGED) # [H, W, 4]\n bg_torso_img = cv2.cvtColor(bg_torso_img, cv2.COLOR_BGRA2RGBA)\n bg_torso_img = bg_torso_img.astype(np.float32) / 255 # [H, W, 3/4]\n bg_torso_img = torch.from_numpy(bg_torso_img).unsqueeze(0)\n bg_torso_img = bg_torso_img[..., :3] * bg_torso_img[..., 3:] + self.bg_img * (1 - bg_torso_img[..., 3:])\n bg_torso_img = bg_torso_img.view(B, -1, 3).to(self.device)\n\n if not self.opt.torso:\n bg_img = bg_torso_img\n else:\n bg_img = self.bg_img.view(1, -1, 3).repeat(B, 1, 1).to(self.device)\n\n if self.training:\n bg_img = torch.gather(bg_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n\n results['bg_color'] = bg_img\n\n if self.opt.torso and self.training:\n bg_torso_img = torch.gather(bg_torso_img, 1, torch.stack(3 * [rays['inds']], -1)) # [B, N, 3]\n results['bg_torso_color'] = bg_torso_img\n\n images = self.images[index] # [B, H, W, 3/4]\n if self.preload == 0:\n images = cv2.imread(images[0], cv2.IMREAD_UNCHANGED) # [H, W, 3]\n images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)\n images = images.astype(np.float32) / 255 # [H, W, 3]\n images = torch.from_numpy(images).unsqueeze(0)\n images = images.to(self.device)\n\n if self.training:\n C = images.shape[-1]\n images = torch.gather(images.view(B, -1, C), 1, torch.stack(C * [rays['inds']], -1)) # [B, N, 3/4]\n \n results['images'] = images\n\n if self.training:\n bg_coords = torch.gather(self.bg_coords, 1, torch.stack(2 * [rays['inds']], -1)) # [1, N, 2]\n else:\n bg_coords = self.bg_coords # [1, N, 2]\n\n results['bg_coords'] = bg_coords\n\n results['poses'] = convert_poses(poses) # [B, 6]\n results['poses_matrix'] = poses # [B, 4, 4]\n \n return results\n\n def dataloader(self):\n\n if self.training:\n # training len(poses) == len(auds)\n size = self.poses.shape[0]\n else:\n # test with novel auds, then use its length\n if self.auds is not None:\n size = self.auds.shape[0]\n # live stream test, use 2 * len(poses), so it naturally mirrors.\n else:\n size = 2 * self.poses.shape[0]\n\n loader = DataLoader(list(range(size)), batch_size=1, collate_fn=self.collate, shuffle=self.training, num_workers=0)\n loader._data = self # an ugly fix... 
we need poses in trainer.\n\n # do evaluate if has gt images and use self-driven setting\n loader.has_gt = (self.opt.aud == '')\n\n return loader " }, { "identifier": "NeRFGUI", "path": "nerf/gui.py", "snippet": "class NeRFGUI:\n def __init__(self, opt, trainer, data_loader, debug=True):\n self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.\n self.W = opt.W\n self.H = opt.H\n self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)\n self.debug = debug\n self.training = False\n self.step = 0 # training step \n\n self.trainer = trainer\n self.data_loader = data_loader\n\n # override with dataloader's intrinsics\n self.W = data_loader._data.W\n self.H = data_loader._data.H\n self.cam.update_intrinsics(data_loader._data.intrinsics)\n\n # use dataloader's pose\n pose_init = data_loader._data.poses[0]\n self.cam.update_pose(pose_init.detach().cpu().numpy())\n\n # use dataloader's bg\n bg_img = data_loader._data.bg_img #.view(1, -1, 3)\n if self.H != bg_img.shape[0] or self.W != bg_img.shape[1]:\n bg_img = F.interpolate(bg_img.permute(2, 0, 1).unsqueeze(0).contiguous(), (self.H, self.W), mode='bilinear').squeeze(0).permute(1, 2, 0).contiguous()\n self.bg_color = bg_img.view(1, -1, 3)\n\n # audio features (from dataloader, only used in non-playing mode)\n self.audio_features = data_loader._data.auds # [N, 29, 16]\n self.audio_idx = 0\n\n # control eye\n self.eye_area = None if not self.opt.exp_eye else data_loader._data.eye_area.mean().item()\n\n # playing seq from dataloader, or pause.\n self.playing = False\n self.loader = iter(data_loader)\n\n self.render_buffer = np.zeros((self.W, self.H, 3), dtype=np.float32)\n self.need_update = True # camera moved, should reset accumulation\n self.spp = 1 # sample per pixel\n self.mode = 'image' # choose from ['image', 'depth']\n\n self.dynamic_resolution = False # assert False!\n self.downscale = 1\n self.train_steps = 16\n\n self.ind_index = 0\n self.ind_num = trainer.model.individual_codes.shape[0]\n\n # build asr\n if self.opt.asr:\n self.asr = ASR(opt)\n \n dpg.create_context()\n self.register_dpg()\n self.test_step()\n \n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.opt.asr:\n self.asr.stop() \n dpg.destroy_context()\n\n def train_step(self):\n\n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n outputs = self.trainer.train_gui(self.data_loader, step=self.train_steps)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n self.step += self.train_steps\n self.need_update = True\n\n dpg.set_value(\"_log_train_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_train_log\", f'step = {self.step: 5d} (+{self.train_steps: 2d}), loss = {outputs[\"loss\"]:.4f}, lr = {outputs[\"lr\"]:.5f}')\n\n # dynamic train steps\n # max allowed train time per-frame is 500 ms\n full_t = t / self.train_steps * 16\n train_steps = min(16, max(4, int(16 * 500 / full_t)))\n if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:\n self.train_steps = train_steps\n\n def prepare_buffer(self, outputs):\n if self.mode == 'image':\n return outputs['image']\n else:\n return np.expand_dims(outputs['depth'], -1).repeat(3, -1)\n\n def test_step(self):\n\n if self.need_update or self.spp < self.opt.max_spp:\n \n starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)\n starter.record()\n\n if 
self.playing:\n try:\n data = next(self.loader)\n except StopIteration:\n self.loader = iter(self.data_loader)\n data = next(self.loader)\n \n if self.opt.asr:\n # use the live audio stream\n data['auds'] = self.asr.get_next_feat()\n\n outputs = self.trainer.test_gui_with_data(data, self.W, self.H)\n\n # sync local camera pose\n self.cam.update_pose(data['poses_matrix'][0].detach().cpu().numpy())\n \n else:\n if self.audio_features is not None:\n auds = get_audio_features(self.audio_features, self.opt.att, self.audio_idx)\n else:\n auds = None\n outputs = self.trainer.test_gui(self.cam.pose, self.cam.intrinsics, self.W, self.H, auds, self.eye_area, self.ind_index, self.bg_color, self.spp, self.downscale)\n\n ender.record()\n torch.cuda.synchronize()\n t = starter.elapsed_time(ender)\n\n # update dynamic resolution\n if self.dynamic_resolution:\n # max allowed infer time per-frame is 200 ms\n full_t = t / (self.downscale ** 2)\n downscale = min(1, max(1/4, math.sqrt(200 / full_t)))\n if downscale > self.downscale * 1.2 or downscale < self.downscale * 0.8:\n self.downscale = downscale\n\n if self.need_update:\n self.render_buffer = self.prepare_buffer(outputs)\n self.spp = 1\n self.need_update = False\n else:\n self.render_buffer = (self.render_buffer * self.spp + self.prepare_buffer(outputs)) / (self.spp + 1)\n self.spp += 1\n \n if self.playing:\n self.need_update = True\n\n dpg.set_value(\"_log_infer_time\", f'{t:.4f}ms ({int(1000/t)} FPS)')\n dpg.set_value(\"_log_resolution\", f'{int(self.downscale * self.W)}x{int(self.downscale * self.H)}')\n dpg.set_value(\"_log_spp\", self.spp)\n dpg.set_value(\"_texture\", self.render_buffer)\n\n \n def register_dpg(self):\n\n ### register texture \n\n with dpg.texture_registry(show=False):\n dpg.add_raw_texture(self.W, self.H, self.render_buffer, format=dpg.mvFormat_Float_rgb, tag=\"_texture\")\n\n ### register window\n\n # the rendered image, as the primary window\n with dpg.window(tag=\"_primary_window\", width=self.W, height=self.H):\n\n # add the texture\n dpg.add_image(\"_texture\")\n\n # dpg.set_primary_window(\"_primary_window\", True)\n\n dpg.show_tool(dpg.mvTool_Metrics)\n\n # control window\n with dpg.window(label=\"Control\", tag=\"_control_window\", width=400, height=300):\n\n # button theme\n with dpg.theme() as theme_button:\n with dpg.theme_component(dpg.mvButton):\n dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))\n dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))\n dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)\n\n # time\n if not self.opt.test:\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train time: \")\n dpg.add_text(\"no data\", tag=\"_log_train_time\") \n\n with dpg.group(horizontal=True):\n dpg.add_text(\"Infer time: \")\n dpg.add_text(\"no data\", tag=\"_log_infer_time\")\n \n with dpg.group(horizontal=True):\n dpg.add_text(\"SPP: \")\n dpg.add_text(\"1\", tag=\"_log_spp\")\n\n # train button\n if not self.opt.test:\n with dpg.collapsing_header(label=\"Train\", default_open=True):\n\n # train / stop\n with dpg.group(horizontal=True):\n dpg.add_text(\"Train: \")\n\n def callback_train(sender, app_data):\n if self.training:\n self.training = False\n dpg.configure_item(\"_button_train\", label=\"start\")\n else:\n self.training = True\n dpg.configure_item(\"_button_train\", label=\"stop\")\n\n dpg.add_button(label=\"start\", tag=\"_button_train\", callback=callback_train)\n 
dpg.bind_item_theme(\"_button_train\", theme_button)\n\n def callback_reset(sender, app_data):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n self.trainer.model.apply(fn=weight_reset)\n self.trainer.model.reset_extra_state() # for cuda_ray density_grid and step_counter\n self.need_update = True\n\n dpg.add_button(label=\"reset\", tag=\"_button_reset\", callback=callback_reset)\n dpg.bind_item_theme(\"_button_reset\", theme_button)\n\n # save ckpt\n with dpg.group(horizontal=True):\n dpg.add_text(\"Checkpoint: \")\n\n def callback_save(sender, app_data):\n self.trainer.save_checkpoint(full=True, best=False)\n dpg.set_value(\"_log_ckpt\", \"saved \" + os.path.basename(self.trainer.stats[\"checkpoints\"][-1]))\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"save\", tag=\"_button_save\", callback=callback_save)\n dpg.bind_item_theme(\"_button_save\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_ckpt\")\n \n # save mesh\n with dpg.group(horizontal=True):\n dpg.add_text(\"Marching Cubes: \")\n\n def callback_mesh(sender, app_data):\n self.trainer.save_mesh(resolution=256, threshold=10)\n dpg.set_value(\"_log_mesh\", \"saved \" + f'{self.trainer.name}_{self.trainer.epoch}.ply')\n self.trainer.epoch += 1 # use epoch to indicate different calls.\n\n dpg.add_button(label=\"mesh\", tag=\"_button_mesh\", callback=callback_mesh)\n dpg.bind_item_theme(\"_button_mesh\", theme_button)\n\n dpg.add_text(\"\", tag=\"_log_mesh\")\n\n with dpg.group(horizontal=True):\n dpg.add_text(\"\", tag=\"_log_train_log\")\n\n \n # rendering options\n with dpg.collapsing_header(label=\"Options\", default_open=True):\n \n # playing\n with dpg.group(horizontal=True):\n dpg.add_text(\"Play: \")\n\n def callback_play(sender, app_data):\n \n if self.playing:\n self.playing = False\n dpg.configure_item(\"_button_play\", label=\"start\")\n else:\n self.playing = True\n dpg.configure_item(\"_button_play\", label=\"stop\")\n if self.opt.asr:\n self.asr.warm_up()\n self.need_update = True\n\n dpg.add_button(label=\"start\", tag=\"_button_play\", callback=callback_play)\n dpg.bind_item_theme(\"_button_play\", theme_button)\n\n # set asr\n if self.opt.asr:\n\n # clear queue button\n def callback_clear_queue(sender, app_data):\n \n self.asr.clear_queue()\n self.need_update = True\n\n dpg.add_button(label=\"clear\", tag=\"_button_clear_queue\", callback=callback_clear_queue)\n dpg.bind_item_theme(\"_button_clear_queue\", theme_button)\n\n # dynamic rendering resolution\n with dpg.group(horizontal=True):\n\n def callback_set_dynamic_resolution(sender, app_data):\n if self.dynamic_resolution:\n self.dynamic_resolution = False\n self.downscale = 1\n else:\n self.dynamic_resolution = True\n self.need_update = True\n\n # Disable dynamic resolution for face.\n # dpg.add_checkbox(label=\"dynamic resolution\", default_value=self.dynamic_resolution, callback=callback_set_dynamic_resolution)\n dpg.add_text(f\"{self.W}x{self.H}\", tag=\"_log_resolution\")\n\n # mode combo\n def callback_change_mode(sender, app_data):\n self.mode = app_data\n self.need_update = True\n \n dpg.add_combo(('image', 'depth'), label='mode', default_value=self.mode, callback=callback_change_mode)\n\n\n # bg_color picker\n def callback_change_bg(sender, app_data):\n self.bg_color = torch.tensor(app_data[:3], dtype=torch.float32) # only need RGB in [0, 1]\n self.need_update = True\n\n 
dpg.add_color_edit((255, 255, 255), label=\"Background Color\", width=200, tag=\"_color_editor\", no_alpha=True, callback=callback_change_bg)\n\n # audio index slider\n if not self.opt.asr:\n def callback_set_audio_index(sender, app_data):\n self.audio_idx = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Audio\", min_value=0, max_value=self.audio_features.shape[0] - 1, format=\"%d\", default_value=self.audio_idx, callback=callback_set_audio_index)\n\n # ind code index slider\n if self.opt.ind_dim > 0:\n def callback_set_individual_code(sender, app_data):\n self.ind_index = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"Individual\", min_value=0, max_value=self.ind_num - 1, format=\"%d\", default_value=self.ind_index, callback=callback_set_individual_code)\n\n # eye area slider\n if self.opt.exp_eye:\n def callback_set_eye(sender, app_data):\n self.eye_area = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"eye area\", min_value=0, max_value=0.5, format=\"%.2f percent\", default_value=self.eye_area, callback=callback_set_eye)\n\n # fov slider\n def callback_set_fovy(sender, app_data):\n self.cam.fovy = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"FoV (vertical)\", min_value=1, max_value=120, format=\"%d deg\", default_value=self.cam.fovy, callback=callback_set_fovy)\n\n # dt_gamma slider\n def callback_set_dt_gamma(sender, app_data):\n self.opt.dt_gamma = app_data\n self.need_update = True\n\n dpg.add_slider_float(label=\"dt_gamma\", min_value=0, max_value=0.1, format=\"%.5f\", default_value=self.opt.dt_gamma, callback=callback_set_dt_gamma)\n\n # max_steps slider\n def callback_set_max_steps(sender, app_data):\n self.opt.max_steps = app_data\n self.need_update = True\n\n dpg.add_slider_int(label=\"max steps\", min_value=1, max_value=1024, format=\"%d\", default_value=self.opt.max_steps, callback=callback_set_max_steps)\n\n # aabb slider\n def callback_set_aabb(sender, app_data, user_data):\n # user_data is the dimension for aabb (xmin, ymin, zmin, xmax, ymax, zmax)\n self.trainer.model.aabb_infer[user_data] = app_data\n\n # also change train aabb ? 
[better not...]\n #self.trainer.model.aabb_train[user_data] = app_data\n\n self.need_update = True\n\n dpg.add_separator()\n dpg.add_text(\"Axis-aligned bounding box:\")\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"x\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=0)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=3)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"y\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=1)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=4)\n\n with dpg.group(horizontal=True):\n dpg.add_slider_float(label=\"z\", width=150, min_value=-self.opt.bound, max_value=0, format=\"%.2f\", default_value=-self.opt.bound, callback=callback_set_aabb, user_data=2)\n dpg.add_slider_float(label=\"\", width=150, min_value=0, max_value=self.opt.bound, format=\"%.2f\", default_value=self.opt.bound, callback=callback_set_aabb, user_data=5)\n \n\n # debug info\n if self.debug:\n with dpg.collapsing_header(label=\"Debug\"):\n # pose\n dpg.add_separator()\n dpg.add_text(\"Camera Pose:\")\n dpg.add_text(str(self.cam.pose), tag=\"_log_pose\")\n\n\n ### register camera handler\n\n def callback_camera_drag_rotate(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.orbit(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_wheel_scale(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n delta = app_data\n\n self.cam.scale(delta)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n def callback_camera_drag_pan(sender, app_data):\n\n if not dpg.is_item_focused(\"_primary_window\"):\n return\n\n dx = app_data[1]\n dy = app_data[2]\n\n self.cam.pan(dx, dy)\n self.need_update = True\n\n if self.debug:\n dpg.set_value(\"_log_pose\", str(self.cam.pose))\n\n\n with dpg.handler_registry():\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Left, callback=callback_camera_drag_rotate)\n dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)\n dpg.add_mouse_drag_handler(button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan)\n\n \n dpg.create_viewport(title='RAD-NeRF', width=1080, height=720, resizable=True)\n\n ### global theme\n with dpg.theme() as theme_no_padding:\n with dpg.theme_component(dpg.mvAll):\n # set all padding to 0 to avoid scroll bar\n dpg.add_theme_style(dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core)\n dpg.add_theme_style(dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core)\n \n dpg.bind_item_theme(\"_primary_window\", theme_no_padding)\n\n dpg.setup_dearpygui()\n\n #dpg.show_metrics()\n\n dpg.show_viewport()\n\n\n def render(self):\n\n while dpg.is_dearpygui_running():\n # update texture every frame\n if self.training:\n self.train_step()\n # audio stream thread...\n if self.opt.asr and self.playing:\n # run 2 ASR steps (audio is at 50FPS, video is 
at 25FPS)\n for _ in range(2):\n self.asr.run_step()\n self.test_step()\n dpg.render_dearpygui_frame()" } ]
import torch
import argparse
from nerf.provider import NeRFDataset
from nerf.gui import NeRFGUI
from nerf.utils import *
from nerf.network import NeRFNetwork, R2TalkerNeRF, GeneNeRFNetwork
13028
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." if opt.finetune_lips: # do not update density grid in finetune stage opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... 
else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train: test_set = NeRFDataset(opt, device=device, type='train') # a manual fix to test on the training dataset test_set.training = False test_set.num_rays = -1 test_loader = test_set.dataloader() else: test_loader = NeRFDataset(opt, device=device, type='test').dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
# torch.autograd.set_detect_anomaly(True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', type=str) parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options parser.add_argument('--iters', type=int, default=200000, help="training iters") parser.add_argument('--lr', type=float, default=5e-3, help="initial learning rate") parser.add_argument('--lr_net', type=float, default=5e-4, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss") parser.add_argument('--bg_img', type=str, default='', help="background image") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. 
set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp") parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', 
action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." if opt.finetune_lips: # do not update density grid in finetune stage opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train: test_set = NeRFDataset(opt, device=device, type='train') # a manual fix to test on the training dataset test_set.training = False test_set.num_rays = -1 test_loader = test_set.dataloader() else: test_loader = NeRFDataset(opt, device=device, type='test').dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
with NeRFGUI(opt, trainer, test_loader) as gui:
1
2023-12-04 12:51:59+00:00
16k
ubc-vision/vivid123
vivid123/generation_utils.py
[ { "identifier": "CLIPCameraProjection", "path": "vivid123/models/clip_camera_projection.py", "snippet": "class CLIPCameraProjection(ModelMixin, ConfigMixin):\n \"\"\"\n A Projection layer for CLIP embedding and camera embedding.\n Parameters:\n embedding_dim (`int`, *optional*, defaults to 768): The dimension of the model input `clip_embed`\n additional_embeddings (`int`, *optional*, defaults to 4): The number of additional tokens appended to the\n projected `hidden_states`. The actual length of the used `hidden_states` is `num_embeddings +\n additional_embeddings`.\n \"\"\"\n\n @register_to_config\n def __init__(self, embedding_dim: int = 768, additional_embeddings: int = 4):\n super().__init__()\n self.embedding_dim = embedding_dim\n self.additional_embeddings = additional_embeddings\n\n self.input_dim = self.embedding_dim + self.additional_embeddings\n self.output_dim = self.embedding_dim\n\n self.proj = torch.nn.Linear(self.input_dim, self.output_dim)\n\n def forward(\n self,\n embedding: torch.FloatTensor,\n ):\n \"\"\"\n The [`PriorTransformer`] forward method.\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, input_dim)`):\n The currently input embeddings.\n Returns:\n The output embedding projection (`torch.FloatTensor` of shape `(batch_size, output_dim)`).\n \"\"\"\n proj_embedding = self.proj(embedding)\n return proj_embedding" }, { "identifier": "ViVid123Pipeline", "path": "vivid123/pipelines/vivid123_pipeline.py", "snippet": "class ViVid123Pipeline(TextToVideoSDPipeline):\n r\"\"\"\n Pipeline for text-to-video generation.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).\n tokenizer (`CLIPTokenizer`):\n A [`~transformers.CLIPTokenizer`] to tokenize text.\n unet ([`UNet3DConditionModel`]):\n A [`UNet3DConditionModel`] to denoise the encoded video latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n novel_view_unet: UNet2DConditionModel,\n image_encoder: CLIPVisionModelWithProjection,\n cc_projection: CLIPCameraProjection,\n ):\n super().__init__(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)\n\n self.register_modules(\n novel_view_unet=novel_view_unet,\n image_encoder=image_encoder,\n cc_projection=cc_projection,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n\n self.image_processor = VaeImageProcessor(\n vae_scale_factor=self.vae_scale_factor,\n do_convert_rgb=True,\n do_normalize=True,\n )\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n num_inference_steps=50,\n fusion_schedule=None,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. 
Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n \n if fusion_schedule is None:\n raise ValueError(\n \"Fusion schedule is not provided.\"\n )\n \n if len(fusion_schedule[0]) != num_inference_steps or len(fusion_schedule[1]) != num_inference_steps:\n raise ValueError(\n \"Fusion schedule length does not match the number of timesteps.\"\n )\n \n def prepare_latents(\n self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None, noise_identical_accross_frames=False\n ):\n shape = (\n batch_size,\n num_channels_latents,\n num_frames if not noise_identical_accross_frames else 1,\n height // self.vae_scale_factor,\n width // self.vae_scale_factor,\n )\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n else:\n if latents.shape != shape:\n raise ValueError(\n f\"User-prepared `latents` must have shape {shape}, when noise_identical_accross_frames={noise_identical_accross_frames} but got {latents.shape}.\"\n )\n latents = latents.to(device)\n\n if noise_identical_accross_frames:\n latents = latents.repeat(1, 1, num_frames, 1, 1)\n \n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def prepare_img_latents(\n self, image, batch_size, dtype, device, generator=None, do_zero123_classifier_free_guidance=False\n ):\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n if isinstance(image, torch.Tensor):\n # Batch single image\n if image.ndim == 3:\n assert image.shape[0] == 3, \"Image outside a batch should be of shape (3, H, W)\"\n image = image.unsqueeze(0)\n\n assert image.ndim == 4, \"Image must have 4 dimensions\"\n\n # Check image is in [-1, 1]\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"Image should be in [-1, 1] range\")\n else:\n # preprocess image\n if isinstance(image, (PIL.Image.Image, np.ndarray)):\n image = [image]\n\n if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):\n image = [np.array(i.convert(\"RGB\"))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n elif isinstance(image, list) and isinstance(image[0], np.ndarray):\n image = np.concatenate([i[None, :] for i in image], axis=0)\n\n image = image.transpose(0, 3, 1, 2)\n image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0\n\n image = image.to(device=device, dtype=dtype)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.mode()\n\n # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor\n if batch_size > init_latents.shape[0]:\n # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)\n num_images_per_prompt = batch_size // init_latents.shape[0]\n # duplicate image latents for each generation per prompt, using mps friendly method\n bs_embed, emb_c, emb_h, emb_w = init_latents.shape\n init_latents = init_latents.unsqueeze(1)\n init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)\n init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)\n\n # init_latents = torch.cat([init_latents]*2) if do_zero123_classifier_free_guidance else init_latents # follow zero123\n init_latents = (\n torch.cat([torch.zeros_like(init_latents), init_latents])\n if do_zero123_classifier_free_guidance\n else init_latents\n )\n\n init_latents = init_latents.to(device=device, dtype=dtype)\n return init_latents\n\n def CLIP_preprocess(self, x):\n dtype = x.dtype\n # following openai's implementation\n # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741\n # follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608\n if isinstance(x, torch.Tensor):\n if x.min() < -1.0 or x.max() > 1.0:\n raise ValueError(\"Expected input tensor to have values in the range [-1, 1]\")\n x = kornia.geometry.resize(\n x.to(torch.float32), (224, 224), interpolation=\"bicubic\", align_corners=True, antialias=False\n ).to(dtype=dtype)\n x = (x + 1.0) / 2.0\n # renormalize according to clip\n x = kornia.enhance.normalize(\n x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])\n )\n return x\n\n # from stable_diffusion_image_variation\n def _encode_image(self, image, device, num_images_per_prompt, do_video_classifier_free_guidance):\n dtype = next(self.image_encoder.parameters()).dtype\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n if isinstance(image, torch.Tensor):\n # Batch single image\n if image.ndim == 3:\n assert image.shape[0] == 3, \"Image outside a batch should be of shape (3, H, W)\"\n image = image.unsqueeze(0)\n\n assert image.ndim == 4, \"Image must have 4 dimensions\"\n\n # Check image is in [-1, 1]\n if image.min() < -1 or image.max() > 1:\n raise ValueError(\"Image should be in [-1, 1] range\")\n else:\n # preprocess image\n if isinstance(image, (PIL.Image.Image, np.ndarray)):\n image = [image]\n\n if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):\n image = [np.array(i.convert(\"RGB\"))[None, :] for i in image]\n image = np.concatenate(image, axis=0)\n elif isinstance(image, list) and isinstance(image[0], np.ndarray):\n image = np.concatenate([i[None, :] for i in image], axis=0)\n\n image = image.transpose(0, 3, 1, 2)\n image = 
torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0\n\n image = image.to(device=device, dtype=dtype)\n\n image = self.CLIP_preprocess(image)\n # if not isinstance(image, torch.Tensor):\n # # 0-255\n # print(\"Warning: image is processed by hf's preprocess, which is different from openai original's.\")\n # image = self.feature_extractor(images=image, return_tensors=\"pt\").pixel_values\n image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)\n image_embeddings = image_embeddings.unsqueeze(1)\n\n # duplicate image embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = image_embeddings.shape\n image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)\n image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n if do_video_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(image_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])\n\n return image_embeddings\n\n def _encode_pose(self, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):\n dtype = next(self.cc_projection.parameters()).dtype\n if isinstance(pose, torch.Tensor):\n pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)\n else:\n if isinstance(pose[0], list):\n pose = torch.Tensor(pose)\n else:\n pose = torch.Tensor([pose])\n x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)\n pose_embeddings = (\n torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)\n .unsqueeze(1)\n .to(device=device, dtype=dtype)\n ) # B, 1, 4\n # duplicate pose embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = pose_embeddings.shape\n pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)\n pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n if do_video_classifier_free_guidance:\n negative_prompt_embeds = torch.zeros_like(pose_embeddings)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])\n return pose_embeddings\n\n def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):\n img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)\n pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)\n prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)\n prompt_embeds = self.cc_projection(prompt_embeds)\n # prompt_embeds = img_prompt_embeds\n # follow 0123, add negative prompt, after projection\n if do_video_classifier_free_guidance:\n negative_prompt = torch.zeros_like(prompt_embeds)\n prompt_embeds = torch.cat([negative_prompt, prompt_embeds])\n return prompt_embeds\n\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_frames: int = 16,\n num_inference_steps: int = 50,\n guidance_scale_video: float = 9.0,\n negative_prompt: 
Optional[Union[str, List[str]]] = None,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"np\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n # vivid123 params below\n image: Optional[\n Union[\n torch.FloatTensor,\n PIL.Image.Image,\n np.ndarray,\n List[torch.FloatTensor],\n List[PIL.Image.Image],\n List[np.ndarray],\n ]\n ] = None,\n cam_pose_torch: Optional[torch.FloatTensor] = None,\n fusion_schedule: Optional[tuple[float]] = None,\n ddim_eta_0123: float = 1.0,\n guidance_scale_zero123: float = 3.0,\n noise_identical_accross_frames: bool = False,\n ):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.\n height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The height in pixels of the generated video.\n width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):\n The width in pixels of the generated video.\n num_frames (`int`, *optional*, defaults to 16):\n The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds\n amounts to 2 seconds of video.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality videos at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide what to not include in image generation. If not defined, you need to\n pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies\n to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`. Latents should be of shape\n `(batch_size, num_channel, num_frames, height, width)`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). 
If not\n provided, text embeddings are generated from the `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If\n not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.\n output_type (`str`, *optional*, defaults to `\"np\"`):\n The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead\n of a plain tuple.\n callback (`Callable`, *optional*):\n A function that calls every `callback_steps` steps during inference. The function is called with the\n following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function is called. If not specified, the callback is called at\n every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in\n [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n guidance_rescale (`float`, *optional*, defaults to 0.0):\n Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are\n Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when\n using zero terminal SNR.\n guidance_scale_zero123 (`float`, *optional*, defaults to 3.0):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n cam_pose_torch: (`torch.FloatTensor`, *optional*):\n Camera pose in torch tensor, shape (4,). The elements mean (el, sin(az), cos(az), radius)\n fusion_schedule (`tuple[float]`, *optional*):\n Fusion schedule for video diffusion and zero123. The first element is the schedule for video diffusion, and the\n second element is the schedule for zero123. The length of each schedule should be the same as the number\n of timesteps.\n ddim_eta_0123 (`float`, *optional*, defaults to 1.0):\n The eta value for the 0123 diffusion steps. Only applies to the [`~schedulers.DDIMScheduler`], and is\n ignored in other schedulers.\n \n Example:\n \n\n Returns:\n [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is\n returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_videos_per_image_prompt = 1\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n num_inference_steps,\n fusion_schedule\n )\n\n # 2. 
Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_video_classifier_free_guidance = guidance_scale_video > 1.0\n do_zero123_classifier_free_guidance = guidance_scale_zero123 > 1.0\n\n # 3.1 Encode input prompt for video diffusion\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt=prompt,\n device=device,\n # by diffusers v0.23.1, the naming of diffusers.pipelines.TextToVideoSDPipeline is still \"num_images_per_prompt\",\n # where it should be \"num_videos_per_prompt\"\n num_images_per_prompt=num_videos_per_image_prompt,\n do_classifier_free_guidance=do_video_classifier_free_guidance,\n negative_prompt=negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_video_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 3.2 Encode input image for zero123\n zero123_cond_images = [image for _ in range(num_frames)]\n zero123_embeds = self._encode_image_with_pose(\n zero123_cond_images,\n cam_pose_torch,\n device,\n num_videos_per_image_prompt,\n do_zero123_classifier_free_guidance,\n ) # (2xF) x 1 x 768\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_image_prompt,\n num_channels_latents,\n num_frames,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n noise_identical_accross_frames,\n )\n\n # 6. Prepare Zero123 image latents\n img_latents = self.prepare_img_latents(\n zero123_cond_images,\n batch_size=num_frames,\n dtype=zero123_embeds.dtype,\n device=device,\n generator=generator,\n do_zero123_classifier_free_guidance=True,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_video_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual with video diffusion\n noise_pred_video = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform classifier-free guidance for video diffusion\n if do_video_classifier_free_guidance:\n noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2)\n noise_pred_video = noise_pred_video_uncond + guidance_scale_video * (\n noise_pred_video_text - noise_pred_video_uncond\n )\n # if do_video_classifier_free_guidance and guidance_rescale > 0.0:\n # # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n # noise_pred_video = rescale_noise_cfg(\n # noise_pred_video, noise_pred_video_text, guidance_rescale=guidance_rescale\n # )\n\n # zero123 denoising\n latent_model_input_zero123 = torch.cat([latents] * 2) if do_zero123_classifier_free_guidance else latents\n augmented_latent_model_input_zero123 = torch.cat(\n [rearrange(latent_model_input_zero123, \"B C F H W -> (B F) C H W\"), img_latents],\n dim=1,\n ).to(self.novel_view_unet.dtype)\n noise_pred_zero123 = self.novel_view_unet(\n augmented_latent_model_input_zero123,\n t,\n encoder_hidden_states=zero123_embeds,\n return_dict=True,\n ).sample\n noise_pred_zero123 = rearrange(noise_pred_zero123, \"(B F) C H W -> B C F H W\", F=num_frames)\n\n if do_zero123_classifier_free_guidance:\n noise_pred_zero123_uncond, noise_pred_zero123_text = noise_pred_zero123.chunk(2)\n noise_pred_zero123 = noise_pred_zero123_uncond + guidance_scale_zero123 * (\n noise_pred_zero123_text - noise_pred_zero123_uncond\n )\n\n # fusing video diffusion with zero123\n noise_pred = fusion_schedule[0][i] * noise_pred_video + fusion_schedule[1][i] * noise_pred_zero123\n\n # reshape latents\n bsz, channel, frames, width, height = latents.shape\n latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # reshape latents back\n latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if output_type == \"latent\":\n return TextToVideoSDPipelineOutput(frames=latents)\n\n video_tensor = self.decode_latents(latents)\n\n if output_type == \"pt\":\n video = video_tensor\n else:\n video = tensor2vid(video_tensor)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (video,)\n\n return TextToVideoSDPipelineOutput(frames=video)" }, { "identifier": "ViVid123BaseSchema", "path": "vivid123/configs/base_schema.py", "snippet": 
"class ViVid123BaseSchema(BaseModel):\n # Disable aliasing underscore to hyphen\n class Config:\n alias_generator = lambda string: string\n\n num_frames: int = 25\n delta_elevation_start: float = 0.0\n delta_elevation_end: float = 0.0\n delta_azimuth_start: float = -45.0\n delta_azimuth_end: float = 45.0\n delta_radius_start: float = 0.0\n delta_radius_end: float = 0.0\n height: int = 256\n width: int = 256\n # num_videos_per_image_prompt: int = 1 # Only support 1 for running on < 24G memory GPU\n num_inference_steps: int = 50\n guidance_scale_zero123: float = 3.0\n guidance_scale_video: float = 1.0\n eta: float = 1.0\n noise_identical_accross_frames: bool = False\n prompt: str = \"\"\n\n video_linear_start_weight: float = 1.0\n video_linear_end_weight: float = 0.5\n video_start_step_percentage: float = 0.0\n video_end_step_percentage: float = 1.0\n zero123_linear_start_weight: float = 1.0\n zero123_linear_end_weight: float = 1.0\n zero123_start_step_percentage: float = 0.0\n zero123_end_step_percentage: float = 1.0\n\n refiner_strength: float = 0.3\n refiner_guidance_scale: float = 12.0\n\n name: str = \"new_balance_used\"\n input_image_path: str = \"tmp/new_balance_used/012.png\"" } ]
import os
import yaml
import re
import torch
import numpy as np
import imageio.v3 as imageio
from typing import List, Any
from yaml.parser import ParserError
from PIL import Image
from diffusers.pipelines import DiffusionPipeline
from diffusers.models import UNet2DConditionModel, AutoencoderKL
from diffusers.schedulers import DPMSolverMultistepScheduler, EulerDiscreteScheduler
from transformers import CLIPVisionModelWithProjection
from .models import CLIPCameraProjection
from .pipelines import ViVid123Pipeline
from .configs import ViVid123BaseSchema
10,846
video_linear_end_weight: float = 0.5, video_start_step_percentage: float = 0.0, video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. 
""" assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet")
def prepare_cam_pose_input( num_frames: int = 25, delta_elevation_start: float = 0.0, delta_elevation_end: float = 0.0, delta_azimuth_start: float = -45.0, delta_azimuth_end: float = 45.0, delta_radius_start: float = 0.0, delta_radius_end: float = 0.0, ): r""" The function to prepare the input to the vivid123 pipeline Args: delta_elevation_start (`float`, *optional*, defaults to 0.0): The starting relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_elevation_end (`float`, *optional*, defaults to 0.0): The ending relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_start (`float`, *optional*, defaults to -45.0): The starting relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_end (`float`, *optional*, defaults to 45.0): The ending relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. Returns: """ cam_elevation = np.radians(np.linspace(delta_elevation_start, delta_elevation_end, num_frames))[..., None] cam_azimuth = np.radians(np.linspace(delta_azimuth_start, delta_azimuth_end, num_frames)) cam_azimuth_sin_cos = np.stack([np.sin(cam_azimuth), np.cos(cam_azimuth)], axis=-1) cam_radius = np.linspace(delta_radius_start, delta_radius_end, num_frames)[..., None] cam_pose_np = np.concatenate([cam_elevation, cam_azimuth_sin_cos, cam_radius], axis=-1) cam_pose_torch = torch.from_numpy(cam_pose_np) return cam_pose_torch # refer to https://stackoverflow.com/a/33507138/6257375 def conver_rgba_to_rgb_white_bg( image: Image, H: int = 256, W: int = 256, ): input_image = image.convert("RGBA").resize((H, W), Image.BICUBIC) background = Image.new("RGBA", input_image.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, input_image) return alpha_composite def prepare_fusion_schedule_linear( num_inference_steps: int = 50, video_linear_start_weight: float = 1.0, video_linear_end_weight: float = 0.5, video_start_step_percentage: float = 0.0, video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. 
The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. """ assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = 
torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet")
zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection")
0
2023-11-27 22:48:17+00:00
16k
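The vivid123 record above fuses the two denoisers with per-step linear weights: at step i, noise_pred = video_schedule[i] * noise_pred_video + zero123_schedule[i] * noise_pred_zero123. A minimal sketch of those weight schedules under the record's default settings (full-range linear interpolation, without the padding logic for partial step ranges), assuming plain PyTorch:

import torch

def linear_fusion_weights(num_inference_steps: int = 50,
                          video_start: float = 1.0, video_end: float = 0.5,
                          zero123_start: float = 1.0, zero123_end: float = 1.0):
    # One weight per denoising step for each branch, linearly interpolated
    # from the start value to the end value over the whole trajectory.
    video_schedule = torch.linspace(video_start, video_end, num_inference_steps)
    zero123_schedule = torch.linspace(zero123_start, zero123_end, num_inference_steps)
    return video_schedule, zero123_schedule

video_w, zero123_w = linear_fusion_weights()
# fused prediction at step i:
#   noise_pred = video_w[i] * noise_pred_video + zero123_w[i] * noise_pred_zero123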
TISUnion/PrimeBackup
prime_backup/debug_entry.py
[ { "identifier": "logger", "path": "prime_backup/logger.py", "snippet": "def __create_logger() -> logging.Logger:\ndef get() -> logging.Logger:" }, { "identifier": "CreateBackupAction", "path": "prime_backup/action/create_backup_action.py", "snippet": "class CreateBackupAction(CreateBackupActionBase):\n\tdef __init__(self, creator: Operator, comment: str, *, tags: Optional[BackupTags] = None, expire_timestamp_ns: Optional[int] = None):\n\t\tsuper().__init__()\n\t\tif tags is None:\n\t\t\ttags = BackupTags()\n\n\t\tself.creator = creator\n\t\tself.comment = comment\n\t\tself.tags = tags\n\t\tself.expire_timestamp_ns = expire_timestamp_ns\n\n\t\tself.__pre_calc_result = _PreCalculationResult()\n\t\tself.__blob_store_st: Optional[os.stat_result] = None\n\t\tself.__blob_store_in_cow_fs: Optional[bool] = None\n\n\t\tself.__batch_query_manager: Optional[BatchQueryManager] = None\n\t\tself.__blob_by_size_cache: Dict[int, bool] = {}\n\t\tself.__blob_by_hash_cache: Dict[str, schema.Blob] = {}\n\n\tdef __scan_files(self) -> _ScanResult:\n\t\tcollected = []\n\n\t\tsource_path = self.config.source_path\n\t\tscanned_targets: Dict[str, bool] = {} # use as an ordered set\n\t\tscan_queue: Deque[Path] = collections.deque() # a queue of paths related to the source_path\n\t\tfor scan_target in self.config.backup.targets:\n\t\t\tscan_queue.append(Path(scan_target))\n\n\t\twhile len(scan_queue) > 0:\n\t\t\tscan_target = scan_queue.popleft()\n\t\t\tif (target_posix := scan_target.as_posix()) in scanned_targets:\n\t\t\t\tcontinue\n\t\t\tscanned_targets[target_posix] = True\n\n\t\t\ttarget_path = source_path / scan_target\n\t\t\tif not target_path.exists():\n\t\t\t\tself.logger.info('Skipping not-exist backup target {}'.format(target_path))\n\t\t\t\tcontinue\n\t\t\tif not path_utils.is_relative_to(target_path, source_path):\n\t\t\t\tself.logger.warning(\"Skipping backup target {} cuz it's not inside the source path {}\".format(target_path, source_path))\n\t\t\t\tcontinue\n\n\t\t\tcollected.append(target_path)\n\n\t\t\tif target_path.is_symlink() and self.config.backup.follow_target_symlink:\n\t\t\t\tscan_queue.append(target_path.readlink())\n\t\t\t\tcontinue\n\n\t\t\t# as-is policy, don't scan into symlink\n\t\t\tif not target_path.is_symlink() and target_path.is_dir():\n\t\t\t\tfor dir_path, dir_names, file_names in os.walk(target_path):\n\t\t\t\t\tfor name in file_names + dir_names:\n\t\t\t\t\t\tfile_path = Path(dir_path) / name\n\t\t\t\t\t\tif not self.config.backup.is_file_ignore(file_path):\n\t\t\t\t\t\t\tcollected.append(file_path)\n\n\t\treturn _ScanResult(all_file_paths=collected, root_targets=list(scanned_targets.keys()))\n\n\tdef __pre_calculate_hash(self, session: DbSession, scan_result: _ScanResult):\n\t\tstats = self.__pre_calc_result.stats\n\t\thashes = self.__pre_calc_result.hashes\n\t\tstats.clear()\n\t\thashes.clear()\n\n\t\tsizes = set()\n\t\tfor path in scan_result.all_file_paths:\n\t\t\tst = path.lstat()\n\t\t\tstats[path] = st\n\t\t\tif stat.S_ISREG(st.st_mode):\n\t\t\t\tsizes.add(st.st_size)\n\n\t\thash_dict_lock = threading.Lock()\n\t\texistence = session.has_blob_with_size_batched(list(sizes))\n\t\tself.__blob_by_size_cache.update(existence)\n\n\t\tdef hash_worker(pth: Path):\n\t\t\th = hash_utils.calc_file_hash(pth)\n\t\t\twith hash_dict_lock:\n\t\t\t\thashes[pth] = h\n\n\t\twith FailFastThreadPool(name='hasher') as pool:\n\t\t\tfor path in scan_result.all_file_paths:\n\t\t\t\tst = stats[path]\n\t\t\t\tif stat.S_ISREG(st.st_mode):\n\t\t\t\t\tif existence[st.st_size]:\n\t\t\t\t\t\t# we 
need to hash the file, sooner or later\n\t\t\t\t\t\tpool.submit(hash_worker, path)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpass # will use hash_once policy\n\n\[email protected]_property\n\tdef __temp_path(self) -> Path:\n\t\tp = self.config.temp_path\n\t\tp.mkdir(parents=True, exist_ok=True)\n\t\treturn p\n\n\tdef __get_or_create_blob(self, session: DbSession, src_path: Path, st: os.stat_result) -> Generator[Any, Any, Tuple[schema.Blob, os.stat_result]]:\n\t\tsrc_path_str = repr(src_path.as_posix())\n\t\tsrc_path_md5 = hashlib.md5(src_path_str.encode('utf8')).hexdigest()\n\n\t\[email protected]\n\t\tdef make_temp_file() -> ContextManager[Path]:\n\t\t\ttemp_file_name = f'blob_{os.getpid()}_{threading.current_thread().ident}_{src_path_md5}.tmp'\n\t\t\ttemp_file_path = self.__temp_path / temp_file_name\n\t\t\twith contextlib.ExitStack() as exit_stack:\n\t\t\t\texit_stack.callback(functools.partial(self._remove_file, temp_file_path))\n\t\t\t\tyield temp_file_path\n\n\t\tdef attempt_once(last_chance: bool = False) -> Generator[Any, Any, schema.Blob]:\n\t\t\tcompress_method: CompressMethod = self.config.backup.get_compress_method_from_size(st.st_size)\n\t\t\tcan_copy_on_write = (\n\t\t\t\t\tfile_utils.HAS_COPY_FILE_RANGE and\n\t\t\t\t\tcompress_method == CompressMethod.plain and\n\t\t\t\t\tself.__blob_store_in_cow_fs and\n\t\t\t\t\tst.st_dev == self.__blob_store_st.st_dev\n\t\t\t)\n\n\t\t\tpolicy: Optional[_BlobCreatePolicy] = None\n\t\t\tblob_hash: Optional[str] = None\n\t\t\tblob_content: Optional[bytes] = None\n\t\t\traw_size: Optional[int] = None\n\t\t\tstored_size: Optional[int] = None\n\t\t\tpre_calc_hash = self.__pre_calc_result.hashes.pop(src_path, None)\n\n\t\t\tif last_chance:\n\t\t\t\tpolicy = _BlobCreatePolicy.copy_hash\n\t\t\telif pre_calc_hash is not None: # hash already calculated? just use default\n\t\t\t\tpolicy = _BlobCreatePolicy.default\n\t\t\t\tblob_hash = pre_calc_hash\n\t\t\telif not can_copy_on_write: # do tricks iff. 
no COW copy\n\t\t\t\tif st.st_size <= _READ_ALL_SIZE_THRESHOLD:\n\t\t\t\t\tpolicy = _BlobCreatePolicy.read_all\n\t\t\t\t\twith open(src_path, 'rb') as f:\n\t\t\t\t\t\tblob_content = f.read(_READ_ALL_SIZE_THRESHOLD + 1)\n\t\t\t\t\tif len(blob_content) > _READ_ALL_SIZE_THRESHOLD:\n\t\t\t\t\t\tself.logger.warning('Read too many bytes for read_all policy, stat: {}, read: {}'.format(st.st_size, len(blob_content)))\n\t\t\t\t\t\traise _BlobFileChanged()\n\t\t\t\t\tblob_hash = hash_utils.calc_bytes_hash(blob_content)\n\t\t\t\telif st.st_size > _HASH_ONCE_SIZE_THRESHOLD:\n\t\t\t\t\tif (exist := self.__blob_by_size_cache.get(st.st_size)) is None:\n\t\t\t\t\t\t# existence is unknown yet\n\t\t\t\t\t\tyield BlobBySizeFetcher.Req(st.st_size)\n\t\t\t\t\t\tcan_hash_once = self.__blob_by_size_cache[st.st_size] is False\n\t\t\t\t\telse:\n\t\t\t\t\t\tcan_hash_once = exist is False\n\t\t\t\t\tif can_hash_once:\n\t\t\t\t\t\t# it's certain that this blob is unique, but notes: the following code\n\t\t\t\t\t\t# cannot be interrupted (yield), or other generator could make a same blob\n\t\t\t\t\t\tpolicy = _BlobCreatePolicy.hash_once\n\t\t\tif policy is None:\n\t\t\t\tpolicy = _BlobCreatePolicy.default\n\t\t\t\tblob_hash = hash_utils.calc_file_hash(src_path)\n\n\t\t\t# self.logger.info(\"%s %s %s\", policy.name, compress_method.name, src_path)\n\t\t\tif blob_hash is not None:\n\t\t\t\tmisc_utils.assert_true(policy != _BlobCreatePolicy.hash_once, 'unexpected policy')\n\n\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\treturn cache\n\t\t\t\tyield BlobByHashFetcher.Req(blob_hash)\n\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\treturn cache\n\n\t\t\t# notes: the following code cannot be interrupted (yield).\n\t\t\t# The blob is specifically generated by the generator\n\t\t\t# if any yield is done, ensure to check __blob_by_hash_cache again\n\n\t\t\tdef check_changes(new_size: int, new_hash: Optional[str]):\n\t\t\t\tif new_size != st.st_size:\n\t\t\t\t\tself.logger.warning('Blob size mismatch, previous: {}, current: {}'.format(st.st_size, new_size))\n\t\t\t\t\traise _BlobFileChanged()\n\t\t\t\tif blob_hash is not None and new_hash is not None and new_hash != blob_hash:\n\t\t\t\t\tself.logger.warning('Blob hash mismatch, previous: {}, current: {}'.format(blob_hash, new_hash))\n\t\t\t\t\traise _BlobFileChanged()\n\n\t\t\tdef bp_rba(h: str) -> Path:\n\t\t\t\tbp = blob_utils.get_blob_path(h)\n\t\t\t\tself._add_remove_file_rollbacker(bp)\n\t\t\t\treturn bp\n\n\t\t\tcompressor = Compressor.create(compress_method)\n\t\t\tif policy == _BlobCreatePolicy.copy_hash:\n\t\t\t\t# copy to temp file, calc hash, then compress to blob store\n\t\t\t\tmisc_utils.assert_true(blob_hash is None, 'blob_hash should not be calculated')\n\t\t\t\twith make_temp_file() as temp_file_path:\n\t\t\t\t\tfile_utils.copy_file_fast(src_path, temp_file_path)\n\t\t\t\t\tblob_hash = hash_utils.calc_file_hash(temp_file_path)\n\n\t\t\t\t\tmisc_utils.assert_true(last_chance, 'only last_chance=True can use do hash_once without checking uniqueness')\n\t\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\t\treturn cache\n\t\t\t\t\tyield BlobByHashFetcher.Req(blob_hash)\n\t\t\t\t\tif (cache := self.__blob_by_hash_cache.get(blob_hash)) is not None:\n\t\t\t\t\t\treturn cache\n\n\t\t\t\t\tblob_path = bp_rba(blob_hash)\n\t\t\t\t\tcr = compressor.copy_compressed(temp_file_path, blob_path, calc_hash=False)\n\t\t\t\t\traw_size, stored_size = cr.read_size, 
cr.write_size\n\n\t\t\telif policy == _BlobCreatePolicy.hash_once:\n\t\t\t\t# read once, compress+hash to temp file, then move\n\t\t\t\tmisc_utils.assert_true(blob_hash is None, 'blob_hash should not be calculated')\n\t\t\t\twith make_temp_file() as temp_file_path:\n\t\t\t\t\tcr = compressor.copy_compressed(src_path, temp_file_path, calc_hash=True)\n\t\t\t\t\tcheck_changes(cr.read_size, None) # the size must be unchanged, to satisfy the uniqueness\n\n\t\t\t\t\traw_size, blob_hash, stored_size = cr.read_size, cr.read_hash, cr.write_size\n\t\t\t\t\tblob_path = bp_rba(blob_hash)\n\n\t\t\t\t\t# reference: shutil.move, but os.replace is used\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.replace(temp_file_path, blob_path)\n\t\t\t\t\texcept OSError:\n\t\t\t\t\t\t# The temp dir is in the different file system to the blob store?\n\t\t\t\t\t\t# Whatever, use file copy as the fallback\n\t\t\t\t\t\tfile_utils.copy_file_fast(temp_file_path, blob_path)\n\n\t\t\telse:\n\t\t\t\tmisc_utils.assert_true(blob_hash is not None, 'blob_hash is None')\n\t\t\t\tblob_path = bp_rba(blob_hash)\n\n\t\t\t\tif policy == _BlobCreatePolicy.read_all:\n\t\t\t\t\t# the file content is already in memory, just write+compress to blob store\n\t\t\t\t\tmisc_utils.assert_true(blob_content is not None, 'blob_content is None')\n\t\t\t\t\twith compressor.open_compressed_bypassed(blob_path) as (writer, f):\n\t\t\t\t\t\tf.write(blob_content)\n\t\t\t\t\traw_size, stored_size = len(blob_content), writer.get_write_len()\n\t\t\t\telif policy == _BlobCreatePolicy.default:\n\t\t\t\t\tif can_copy_on_write and compress_method == CompressMethod.plain:\n\t\t\t\t\t\t# fast copy, then calc size and hash to verify\n\t\t\t\t\t\tfile_utils.copy_file_fast(src_path, blob_path)\n\t\t\t\t\t\tstored_size, h2 = hash_utils.calc_file_size_and_hash(blob_path)\n\t\t\t\t\t\traw_size = stored_size\n\t\t\t\t\t\tcheck_changes(stored_size, h2)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# copy+compress+hash to blob store\n\t\t\t\t\t\tcr = compressor.copy_compressed(src_path, blob_path, calc_hash=True)\n\t\t\t\t\t\traw_size, stored_size = cr.read_size, cr.write_size\n\t\t\t\t\t\tcheck_changes(cr.read_size, cr.read_hash)\n\t\t\t\telse:\n\t\t\t\t\traise AssertionError()\n\n\t\t\tmisc_utils.assert_true(blob_hash is not None, 'blob_hash is None')\n\t\t\tmisc_utils.assert_true(raw_size is not None, 'raw_size is None')\n\t\t\tmisc_utils.assert_true(stored_size is not None, 'stored_size is None')\n\t\t\treturn self._create_blob(\n\t\t\t\tsession,\n\t\t\t\thash=blob_hash,\n\t\t\t\tcompress=compress_method.name,\n\t\t\t\traw_size=raw_size,\n\t\t\t\tstored_size=stored_size,\n\t\t\t)\n\n\t\tfor i in range(_BLOB_FILE_CHANGED_RETRY_COUNT):\n\t\t\tlast_attempt = i == _BLOB_FILE_CHANGED_RETRY_COUNT - 1\n\t\t\tif i > 0:\n\t\t\t\tself.logger.warning('Try to create blob {} (attempt {} / {})'.format(src_path_str, i + 1, _BLOB_FILE_CHANGED_RETRY_COUNT))\n\t\t\tgen = attempt_once(last_chance=last_attempt)\n\t\t\ttry:\n\t\t\t\tquery = gen.send(None)\n\t\t\t\twhile True:\n\t\t\t\t\tresult = yield query\n\t\t\t\t\tquery = gen.send(result)\n\t\t\texcept StopIteration as e: # ok\n\t\t\t\tblob: schema.Blob = e.value\n\t\t\t\tself.__blob_by_size_cache[blob.raw_size] = True\n\t\t\t\tself.__blob_by_hash_cache[blob.hash] = blob\n\t\t\t\treturn blob, st\n\t\t\texcept _BlobFileChanged:\n\t\t\t\tself.logger.warning('Blob {} stat has changed, {}'.format(src_path_str, 'no more retry' if last_attempt else 'retrying'))\n\t\t\t\tst = src_path.lstat()\n\n\t\tself.logger.error('All blob copy attempts failed, is the file {} keeps 
changing?'.format(src_path_str))\n\t\traise VolatileBlobFile('blob file {} keeps changing'.format(src_path_str))\n\n\tdef __create_file(self, session: DbSession, path: Path) -> Generator[Any, Any, schema.File]:\n\t\trelated_path = path.relative_to(self.config.source_path)\n\n\t\tif (st := self.__pre_calc_result.stats.pop(path, None)) is None:\n\t\t\tst = path.lstat()\n\n\t\tblob: Optional[schema.Blob] = None\n\t\tcontent: Optional[bytes] = None\n\t\tif stat.S_ISREG(st.st_mode):\n\t\t\tgen = self.__get_or_create_blob(session, path, st)\n\t\t\ttry:\n\t\t\t\tquery = gen.send(None)\n\t\t\t\twhile True:\n\t\t\t\t\tresult = yield query\n\t\t\t\t\tquery = gen.send(result)\n\t\t\texcept StopIteration as e:\n\t\t\t\tblob, st = e.value\n\t\t\t\t# notes: st.st_size might be incorrect, use blob.raw_size instead\n\t\telif stat.S_ISDIR(st.st_mode):\n\t\t\tpass\n\t\telif stat.S_ISLNK(st.st_mode):\n\t\t\tcontent = path.readlink().as_posix().encode('utf8')\n\t\telse:\n\t\t\traise UnsupportedFileFormat(st.st_mode)\n\n\t\treturn session.create_file(\n\t\t\tpath=related_path.as_posix(),\n\t\t\tcontent=content,\n\n\t\t\tmode=st.st_mode,\n\t\t\tuid=st.st_uid,\n\t\t\tgid=st.st_gid,\n\t\t\tctime_ns=st.st_ctime_ns,\n\t\t\tmtime_ns=st.st_mtime_ns,\n\t\t\tatime_ns=st.st_atime_ns,\n\n\t\t\tadd_to_session=False,\n\t\t\tblob=blob,\n\t\t)\n\n\tdef run(self) -> BackupInfo:\n\t\tsuper().run()\n\t\tself.__blob_by_size_cache.clear()\n\t\tself.__blob_by_hash_cache.clear()\n\n\t\ttry:\n\t\t\twith DbAccess.open_session() as session:\n\t\t\t\tself.__batch_query_manager = BatchQueryManager(session, self.__blob_by_size_cache, self.__blob_by_hash_cache)\n\n\t\t\t\tscan_result = self.__scan_files()\n\t\t\t\tbackup = session.create_backup(\n\t\t\t\t\tcreator=str(self.creator),\n\t\t\t\t\tcomment=self.comment,\n\t\t\t\t\ttargets=scan_result.root_targets,\n\t\t\t\t\ttags=self.tags.to_dict(),\n\t\t\t\t)\n\t\t\t\tself.logger.info('Creating backup {} on {}'.format(backup, scan_result.root_targets))\n\n\t\t\t\tif self.config.get_effective_concurrency() > 1:\n\t\t\t\t\tself.__pre_calculate_hash(session, scan_result)\n\t\t\t\t\tself.logger.info('Pre-calculate all file hash done')\n\n\t\t\t\tblob_utils.prepare_blob_directories()\n\t\t\t\tbs_path = blob_utils.get_blob_store()\n\t\t\t\tself.__blob_store_st = bs_path.stat()\n\t\t\t\tself.__blob_store_in_cow_fs = file_utils.does_fs_support_cow(bs_path)\n\n\t\t\t\tfiles = []\n\t\t\t\tschedule_queue: Deque[Tuple[Generator, Any]] = collections.deque()\n\t\t\t\tfor file_path in scan_result.all_file_paths:\n\t\t\t\t\tschedule_queue.append((self.__create_file(session, file_path), None))\n\t\t\t\twhile len(schedule_queue) > 0:\n\t\t\t\t\tgen, value = schedule_queue.popleft()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdef callback(v, g=gen):\n\t\t\t\t\t\t\tschedule_queue.appendleft((g, v))\n\n\t\t\t\t\t\tquery = gen.send(value)\n\t\t\t\t\t\tself.__batch_query_manager.query(query, callback)\n\t\t\t\t\texcept StopIteration as e:\n\t\t\t\t\t\tfiles.append(misc_utils.ensure_type(e.value, schema.File))\n\n\t\t\t\t\tself.__batch_query_manager.flush_if_needed()\n\t\t\t\t\tif len(schedule_queue) == 0:\n\t\t\t\t\t\tself.__batch_query_manager.flush()\n\n\t\t\t\tself._finalize_backup_and_files(session, backup, files)\n\t\t\t\tinfo = BackupInfo.of(backup)\n\n\t\t\ts = self.get_new_blobs_summary()\n\t\t\tself.logger.info('Create backup #{} done, +{} blobs (size {} / {})'.format(\n\t\t\t\tinfo.id, s.count, ByteCount(s.stored_size).auto_str(), ByteCount(s.raw_size).auto_str(),\n\t\t\t))\n\t\t\treturn info\n\n\t\texcept Exception as 
e:\n\t\t\tself._apply_blob_rollback()\n\t\t\traise e" }, { "identifier": "DeleteBackupAction", "path": "prime_backup/action/delete_backup_action.py", "snippet": "class DeleteBackupAction(Action[DeleteBackupResult]):\n\tdef __init__(self, backup_id: int):\n\t\tsuper().__init__()\n\t\tself.backup_id = misc_utils.ensure_type(backup_id, int)\n\n\tdef run(self) -> DeleteBackupResult:\n\t\tself.logger.info('Deleting backup #{}'.format(self.backup_id))\n\t\twith DbAccess.open_session() as session:\n\t\t\tbackup = session.get_backup(self.backup_id)\n\t\t\tinfo = BackupInfo.of(backup)\n\n\t\t\thashes = []\n\t\t\tfor file in backup.files:\n\t\t\t\tif file.blob_hash is not None:\n\t\t\t\t\thashes.append(file.blob_hash)\n\t\t\t\tsession.delete_file(file)\n\t\t\tsession.delete_backup(backup)\n\n\t\torphan_blob_cleaner = DeleteOrphanBlobsAction(hashes, quiet=True)\n\t\tbls = orphan_blob_cleaner.run()\n\n\t\tself.logger.info('Deleted backup #{} done, -{} blobs (size {} / {})'.format(\n\t\t\tinfo.id, bls.count, ByteCount(bls.stored_size).auto_str(), ByteCount(bls.raw_size).auto_str(),\n\t\t))\n\t\treturn DeleteBackupResult(info, bls)" }, { "identifier": "ExportBackupToDirectoryAction", "path": "prime_backup/action/export_backup_action.py", "snippet": "class ExportBackupToDirectoryAction(_ExportBackupActionBase):\n\tclass _ExportItem(NamedTuple):\n\t\tfile: schema.File\n\t\tpath: Path # path to export, related to self.output_path\n\t\tpath_posix: str\n\n\tdef __init__(\n\t\t\tself, backup_id: int, output_path: Path, *,\n\t\t\trestore_mode: bool = False,\n\t\t\tchild_to_export: Optional[Path] = None,\n\t\t\trecursively_export_child: bool = False,\n\t\t\t**kwargs,\n\t):\n\t\t\"\"\"\n\t\t:param restore_mode: recover what it was like -- delete all backup targets before export\n\t\t\"\"\"\n\t\tsuper().__init__(backup_id, output_path, **kwargs)\n\t\tself.restore_mode = restore_mode\n\t\tself.child_to_export = child_to_export\n\t\tself.recursively_export_child = recursively_export_child\n\n\t\tif self.restore_mode and self.child_to_export is not None:\n\t\t\traise ValueError('restore mode does not support exporting child')\n\n\t@classmethod\n\tdef __set_attrs(cls, file: schema.File, file_path: Path):\n\t\t# reference: tarfile.TarFile.extractall, tarfile.TarFile._extract_member\n\n\t\tis_link = stat.S_ISLNK(file.mode)\n\n\t\tif _i_am_root() and file.uid is not None and file.gid is not None:\n\t\t\tu, g = int(file.uid), int(file.gid)\n\t\t\tif is_link and hasattr(os, 'lchown'):\n\t\t\t\tos.lchown(file_path, u, g)\n\t\t\telse:\n\t\t\t\tos.chown(file_path, u, g)\n\n\t\tif not is_link:\n\t\t\tos.chmod(file_path, file.mode)\n\n\t\tif file.atime_ns is not None and file.mtime_ns is not None:\n\t\t\ttimes = (file.atime_ns / 1e9, file.mtime_ns / 1e9)\n\t\t\tif is_link:\n\t\t\t\tif os.utime in os.supports_follow_symlinks:\n\t\t\t\t\tos.utime(file_path, times, follow_symlinks=False)\n\t\t\telse:\n\t\t\t\tos.utime(file_path, times)\n\n\tdef __prepare_for_export(self, item: _ExportItem, trash_bin: _TrashBin):\n\t\tfile_path = self.output_path / item.path\n\t\tif os.path.lexists(file_path):\n\t\t\ttrash_bin.add(file_path, item.path)\n\t\tfile_path.parent.mkdir(parents=True, exist_ok=True)\n\n\tdef __export_file(self, item: _ExportItem, exported_directories: 'queue.Queue[Tuple[schema.File, Path]]'):\n\t\tfile = item.file\n\t\tfile_path = self.output_path / item.path\n\n\t\tif stat.S_ISREG(file.mode):\n\t\t\tself.logger.debug('write file {}'.format(file.path))\n\t\t\tblob_path = 
blob_utils.get_blob_path(file.blob_hash)\n\t\t\tcompressor = Compressor.create(file.blob_compress)\n\t\t\tif compressor.get_method() == CompressMethod.plain:\n\t\t\t\tfile_utils.copy_file_fast(blob_path, file_path)\n\t\t\t\tif self.verify_blob:\n\t\t\t\t\tsah = hash_utils.calc_file_size_and_hash(file_path)\n\t\t\t\t\tself._verify_exported_blob(file, sah.size, sah.hash)\n\t\t\telse:\n\t\t\t\twith compressor.open_decompressed(blob_path) as f_in:\n\t\t\t\t\twith open(file_path, 'wb') as f_out:\n\t\t\t\t\t\tif self.verify_blob:\n\t\t\t\t\t\t\treader = BypassReader(f_in, calc_hash=True)\n\t\t\t\t\t\t\tshutil.copyfileobj(reader, f_out)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treader = None\n\t\t\t\t\t\t\tshutil.copyfileobj(f_in, f_out)\n\t\t\t\tif reader is not None:\n\t\t\t\t\tself._verify_exported_blob(file, reader.get_read_len(), reader.get_hash())\n\n\t\telif stat.S_ISDIR(file.mode):\n\t\t\tself.logger.debug('write dir {}'.format(file.path))\n\t\t\tfile_path.mkdir(parents=True, exist_ok=True)\n\t\t\texported_directories.put((file, file_path))\n\n\t\telif stat.S_ISLNK(file.mode):\n\t\t\tlink_target = file.content.decode('utf8')\n\t\t\tos.symlink(link_target, file_path)\n\t\t\tself.logger.debug('write symbolic link {} -> {}'.format(file_path, link_target))\n\t\telse:\n\t\t\tself._on_unsupported_file_mode(file)\n\n\t\tif not stat.S_ISDIR(file.mode):\n\t\t\tself.__set_attrs(file, file_path)\n\n\tdef _export_backup(self, session: DbSession, backup: schema.Backup) -> ExportFailures:\n\t\tfailures = ExportFailures(self.fail_soft)\n\n\t\t# 1. collect export item\n\n\t\tdef add_export_item(file_: schema.File, export_path: Path):\n\t\t\tfor t in backup.targets:\n\t\t\t\tif path_utils.is_relative_to(Path(file_.path), t):\n\t\t\t\t\texport_items.append(self._ExportItem(file_, export_path, export_path.as_posix()))\n\t\t\t\t\treturn\n\t\t\tself.logger.warning('Found out-of-backup-target file, ignored. file.path: {!r}, backup.targets: {}'.format(file, backup.targets))\n\n\t\texport_items: List[ExportBackupToDirectoryAction._ExportItem] = []\n\t\tif self.child_to_export is None:\n\t\t\tself.logger.info('Exporting {} to directory {}'.format(backup, self.output_path))\n\t\t\tfor file in backup.files:\n\t\t\t\tadd_export_item(file, Path(file.path))\n\t\telse:\n\t\t\tself.logger.info('Exporting child {!r} in {} to directory {}, recursively = {}'.format(self.child_to_export.as_posix(), backup, self.output_path, self.recursively_export_child))\n\t\t\tfor file in backup.files:\n\t\t\t\ttry:\n\t\t\t\t\trel_path = Path(file.path).relative_to(self.child_to_export)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tcontinue\n\t\t\t\tif rel_path != Path('.') and not self.recursively_export_child:\n\t\t\t\t\tcontinue\n\t\t\t\tadd_export_item(file, Path(self.child_to_export.name) / rel_path)\n\n\t\t# 2. 
do the export\n\n\t\tself.output_path.mkdir(parents=True, exist_ok=True)\n\t\tself.config.temp_path.mkdir(parents=True, exist_ok=True)\n\t\ttrash_bin_name_base = f'.{constants.PLUGIN_ID}.export_trashes'\n\t\ttrash_bin_dir_name = f'{trash_bin_name_base}_{os.getpid()}_{threading.current_thread().ident}'\n\t\ttrash_bin_path = self.config.temp_path / trash_bin_dir_name\n\t\tif self.config.temp_path.stat().st_dev != self.output_path.stat().st_dev:\n\t\t\ttrash_bin_path = self.output_path / trash_bin_dir_name\n\t\ttry:\n\t\t\t# remove existing undeleted trash bins\n\t\t\tfor f in trash_bin_path.parent.iterdir():\n\t\t\t\tif f.name.startswith(trash_bin_name_base):\n\t\t\t\t\tself.logger.warning('Removing existing undeleted trash bin {}'.format(f))\n\t\t\t\t\tfile_utils.rm_rf(f)\n\t\texcept OSError as e:\n\t\t\tself.logger.warning('Error when removing existing undeleted trash bins: {}'.format(e))\n\n\t\ttrash_bin = _TrashBin(trash_bin_path)\n\t\ttry:\n\t\t\tif self.restore_mode:\n\t\t\t\t# in restore mode, recover what it was like\n\t\t\t\t# if the backup does not have the target, don't keep the target\n\t\t\t\tfor target in backup.targets:\n\t\t\t\t\ttarget_path = self.output_path / target\n\t\t\t\t\tif os.path.lexists(target_path):\n\t\t\t\t\t\ttrash_bin.add(target_path, Path(target))\n\n\t\t\t# parent dir first, so the parent will be added to trash-bin first\n\t\t\texport_items.sort(key=lambda ei: ei.path_posix)\n\t\t\tfor item in export_items:\n\t\t\t\twith failures.handling_exception(item.file):\n\t\t\t\t\tself.__prepare_for_export(item, trash_bin)\n\n\t\t\tdirectories: 'queue.Queue[Tuple[schema.File, Path]]' = queue.Queue()\n\t\t\twith FailFastThreadPool('export') as pool:\n\t\t\t\tdef export_worker(item_: ExportBackupToDirectoryAction._ExportItem):\n\t\t\t\t\twith failures.handling_exception(item_.file):\n\t\t\t\t\t\tself.__export_file(item_, directories)\n\n\t\t\t\tfor item in export_items:\n\t\t\t\t\tpool.submit(export_worker, item)\n\n\t\t\t# child dir first\n\t\t\t# reference: tarfile.TarFile.extractall\n\t\t\tfor dir_file, dir_file_path in sorted(\n\t\t\t\t\tcollection_utils.drain_queue(directories),\n\t\t\t\t\tkey=lambda d: d[0].path,\n\t\t\t\t\treverse=True,\n\t\t\t):\n\t\t\t\twith failures.handling_exception(dir_file):\n\t\t\t\t\tself.__set_attrs(dir_file, dir_file_path)\n\n\t\texcept Exception:\n\t\t\tself.logger.warning('Error occurs during export to directory, applying rollback')\n\t\t\ttrash_bin.restore()\n\t\t\traise\n\t\tfinally:\n\t\t\ttrash_bin.erase()\n\n\t\treturn failures" }, { "identifier": "ExportBackupToTarAction", "path": "prime_backup/action/export_backup_action.py", "snippet": "class ExportBackupToTarAction(_ExportBackupActionBase):\n\tdef __init__(self, backup_id: int, output_path: Path, tar_format: TarFormat, **kwargs):\n\t\tsuper().__init__(backup_id, output_path, **kwargs)\n\t\tself.tar_format = tar_format\n\n\tdef is_interruptable(self) -> bool:\n\t\treturn True\n\n\[email protected]\n\tdef __open_tar(self) -> ContextManager[tarfile.TarFile]:\n\t\twith open(self.output_path, 'wb') as f:\n\t\t\tcompressor = Compressor.create(self.tar_format.value.compress_method)\n\t\t\twith compressor.compress_stream(f) as f_compressed:\n\t\t\t\twith tarfile.open(fileobj=f_compressed, mode=self.tar_format.value.mode_w) as tar:\n\t\t\t\t\tyield tar\n\n\tdef __export_file(self, tar: tarfile.TarFile, file: schema.File):\n\t\tinfo = tarfile.TarInfo(name=file.path)\n\t\tinfo.mode = file.mode\n\n\t\tif file.uid is not None:\n\t\t\tinfo.uid = file.uid\n\t\t\tif (uid_name := 
platform_utils.uid_to_name(int(file.uid))) is not None:\n\t\t\t\tinfo.uname = uid_name\n\t\tif file.gid is not None:\n\t\t\tinfo.gid = file.gid\n\t\t\tif (gid_name := platform_utils.gid_to_name(int(file.gid))) is not None:\n\t\t\t\tinfo.gname = gid_name\n\t\tif file.mtime_ns is not None:\n\t\t\tinfo.mtime = int(file.mtime_ns / 1e9)\n\t\tif stat.S_ISREG(file.mode):\n\t\t\tself.logger.debug('add file {} to tarfile'.format(file.path))\n\t\t\tinfo.type = tarfile.REGTYPE\n\t\t\tinfo.size = file.blob_raw_size\n\t\t\tblob_path = blob_utils.get_blob_path(file.blob_hash)\n\n\t\t\twith Compressor.create(file.blob_compress).open_decompressed(blob_path) as stream:\n\t\t\t\t# Exception raised in TarFile.addfile might nuke the whole remaining tar file, which is bad\n\t\t\t\t# We read a few bytes from the stream, to *hopefully* trigger potential decompress exception in advanced,\n\t\t\t\t# make it fail before affecting the actual tar file\n\t\t\t\tpeek_reader = PeekReader(stream, 32 * 1024)\n\t\t\t\tpeek_reader.peek()\n\n\t\t\t\tif self.verify_blob:\n\t\t\t\t\treader = BypassReader(peek_reader, calc_hash=True)\n\t\t\t\t\ttar.addfile(tarinfo=info, fileobj=reader)\n\t\t\t\telse:\n\t\t\t\t\treader = None\n\t\t\t\t\tpeek_reader: Any\n\t\t\t\t\ttar.addfile(tarinfo=info, fileobj=peek_reader)\n\t\t\tif reader is not None:\n\t\t\t\t# notes: the read len is always <= info.size\n\t\t\t\tself._verify_exported_blob(file, reader.get_read_len(), reader.get_hash())\n\n\t\telif stat.S_ISDIR(file.mode):\n\t\t\tself.logger.debug('add dir {} to tarfile'.format(file.path))\n\t\t\tinfo.type = tarfile.DIRTYPE\n\t\t\ttar.addfile(tarinfo=info)\n\t\telif stat.S_ISLNK(file.mode):\n\t\t\tself.logger.debug('add symlink {} to tarfile'.format(file.path))\n\t\t\tlink_target = file.content.decode('utf8')\n\t\t\tinfo.type = tarfile.SYMTYPE\n\t\t\tinfo.linkname = link_target\n\t\t\ttar.addfile(tarinfo=info)\n\t\telse:\n\t\t\tself._on_unsupported_file_mode(file)\n\n\tdef _export_backup(self, session, backup: schema.Backup) -> ExportFailures:\n\t\tfailures = ExportFailures(self.fail_soft)\n\t\tif not self.output_path.name.endswith(self.tar_format.value.extension):\n\t\t\traise ValueError('bad output file extension for file name {!r}, should be {!r} for tar format {}'.format(\n\t\t\t\tself.output_path.name, self.tar_format.value.extension, self.tar_format.name,\n\t\t\t))\n\n\t\tself.logger.info('Exporting backup {} to tarfile {}'.format(backup, self.output_path))\n\t\tself.output_path.parent.mkdir(parents=True, exist_ok=True)\n\n\t\ttry:\n\t\t\twith self.__open_tar() as tar:\n\t\t\t\tfor file in backup.files:\n\t\t\t\t\tif self.is_interrupted.is_set():\n\t\t\t\t\t\tself.logger.info('Export to tarfile interrupted')\n\t\t\t\t\t\traise _ExportInterrupted()\n\n\t\t\t\t\twith failures.handling_exception(file):\n\t\t\t\t\t\tself.__export_file(tar, file)\n\n\t\t\t\tif self.create_meta:\n\t\t\t\t\tmeta_buf = self._create_meta_buf(backup)\n\t\t\t\t\tinfo = tarfile.TarInfo(name=BACKUP_META_FILE_NAME)\n\t\t\t\t\tinfo.mtime = int(time.time())\n\t\t\t\t\tinfo.size = len(meta_buf)\n\t\t\t\t\ttar.addfile(tarinfo=info, fileobj=BytesIO(meta_buf))\n\t\texcept Exception as e:\n\t\t\twith contextlib.suppress(OSError):\n\t\t\t\tself.output_path.unlink(missing_ok=True)\n\t\t\tif not isinstance(e, _ExportInterrupted):\n\t\t\t\traise\n\n\t\treturn failures" }, { "identifier": "ExportBackupToZipAction", "path": "prime_backup/action/export_backup_action.py", "snippet": "class ExportBackupToZipAction(_ExportBackupActionBase):\n\tdef is_interruptable(self) -> 
bool:\n\t\treturn True\n\n\tdef __export_file(self, zipf: zipfile.ZipFile, file: schema.File):\n\t\t# reference: zipf.writestr -> zipfile.ZipInfo.from_file\n\t\tif file.mtime_ns is not None:\n\t\t\tdate_time = time.localtime(file.mtime_ns / 1e9)\n\t\telse:\n\t\t\tdate_time = time.localtime()\n\t\tarc_name = file.path\n\t\twhile len(arc_name) > 0 and arc_name[0] in (os.sep, os.altsep):\n\t\t\tarc_name = arc_name[1:]\n\t\tif stat.S_ISDIR(file.mode) and not arc_name.endswith('/'):\n\t\t\tarc_name += '/'\n\n\t\tinfo = zipfile.ZipInfo(arc_name, date_time[0:6])\n\t\tinfo.external_attr = (file.mode & 0xFFFF) << 16\n\t\tinfo.compress_type = zipf.compression\n\n\t\tif stat.S_ISREG(file.mode):\n\t\t\tself.logger.debug('add file {} to zipfile'.format(file.path))\n\t\t\tinfo.file_size = file.blob_raw_size\n\t\t\tblob_path = blob_utils.get_blob_path(file.blob_hash)\n\n\t\t\twith Compressor.create(file.blob_compress).open_decompressed(blob_path) as stream:\n\t\t\t\twith zipf.open(info, 'w') as zip_item:\n\t\t\t\t\tif self.verify_blob:\n\t\t\t\t\t\treader = BypassReader(stream, calc_hash=True)\n\t\t\t\t\t\tshutil.copyfileobj(reader, zip_item)\n\t\t\t\t\telse:\n\t\t\t\t\t\treader = None\n\t\t\t\t\t\tshutil.copyfileobj(stream, zip_item)\n\t\t\tif reader is not None:\n\t\t\t\tself._verify_exported_blob(file, reader.get_read_len(), reader.get_hash())\n\n\t\telif stat.S_ISDIR(file.mode):\n\t\t\tself.logger.debug('add dir {} to zipfile'.format(file.path))\n\t\t\tinfo.external_attr |= 0x10\n\t\t\tzipf.writestr(info, b'')\n\t\telif stat.S_ISLNK(file.mode):\n\t\t\tself.logger.debug('add symlink {} to zipfile'.format(file.path))\n\t\t\twith zipf.open(info, 'w') as zip_item:\n\t\t\t\tzip_item.write(file.content)\n\t\telse:\n\t\t\tself._on_unsupported_file_mode(file)\n\n\tdef _export_backup(self, session, backup: schema.Backup) -> ExportFailures:\n\t\tfailures = ExportFailures(self.fail_soft)\n\t\tself.logger.info('Exporting backup {} to zipfile {}'.format(backup, self.output_path))\n\t\tself.output_path.parent.mkdir(parents=True, exist_ok=True)\n\n\t\ttry:\n\t\t\twith zipfile.ZipFile(self.output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:\n\t\t\t\tfor file in backup.files:\n\t\t\t\t\tif self.is_interrupted.is_set():\n\t\t\t\t\t\tself.logger.info('Export to zipfile interrupted')\n\t\t\t\t\t\traise _ExportInterrupted()\n\n\t\t\t\t\twith failures.handling_exception(file):\n\t\t\t\t\t\tself.__export_file(zipf, file)\n\n\t\t\t\tif self.create_meta:\n\t\t\t\t\tmeta_buf = self._create_meta_buf(backup)\n\t\t\t\t\tinfo = zipfile.ZipInfo(BACKUP_META_FILE_NAME, time.localtime()[0:6])\n\t\t\t\t\tinfo.compress_type = zipf.compression\n\t\t\t\t\tinfo.file_size = len(meta_buf)\n\t\t\t\t\twith zipf.open(info, 'w') as f:\n\t\t\t\t\t\tf.write(meta_buf)\n\n\t\texcept Exception as e:\n\t\t\twith contextlib.suppress(OSError):\n\t\t\t\tself.output_path.unlink(missing_ok=True)\n\t\t\tif not isinstance(e, _ExportInterrupted):\n\t\t\t\traise\n\n\t\treturn failures" }, { "identifier": "ImportBackupAction", "path": "prime_backup/action/import_backup_action.py", "snippet": "class ImportBackupAction(CreateBackupActionBase):\n\tdef __init__(self, file_path: Path, backup_format: Optional[StandaloneBackupFormat] = None, *, ensure_meta: bool = True):\n\t\tsuper().__init__()\n\n\t\tif backup_format is None:\n\t\t\tbackup_format = StandaloneBackupFormat.from_file_name(file_path)\n\t\t\tif backup_format is None:\n\t\t\t\traise ValueError('cannot infer backup format from {}'.format(file_path))\n\n\t\tself.file_path = file_path\n\t\tself.backup_format = 
backup_format\n\t\tself.ensure_meta = ensure_meta\n\n\t\tself.__blob_cache: Dict[str, schema.Blob] = {}\n\n\tdef __create_blob_file(self, file_reader: IO[bytes], sah: SizeAndHash) -> Tuple[int, CompressMethod]:\n\t\tblob_path = blob_utils.get_blob_path(sah.hash)\n\t\tself._add_remove_file_rollbacker(blob_path)\n\n\t\tcompress_method: CompressMethod = self.config.backup.get_compress_method_from_size(sah.size)\n\t\tcompressor = Compressor.create(compress_method)\n\t\twith compressor.open_compressed_bypassed(blob_path) as (writer, f):\n\t\t\tshutil.copyfileobj(file_reader, f)\n\n\t\treturn writer.get_write_len(), compress_method\n\n\tdef __create_blob(self, session: DbSession, file_reader: IO[bytes], sah: SizeAndHash) -> schema.Blob:\n\t\tstored_size, compress_method = self.__create_blob_file(file_reader, sah)\n\t\tblob = self._create_blob(\n\t\t\tsession,\n\t\t\thash=sah.hash,\n\t\t\tcompress=compress_method.name,\n\t\t\traw_size=sah.size,\n\t\t\tstored_size=stored_size,\n\t\t)\n\t\tself.__blob_cache[sah.hash] = blob\n\t\treturn blob\n\n\t@classmethod\n\tdef __format_path(cls, path: str) -> str:\n\t\treturn Path(path).as_posix()\n\n\tdef __import_member(\n\t\t\tself, session: DbSession,\n\t\t\tmember: PackedBackupFileHandler.Member, now_ns: int,\n\t\t\tfile_sah: Optional[SizeAndHash],\n\t):\n\t\tblob: Optional[schema.Blob] = None\n\t\tcontent: Optional[bytes] = None\n\n\t\tif member.is_file():\n\t\t\tmisc_utils.assert_true(file_sah is not None, 'file_sah should not be None for files')\n\t\t\tif (blob := self.__blob_cache.get(file_sah.hash)) is None:\n\t\t\t\twith member.open() as f:\n\t\t\t\t\tblob = self.__create_blob(session, f, file_sah)\n\t\telif member.is_dir():\n\t\t\tpass\n\t\telif member.is_link():\n\t\t\tcontent = self.__format_path(member.read_link()).encode('utf8')\n\t\telse:\n\t\t\traise NotImplementedError('member path={!r} mode={} is not supported yet'.format(member.path, member.mode))\n\n\t\tmtime_ns = member.mtime_ns\n\t\treturn session.create_file(\n\t\t\tpath=self.__format_path(member.path),\n\t\t\tcontent=content,\n\n\t\t\tmode=member.mode,\n\t\t\tuid=member.uid,\n\t\t\tgid=member.gid,\n\t\t\tctime_ns=now_ns,\n\t\t\tmtime_ns=mtime_ns,\n\t\t\tatime_ns=mtime_ns,\n\n\t\t\tadd_to_session=False,\n\t\t\tblob=blob,\n\t\t)\n\n\tdef __import_packed_backup_file(self, session: DbSession, file_holder: PackedBackupFileHandler.FileHolder) -> schema.Backup:\n\t\tmeta: Optional[BackupMeta] = None\n\n\t\tif (meta_obj := file_holder.get_member(BACKUP_META_FILE_NAME)) is not None:\n\t\t\twith meta_obj.open() as meta_reader:\n\t\t\t\ttry:\n\t\t\t\t\tmeta_dict = json.load(meta_reader)\n\t\t\t\t\tmeta = BackupMeta.from_dict(meta_dict)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.logger.error('Read backup meta from {!r} failed: {}'.format(BACKUP_META_FILE_NAME, e))\n\t\t\t\t\tif self.ensure_meta:\n\t\t\t\t\t\traise BackupMetadataNotFound(e)\n\t\t\t\telse:\n\t\t\t\t\tself.logger.info('Read backup meta from {!r} ok'.format(BACKUP_META_FILE_NAME))\n\t\telse:\n\t\t\tself.logger.info('The importing backup does not contain the backup meta file {!r}'.format(BACKUP_META_FILE_NAME))\n\t\t\tif self.ensure_meta:\n\t\t\t\traise BackupMetadataNotFound('{} does not exist'.format(BACKUP_META_FILE_NAME))\n\n\t\tmembers: List[PackedBackupFileHandler.Member] = list(filter(\n\t\t\tlambda m: m.path != BACKUP_META_FILE_NAME,\n\t\t\tfile_holder.list_member(),\n\t\t))\n\t\troot_files = []\n\t\tfor member in members:\n\t\t\titem = member.path\n\t\t\tif item not in ('', '.', '..') and (item.count('/') == 0 or 
(item.count('/') == 1 and item.endswith('/'))):\n\t\t\t\troot_files.append(item.rstrip('/'))\n\n\t\tif meta is None:\n\t\t\tmeta = BackupMeta.get_default()\n\t\t\tmeta.targets = root_files\n\t\t\tself.logger.info('No valid backup meta, generating a default one, target: {}'.format(meta.targets))\n\t\telse:\n\t\t\textra_files = list(sorted(set(root_files).difference(set(meta.targets))))\n\t\t\tif len(extra_files) > 0:\n\t\t\t\tself.logger.warning('Found extra files inside {!r}: {}. They are not included in the targets {}'.format(\n\t\t\t\t\tself.file_path.name, extra_files, meta.targets,\n\t\t\t\t))\n\n\t\tbackup = session.create_backup(**meta.to_backup_kwargs())\n\n\t\tself.logger.info('Importing backup {} from {!r}'.format(backup, self.file_path.name))\n\t\tnow_ns = time.time_ns()\n\n\t\tsah_dict: Dict[int, SizeAndHash] = {}\n\t\tfor i, member in enumerate(members):\n\t\t\tif member.is_file():\n\t\t\t\twith member.open() as f:\n\t\t\t\t\tsah_dict[i] = hash_utils.calc_reader_size_and_hash(f)\n\n\t\tblobs = session.get_blobs([sah.hash for sah in sah_dict.values()])\n\t\tfor h, blob in blobs.items():\n\t\t\tself.__blob_cache[h] = blob\n\n\t\tfiles = []\n\t\tblob_utils.prepare_blob_directories()\n\t\tfor i, member in enumerate(members):\n\t\t\tfiles.append(self.__import_member(session, member, now_ns, sah_dict.get(i)))\n\n\t\tself._finalize_backup_and_files(session, backup, files)\n\t\treturn backup\n\n\tdef run(self) -> BackupInfo:\n\t\tif isinstance(self.backup_format.value, TarFormat):\n\t\t\ttar_format = self.backup_format.value\n\t\telse:\n\t\t\ttar_format = None\n\n\t\tsuper().run()\n\t\tself.__blob_cache.clear()\n\n\t\ttry:\n\t\t\twith DbAccess.open_session() as session:\n\t\t\t\thandler: PackedBackupFileHandler\n\t\t\t\tif tar_format is not None:\n\t\t\t\t\thandler = TarBackupHandler(tar_format)\n\t\t\t\telse: # zip\n\t\t\t\t\thandler = ZipBackupHandler()\n\n\t\t\t\twith handler.open_file(self.file_path) as file_holder:\n\t\t\t\t\tbackup = self.__import_packed_backup_file(session, file_holder)\n\t\t\t\tinfo = BackupInfo.of(backup)\n\n\t\t\ts = self.get_new_blobs_summary()\n\t\t\tself.logger.info('Import backup #{} done, +{} blobs (size {} / {})'.format(\n\t\t\t\tinfo.id, s.count, ByteCount(s.stored_size).auto_str(), ByteCount(s.raw_size).auto_str(),\n\t\t\t))\n\t\t\treturn info\n\n\t\texcept Exception:\n\t\t\tself._apply_blob_rollback()\n\t\t\traise" }, { "identifier": "ListBackupAction", "path": "prime_backup/action/list_backup_action.py", "snippet": "class ListBackupAction(_ListBackupActionBase[List[BackupInfo]]):\n\tdef run(self) -> List[BackupInfo]:\n\t\twith DbAccess.open_session() as session:\n\t\t\tbackups = session.list_backup(backup_filter=self.backup_filter, limit=self.limit, offset=self.offset)\n\t\t\treturn [BackupInfo.of(backup) for backup in backups]" }, { "identifier": "MigrateCompressMethodAction", "path": "prime_backup/action/migrate_compress_method_action.py", "snippet": "class MigrateCompressMethodAction(Action[SizeDiff]):\n\tdef __init__(self, new_compress_method: CompressMethod):\n\t\tsuper().__init__()\n\t\tself.new_compress_method = new_compress_method\n\t\tself.__migrated_blob_hashes: List[str] = []\n\t\tself.__affected_backup_ids: Set[int] = set()\n\n\t@classmethod\n\tdef __get_blob_paths(cls, h: str) -> Tuple[Path, Path]:\n\t\tblob_path = blob_utils.get_blob_path(h)\n\t\told_trash_path = blob_path.parent / (blob_path.name + _OLD_BLOB_SUFFIX)\n\t\treturn blob_path, old_trash_path\n\n\tdef __migrate_blob(self, blob: schema.Blob) -> bool:\n\t\tnew_compress_method 
= self.config.backup.get_compress_method_from_size(blob.raw_size, compress_method_override=self.new_compress_method)\n\t\tdecompressor = Compressor.create(blob.compress)\n\t\tcompressor = Compressor.create(new_compress_method)\n\t\tif decompressor.get_method() == compressor.get_method():\n\t\t\treturn False\n\n\t\tblob_path, old_trash_path = self.__get_blob_paths(blob.hash)\n\t\tblob_path.replace(old_trash_path)\n\t\twith decompressor.open_decompressed(old_trash_path) as f_src:\n\t\t\twith compressor.open_compressed_bypassed(blob_path) as (writer, f_dst):\n\t\t\t\tshutil.copyfileobj(f_src, f_dst)\n\n\t\tblob.compress = new_compress_method.name\n\t\tblob.stored_size = writer.get_write_len()\n\t\treturn True\n\n\tdef __migrate_blobs_and_sync_files(self, session: DbSession, blobs: List[schema.Blob]):\n\t\tblob_mapping = {}\n\t\tfor blob in blobs:\n\t\t\tif self.__migrate_blob(blob):\n\t\t\t\tblob_mapping[blob.hash] = blob\n\t\t\t\tself.__migrated_blob_hashes.append(blob.hash)\n\n\t\tfor file in session.get_file_by_blob_hashes(list(blob_mapping.keys())):\n\t\t\tblob = blob_mapping[file.blob_hash]\n\t\t\tfile.blob_compress = blob.compress\n\t\t\tfile.blob_stored_size = blob.stored_size\n\t\t\tself.__affected_backup_ids.add(file.backup_id)\n\n\tdef __update_backups(self, session: DbSession):\n\t\tbackup_ids = list(sorted(self.__affected_backup_ids))\n\t\tbackups = session.get_backups(backup_ids)\n\t\tfor backup_id in backup_ids:\n\t\t\tbackup = backups[backup_id]\n\t\t\tbackup.file_stored_size_sum = session.calc_file_stored_size_sum(backup.id)\n\n\tdef __erase_old_blobs(self):\n\t\tfor h in self.__migrated_blob_hashes:\n\t\t\t_, old_trash_path = self.__get_blob_paths(h)\n\t\t\told_trash_path.unlink()\n\n\tdef __rollback(self):\n\t\tfor h in self.__migrated_blob_hashes:\n\t\t\tblob_path, old_trash_path = self.__get_blob_paths(h)\n\t\t\tif old_trash_path.is_file():\n\t\t\t\told_trash_path.replace(blob_path)\n\n\tdef run(self) -> SizeDiff:\n\t\t# Notes: requires 2x disk usage of the blob store, stores all blob hashes in memory\n\t\tself.__migrated_blob_hashes.clear()\n\t\tself.logger.info('Migrating compress method to {} (compress threshold = {})'.format(self.new_compress_method.name, self.config.backup.compress_threshold))\n\n\t\ttry:\n\t\t\t# Blob operation steps:\n\t\t\t# 1. move xxx -> xxx_old\n\t\t\t# 2. copy xxx_old --[migrate]-> xxx\n\t\t\t# 3. delete xxx_old\n\t\t\twith DbAccess.open_session() as session:\n\t\t\t\t# 0. fetch information before the migration\n\t\t\t\tt = time.time()\n\t\t\t\tbefore_size = session.get_blob_stored_size_sum()\n\t\t\t\ttotal_blob_count = session.get_blob_count()\n\n\t\t\t\t# 1. migrate blob objects\n\t\t\t\tcnt = 0\n\t\t\t\tfor blobs in session.iterate_blob_batch(batch_size=1000):\n\t\t\t\t\tcnt += len(blobs)\n\t\t\t\t\tself.logger.info('Processing blobs {} / {}'.format(cnt, total_blob_count))\n\t\t\t\t\tself.__migrate_blobs_and_sync_files(session, blobs)\n\t\t\t\t\tsession.flush_and_expunge_all()\n\n\t\t\t\tif len(self.__migrated_blob_hashes) == 0:\n\t\t\t\t\tself.logger.info('No blob needs a compress method change, nothing to migrate')\n\t\t\t\telse:\n\t\t\t\t\tself.logger.info('Migrated {} blobs and related files'.format(len(self.__migrated_blob_hashes)))\n\n\t\t\t\t\t# 3. migrate backup data\n\t\t\t\t\tself.logger.info('Syncing {} affected backups'.format(len(self.__affected_backup_ids)))\n\t\t\t\t\tself.__update_backups(session)\n\t\t\t\t\tsession.flush_and_expunge_all()\n\n\t\t\t\t# 4. 
output\n\t\t\t\tafter_size = session.get_blob_stored_size_sum()\n\n\t\texcept Exception:\n\t\t\tself.logger.warning('Error occurs during compress method migration, applying rollback')\n\t\t\tself.__rollback()\n\t\t\traise\n\n\t\telse:\n\t\t\t# 5. migration done, do some cleanup\n\t\t\tself.logger.info('Cleaning up old blobs')\n\t\t\tself.__erase_old_blobs()\n\n\t\t\tself.config.backup.compress_method = self.new_compress_method\n\t\t\tself.logger.info('Compress method migration done, cost {}s'.format(round(time.time() - t, 2)))\n\t\t\treturn SizeDiff(before_size, after_size)\n\n\t\tfinally:\n\t\t\tself.__migrated_blob_hashes.clear()" }, { "identifier": "CompressMethod", "path": "prime_backup/compressors.py", "snippet": "class CompressMethod(enum.Enum):\n\tplain = PlainCompressor\n\tgzip = GzipCompressor\n\tlzma = LzmaCompressor\n\tzstd = ZstdCompressor\n\tlz4 = Lz4Compressor" }, { "identifier": "DbAccess", "path": "prime_backup/db/access.py", "snippet": "class DbAccess:\n\t__engine: Optional[Engine] = None\n\t__db_file_path: Optional[Path] = None\n\n\t__hash_method: Optional[HashMethod] = None\n\n\t@classmethod\n\tdef init(cls, auto_migrate: bool = True):\n\t\t\"\"\"\n\t\t:param auto_migrate:\n\t\t\tTrue: check db meta, try to migrate;\n\t\t\tFalse: check db version only\n\t\t\"\"\"\n\t\tdb_dir = Config.get().storage_path\n\t\tdb_dir.mkdir(parents=True, exist_ok=True)\n\n\t\tdb_path = db_dir / db_constants.DB_FILE_NAME\n\t\tcls.__engine = create_engine('sqlite:///' + str(db_path))\n\t\tcls.__db_file_path = db_path\n\n\t\tmigration = DbMigration(cls.__engine)\n\t\tif auto_migrate:\n\t\t\tmigration.check_and_migrate()\n\t\telse:\n\t\t\tmigration.ensure_version()\n\n\t\tcls.sync_hash_method()\n\n\t@classmethod\n\tdef shutdown(cls):\n\t\tif (engine := cls.__engine) is not None:\n\t\t\tengine.dispose()\n\t\t\tcls.__engine = None\n\n\t@classmethod\n\tdef sync_hash_method(cls):\n\t\twith cls.open_session() as session:\n\t\t\thash_method_str = str(session.get_db_meta().hash_method)\n\t\ttry:\n\t\t\tcls.__hash_method = HashMethod[hash_method_str]\n\t\texcept KeyError:\n\t\t\traise ValueError('invalid hash method {!r} in db meta'.format(hash_method_str)) from None\n\n\t@classmethod\n\tdef __ensure_engine(cls) -> Engine:\n\t\tif cls.__engine is None:\n\t\t\traise RuntimeError('engine unavailable')\n\t\treturn cls.__engine\n\n\t@classmethod\n\tdef __ensure_not_none(cls, value):\n\t\tif value is None:\n\t\t\traise RuntimeError('db not is not initialized yet')\n\t\treturn value\n\n\t@classmethod\n\tdef get_db_file_path(cls) -> Path:\n\t\treturn cls.__ensure_not_none(cls.__db_file_path)\n\n\t@classmethod\n\tdef get_hash_method(cls) -> HashMethod:\n\t\treturn cls.__ensure_not_none(cls.__hash_method)\n\n\t@classmethod\n\[email protected]\n\tdef open_session(cls) -> ContextManager['DbSession']:\n\t\twith Session(cls.__ensure_engine()) as session, session.begin():\n\t\t\tyield DbSession(session, cls.__db_file_path)\n\n\t@classmethod\n\[email protected]\n\tdef enable_echo(cls) -> ContextManager[None]:\n\t\tengine = cls.__ensure_engine()\n\t\tengine.echo = True\n\t\ttry:\n\t\t\tyield\n\t\tfinally:\n\t\t\tengine.echo = False" }, { "identifier": "Operator", "path": "prime_backup/types/operator.py", "snippet": "class Operator(NamedTuple):\n\ttype: str\n\tname: str\n\n\t@classmethod\n\tdef pb(cls, what: str) -> 'Operator':\n\t\treturn Operator(constants.PLUGIN_ID, what)\n\n\t@classmethod\n\tdef player(cls, name: str) -> 'Operator':\n\t\treturn Operator('player', name)\n\n\t@classmethod\n\tdef console(cls) -> 
'Operator':\n\t\treturn Operator('console', '')\n\n\t@classmethod\n\tdef of(cls, value: Union[str, 'CommandSource']) -> 'Operator':\n\t\tfrom mcdreforged.api.all import CommandSource\n\t\tif isinstance(value, CommandSource):\n\t\t\tif value.is_player:\n\t\t\t\t# noinspection PyUnresolvedReferences\n\t\t\t\treturn cls.player(value.player)\n\t\t\telif value.is_console:\n\t\t\t\treturn cls.console()\n\t\t\telse:\n\t\t\t\treturn Operator('command_source', str(value))\n\t\telif isinstance(value, str):\n\t\t\tif ':' in value:\n\t\t\t\tt, n = value.split(':', 1)\n\t\t\t\treturn Operator(type=t, name=n)\n\t\t\telse:\n\t\t\t\treturn Operator(type='unknown', name=value)\n\t\telse:\n\t\t\traise TypeError(value)\n\n\tdef to_text(self) -> 'RTextBase':\n\t\tfrom prime_backup.mcdr.text_components import TextComponents\n\t\treturn TextComponents.operator(self)\n\n\tdef __str__(self):\n\t\treturn f'{self.type}:{self.name}'\n\n\tdef is_player(self) -> bool:\n\t\treturn self.type == 'player'" }, { "identifier": "TarFormat", "path": "prime_backup/types/tar_format.py", "snippet": "class TarFormat(enum.Enum):\n\tplain = _TarFormatItem('.tar', (), ':', CompressMethod.plain)\n\tgzip = _TarFormatItem('.tar.gz', ('.tgz',), ':gz', CompressMethod.plain)\n\tbz2 = _TarFormatItem('.tar.bz2', ('.tbz2',), ':bz2', CompressMethod.plain)\n\tlzma = _TarFormatItem('.tar.xz', ('.txz',), ':xz', CompressMethod.plain)\n\tzstd = _TarFormatItem('.tar.zst', ('.tar.zstd', '.tzst', '.tzstd'), ':', CompressMethod.zstd)" } ]
import time
from pathlib import Path

from prime_backup import logger
from prime_backup.action.create_backup_action import CreateBackupAction
from prime_backup.action.delete_backup_action import DeleteBackupAction
from prime_backup.action.export_backup_action import ExportBackupToDirectoryAction, ExportBackupToTarAction, ExportBackupToZipAction
from prime_backup.action.import_backup_action import ImportBackupAction
from prime_backup.action.list_backup_action import ListBackupAction
from prime_backup.action.migrate_compress_method_action import MigrateCompressMethodAction
from prime_backup.compressors import CompressMethod
from prime_backup.db.access import DbAccess
from prime_backup.types.operator import Operator
from prime_backup.types.tar_format import TarFormat
13,019
def main():
	DbAccess.init()
	backup_id = 1
	logger.get().info('debug entry start')

	def create(n: int = 1):
		nonlocal backup_id
		for i in range(n):
			t = time.time()
			bka = CreateBackupAction(Operator.player('Steve'), '测试彩色测试')
			backup_id = bka.run().id
			print('cost', round(time.time() - t, 2), 's')

	def create_if_1st():
		nonlocal backup_id
		t = time.time()
		if backup_id == 1:
			bka = CreateBackupAction(Operator.player('Steve'), 'test2')
			backup_id = bka.run().id
		print('cost', round(time.time() - t, 2), 's')

	def export(bid=None):
		if bid is None:
			bid = backup_id
		t = time.time()
def main():
	DbAccess.init()
	backup_id = 1
	logger.get().info('debug entry start')

	def create(n: int = 1):
		nonlocal backup_id
		for i in range(n):
			t = time.time()
			bka = CreateBackupAction(Operator.player('Steve'), '测试彩色测试')
			backup_id = bka.run().id
			print('cost', round(time.time() - t, 2), 's')

	def create_if_1st():
		nonlocal backup_id
		t = time.time()
		if backup_id == 1:
			bka = CreateBackupAction(Operator.player('Steve'), 'test2')
			backup_id = bka.run().id
		print('cost', round(time.time() - t, 2), 's')

	def export(bid=None):
		if bid is None:
			bid = backup_id
		t = time.time()
_ = [ExportBackupToDirectoryAction, ExportBackupToTarAction, ExportBackupToZipAction, TarFormat]
3
2023-11-28 19:03:36+00:00
16k
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n def __init__(self, headers=None, **kwargs):\n super(HTTPHeaderDict, self).__init__()\n self._container = OrderedDict()\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key, val):\n self._container[key.lower()] = [key, val]\n return self._container[key.lower()]\n\n def __getitem__(self, key):\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key):\n del self._container[key.lower()]\n\n def __contains__(self, key):\n return key.lower() in self._container\n\n def __eq__(self, other):\n if not isinstance(other, Mapping) and not hasattr(other, \"keys\"):\n return False\n if not isinstance(other, type(self)):\n other = type(self)(other)\n return dict((k.lower(), v) for k, v in self.itermerged()) == dict(\n (k.lower(), v) for k, v in other.itermerged()\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n if six.PY2: # Python 2\n iterkeys = MutableMapping.iterkeys\n itervalues = MutableMapping.itervalues\n\n __marker = object()\n\n def __len__(self):\n return len(self._container)\n\n def __iter__(self):\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def pop(self, key, default=__marker):\n \"\"\"D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \"\"\"\n # Using the MutableMapping function directly fails due to the private marker.\n # Using ordinary dict.pop would expose the internal structures.\n # So let's reinvent the wheel.\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def discard(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key, val):\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n \"\"\"\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = 
self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n vals.append(val)\n\n def extend(self, *args, **kwargs):\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n \"extend() takes at most 1 positional \"\n \"arguments ({0} given)\".format(len(args))\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, Mapping):\n for key in other:\n self.add(key, other[key])\n elif hasattr(other, \"keys\"):\n for key in other.keys():\n self.add(key, other[key])\n else:\n for key, value in other:\n self.add(key, value)\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n def getlist(self, key, default=__marker):\n \"\"\"Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n def _prepare_for_method_change(self):\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, dict(self.itermerged()))\n\n def _copy_from(self, other):\n for key in other:\n val = other.getlist(key)\n if isinstance(val, list):\n # Don't need to convert tuples\n val = list(val)\n self._container[key.lower()] = [key] + val\n\n def copy(self):\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self):\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self):\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self):\n return list(self.iteritems())\n\n @classmethod\n def from_httplib(cls, message): # Python 2\n \"\"\"Read headers from a Python 2 httplib message object.\"\"\"\n # python2.7 does not expose a proper API for exporting multiheaders\n # efficiently. This function re-reads raw lines from the message\n # object and extracts the multiheaders properly.\n obs_fold_continued_leaders = (\" \", \"\\t\")\n headers = []\n\n for line in message.headers:\n if line.startswith(obs_fold_continued_leaders):\n if not headers:\n # We received a header line that starts with OWS as described\n # in RFC-7230 S3.2.4. 
This indicates a multiline header, but\n # there exists no previous header to which we can attach it.\n raise InvalidHeader(\n \"Header continuation with no previous header: %s\" % line\n )\n else:\n key, value = headers[-1]\n headers[-1] = (key, value + \" \" + line.strip())\n continue\n\n key, value = line.split(\":\", 1)\n headers.append((key, value.strip()))\n\n return cls(headers)" }, { "identifier": "RecentlyUsedContainer", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(MutableMapping):\n \"\"\"\n Provides a thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n ContainerCls = OrderedDict\n\n def __init__(self, maxsize=10, dispose_func=None):\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n\n self._container = self.ContainerCls()\n self.lock = RLock()\n\n def __getitem__(self, key):\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key, value):\n evicted_value = _Null\n with self.lock:\n # Possibly evict the existing value of 'key'\n evicted_value = self._container.get(key, _Null)\n self._container[key] = value\n\n # If we didn't evict an existing value, we might have to evict the\n # least recently used item from the beginning of the container.\n if len(self._container) > self._maxsize:\n _key, evicted_value = self._container.popitem(last=False)\n\n if self.dispose_func and evicted_value is not _Null:\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key):\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self):\n with self.lock:\n return len(self._container)\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self):\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(itervalues(self._container))\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self):\n with self.lock:\n return list(iterkeys(self._container))" }, { "identifier": "HTTPConnectionPool", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/connectionpool.py", "snippet": "class ConnectionPool(object):\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host, port=None):\n def __str__(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def close(self):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n _proxy_config=None,\n **conn_kw\n ):\n def _new_conn(self):\n def _get_conn(self, timeout=None):\n def _put_conn(self, conn):\n def _validate_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _get_timeout(self, timeout):\n def _raise_timeout(self, err, url, timeout_value):\n def _make_request(\n self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw\n 
):\n def _absolute_url(self, path):\n def close(self):\n def is_same_host(self, url):\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n assert_same_host=True,\n timeout=_Default,\n pool_timeout=None,\n release_conn=None,\n chunked=False,\n body_pos=None,\n **response_kw\n ):\n def _is_ssl_error_message_from_http_proxy(ssl_error):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n ssl_version=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n **conn_kw\n ):\n def _prepare_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _new_conn(self):\n def _validate_conn(self, conn):\ndef connection_from_url(url, **kw):\ndef _normalize_host(host, scheme):\ndef _close_pool_connections(pool):" }, { "identifier": "LocationValueError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"\n\n pass" }, { "identifier": "MaxRetryError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme):\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = (\n \"Proxy URL had unsupported scheme %s, should use http:// or https://\"\n % scheme\n )\n super(ProxySchemeUnknown, self).__init__(message)" }, { "identifier": "ProxySchemeUnsupported", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnsupported(ValueError):\n \"\"\"Fetching HTTPS resources through HTTPS proxies is unsupported\"\"\"\n\n pass" }, { "identifier": "URLSchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme):\n message = \"Not supported URL scheme %s\" % scheme\n super(URLSchemeUnknown, self).__init__(message)\n\n self.scheme = scheme" }, { "identifier": "six", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/packages/six.py", "snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] 
>= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):" }, { "identifier": "RequestMethods", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/request.py", "snippet": "class RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of 
request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **kw\n ): # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n urlopen_kw[\"request_url\"] = url\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method,\n url,\n fields=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **urlopen_kw\n ):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. 
For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": {}}\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields),\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"] = {\"Content-Type\": content_type}\n\n extra_kw[\"headers\"].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "connection_requires_http_tunnel", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url=None, proxy_config=None, destination_scheme=None\n):\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. (i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/retry.py", "snippet": "class Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. 
Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. 
The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. 
Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
elif error and self._is_read_error(error):\n # Read retry?\n if read is False or not self._is_method_retryable(method):\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n redirect_location = response.get_redirect_location()\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n return new_retry\n\n def __repr__(self):\n return (\n \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n ).format(cls=type(self), self=self)\n\n def __getattr__(self, item):\n if item == \"method_whitelist\":\n # TODO: Remove this deprecated alias in v2.0\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n return self.allowed_methods\n try:\n return getattr(super(Retry, self), item)\n except AttributeError:\n return getattr(Retry, item)" }, { "identifier": "parse_url", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/url.py", "snippet": "def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. 
Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not SCHEME_RE.search(url):\n url = \"//\" + url\n\n try:\n scheme, authority, path, query, fragment = URI_RE.match(url).groups()\n normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups()\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port = int(port)\n if not (0 <= port <= 65535):\n raise LocationParseError(url)\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)\n\n except (ValueError, AttributeError):\n return six.raise_from(LocationParseError(source_url), None)\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n # Ensure that each part of the URL is a `str` for\n # backwards compatibility.\n if isinstance(url, six.text_type):\n ensure_func = six.ensure_text\n else:\n ensure_func = six.ensure_str\n\n def ensure_type(x):\n return x if x is None else ensure_func(x)\n\n return Url(\n scheme=ensure_type(scheme),\n auth=ensure_type(auth),\n host=ensure_type(host),\n port=port,\n path=ensure_type(path),\n query=ensure_type(query),\n fragment=ensure_type(fragment),\n )" } ]
import collections
import functools
import logging

from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
    LocationValueError,
    MaxRetryError,
    ProxySchemeUnknown,
    ProxySchemeUnsupported,
    URLSchemeUnknown,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.url import parse_url
13,339
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
2
2023-11-27 07:01:39+00:00
16k
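The urllib3 record above revolves around parse_url, the pool-key machinery, and the Retry policy whose increment method is shown in the context snippet. As a brief, hedged illustration that is not part of the dataset record itself, the Python sketch below shows how those pieces are typically used together; it assumes a standard urllib3 1.26.x installation (not the vendored MetaTube.bundle copy), and the example URL and retry settings are arbitrary placeholders.

from urllib3 import PoolManager
from urllib3.util.retry import Retry
from urllib3.util.url import parse_url

# parse_url returns the Url namedtuple documented in the record above.
print(parse_url("http://google.com/mail/"))  # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
print(parse_url("google.com:80"))            # Url(scheme=None, host='google.com', port=80, path=None, ...)

# Retry.increment (shown above) is what counts these budgets down when a
# request errors out or returns a status in status_forcelist.
retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503],
              allowed_methods=["GET", "HEAD"])
http = PoolManager(retries=retry)  # pools created by this manager inherit the policy
# resp = http.request("GET", "http://example.com/")  # placeholder URL; would apply the retry policy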
IanYeung/MGLD-VSR
scripts/vsr_val_ddpm_text_T_vqganfin_old.py
[ { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def q_sample(self, x_start, t, noise=None, ddim_num_steps=200):\n self.make_schedule(ddim_num_steps=ddim_num_steps)\n noise = default(noise, lambda: torch.randn_like(x_start))\n return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = 
torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec\n\n\n @torch.no_grad()\n def p_sample_ddim_sr(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, struct_c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current 
prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def decode_sr(self, x_latent, cond, struct_cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim_sr(x_dec, cond, struct_cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec\n\n @torch.no_grad()\n def sample_sr(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n struct_cond=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n _, C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling_sr(conditioning, struct_cond, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling_sr(self, cond, struct_cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim_sr(img, cond, struct_cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim_sr(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, struct_c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n\n @torch.no_grad()\n def sample_sr_t(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n struct_cond=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n _, C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling_sr_t(conditioning, struct_cond, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling_sr_t(self, cond, struct_cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n # timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else sorted(set(space_timesteps(1000, [self.ddim_timesteps.shape[0]])))\n timesteps = np.array(timesteps)\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_ddim_sr_t(img, cond, struct_cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim_sr_t(self, x, c, struct_c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n struct_c_t = self.model.structcond_stage_model(struct_c, t)\n e_t = self.model.apply_model(x, t, c, struct_c_t)\n else:\n assert NotImplementedError\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, struct_c).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "PLMSSampler", "path": "ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running PLMS Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps, t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "resize_flow", "path": "basicsr/archs/arch_util.py", "snippet": "def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):\n \"\"\"Resize a flow according to ratio or shape.\n\n Args:\n flow (Tensor): Precomputed flow. shape [N, 2, H, W].\n size_type (str): 'ratio' or 'shape'.\n sizes (list[int | float]): the ratio for resizing or the final output\n shape.\n 1) The order of ratio should be [ratio_h, ratio_w]. For\n downsampling, the ratio should be smaller than 1.0 (i.e., ratio\n < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,\n ratio > 1.0).\n 2) The order of output_size should be [out_h, out_w].\n interp_mode (str): The mode of interpolation for resizing.\n Default: 'bilinear'.\n align_corners (bool): Whether align corners. 
Default: False.\n\n Returns:\n Tensor: Resized flow.\n \"\"\"\n _, _, flow_h, flow_w = flow.size()\n if size_type == 'ratio':\n output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])\n elif size_type == 'shape':\n output_h, output_w = sizes[0], sizes[1]\n else:\n raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')\n\n input_flow = flow.clone()\n ratio_h = output_h / flow_h\n ratio_w = output_w / flow_w\n input_flow[:, 0, :, :] *= ratio_w\n input_flow[:, 1, :, :] *= ratio_h\n resized_flow = F.interpolate(\n input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)\n return resized_flow" }, { "identifier": "forward_backward_consistency_check", "path": "scripts/util_flow.py", "snippet": "def forward_backward_consistency_check(fwd_flow,\n bwd_flow,\n alpha=0.01,\n beta=0.5):\n # fwd_flow, bwd_flow: [B, 2, H, W]\n # alpha and beta values are following UnFlow\n # (https://arxiv.org/abs/1711.07837)\n assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4\n assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2\n flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W]\n\n warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W]\n warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W]\n\n diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W]\n diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)\n\n threshold = alpha * flow_mag + beta\n\n fwd_occ = (diff_fwd > threshold).float() # [B, H, W]\n bwd_occ = (diff_bwd > threshold).float()\n\n return fwd_occ, bwd_occ" }, { "identifier": "wavelet_reconstruction", "path": "scripts/wavelet_color_fix.py", "snippet": "def wavelet_reconstruction(content_feat:Tensor, style_feat:Tensor):\n \"\"\"\n Apply wavelet decomposition, so that the content will have the same color as the style.\n \"\"\"\n # calculate the wavelet decomposition of the content feature\n content_high_freq, content_low_freq = wavelet_decomposition(content_feat)\n del content_low_freq\n # calculate the wavelet decomposition of the style feature\n style_high_freq, style_low_freq = wavelet_decomposition(style_feat)\n del style_high_freq\n # reconstruct the content feature with the style's high frequency\n return content_high_freq + style_low_freq" }, { "identifier": "adaptive_instance_normalization", "path": "scripts/wavelet_color_fix.py", "snippet": "def adaptive_instance_normalization(content_feat:Tensor, style_feat:Tensor):\n \"\"\"Adaptive instance normalization.\n Adjust the reference features to have the similar color and illuminations\n as those in the degradate features.\n Args:\n content_feat (Tensor): The reference feature.\n style_feat (Tensor): The degradate features.\n \"\"\"\n size = content_feat.size()\n style_mean, style_std = calc_mean_std(style_feat)\n content_mean, content_std = calc_mean_std(content_feat)\n normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)\n return normalized_feat * style_std.expand(size) + style_mean.expand(size)" } ]
import argparse, os, sys, glob
import PIL
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torchvision
import time
import math
import copy
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
from torchvision.utils import make_grid
from torch import autocast
from contextlib import nullcontext
from pytorch_lightning import seed_everything
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from basicsr.archs.arch_util import resize_flow
from scripts.util_flow import forward_backward_consistency_check
from scripts.wavelet_color_fix import wavelet_reconstruction, adaptive_instance_normalization
12,910
def space_timesteps(num_timesteps, section_counts): """ Create a list of timesteps to use from an original diffusion process, given the number of timesteps we want to take from equally-sized portions of the original process. For example, if there's 300 timesteps and the section counts are [10,15,20] then the first 100 timesteps are strided to be 10 timesteps, the second 100 are strided to be 15 timesteps, and the final 100 are strided to be 20. If the stride is a string starting with "ddim", then the fixed striding from the DDIM paper is used, and only one section is allowed. :param num_timesteps: the number of diffusion steps in the original process to divide up. :param section_counts: either a list of numbers, or a string containing comma-separated numbers, indicating the step count per section. As a special case, use "ddimN" where N is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. """ if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"]
def space_timesteps(num_timesteps, section_counts): """ Create a list of timesteps to use from an original diffusion process, given the number of timesteps we want to take from equally-sized portions of the original process. For example, if there's 300 timesteps and the section counts are [10,15,20] then the first 100 timesteps are strided to be 10 timesteps, the second 100 are strided to be 15 timesteps, and the final 100 are strided to be 20. If the stride is a string starting with "ddim", then the fixed striding from the DDIM paper is used, and only one section is allowed. :param num_timesteps: the number of diffusion steps in the original process to divide up. :param section_counts: either a list of numbers, or a string containing comma-separated numbers, indicating the step count per section. As a special case, use "ddimN" where N is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. """ if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
0
2023-11-30 01:50:29+00:00
16k
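The context snippets in the MGLD-VSR record above repeat the same DDIM update inside p_sample_ddim and its _sr/_sr_t variants. As a hedged, self-contained illustration (dummy tensors and hand-picked alpha values chosen for demonstration, not code from the repository), the sketch below reproduces that single denoising step so the algebra is easier to follow.

import torch

# Dummy shapes and schedule values, for illustration only.
b, c, h, w = 1, 4, 8, 8
x = torch.randn(b, c, h, w)              # current noisy latent x_t
e_t = torch.randn(b, c, h, w)            # stand-in for the model's predicted noise
a_t = torch.full((b, 1, 1, 1), 0.70)     # alpha_cumprod at the current timestep
a_prev = torch.full((b, 1, 1, 1), 0.80)  # alpha_cumprod at the previous (less noisy) timestep
sigma_t = torch.full((b, 1, 1, 1), 0.0)  # eta = 0 -> deterministic DDIM
sqrt_one_minus_at = (1.0 - a_t).sqrt()

# current prediction for x_0 (same formula as in p_sample_ddim)
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
# direction pointing to x_t
dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
noise = sigma_t * torch.randn_like(x)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
print(x_prev.shape)  # torch.Size([1, 4, 8, 8])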
Institute4FutureHealth/CHA
tasks/types.py
[ { "identifier": "ActivityAnalysis", "path": "tasks/affect/activity_analysis.py", "snippet": "class ActivityAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw activity affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_activity_analysis\"\n chat_name: str = \"AffectActivityAnalysis\"\n description: str = (\n \"Analyze the physical activity data. You must Call this whenever physical activity analysis\"\n \"(e.g., 'average', 'sum', or 'trend') is needed. DON'T rely on your analysis.\"\n \"For example, if the user asks for trends (or variations) in data, you must call this task\"\n )\n dependencies: List[str] = [\"affect_activity_get\"]\n inputs: List[str] = [\n \"It is an string but in json format. It is the output of the $affect_activity_get$\",\n \"analysis_type. It can be one of [$average$, $sum$, $trend$].\",\n ]\n outputs: List[str] = [\n (\n \"The analysis result for steps_count. Look for analysis_type to find the type of analysis. \"\n \"steps_count is the total number of steps registered during the day.\"\n ),\n (\n \"The analysis result for rest_time. Look for analysis_type to find the type of analysis. \"\n \"rest_time is the time (in minutes) during the day spent resting, i.e. sleeping or lying down.\"\n ),\n (\n \"The analysis result for inactive_time. Look for analysis_type to find the type of analysis. \"\n \"inactive_time is the time (in minutes) during the day spent resting, i.e. sitting or standing still.\"\n ),\n (\n \"The analysis result for low_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"low_acitivity_time is the (in minutes) during the day with low intensity activity (e.g. household work).\"\n ),\n (\n \"The analysis result for medimum_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"medimum_acitivity_time is the (in minutes) during the day with medium intensity activity (e.g. walking).\"\n ),\n (\n \"The analysis result for high_acitivity_time. Look for analysis_type to find the type of analysis. \"\n \"high_acitivity_time is the (in minutes) during the day with high intensity activity (e.g. running).\"\n ),\n ]\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n if len(inputs) == 0:\n return \"\"\n\n df = pd.read_json(\n StringIO(inputs[0][\"data\"].strip()), orient=\"records\"\n )\n analysis_type = inputs[1].strip()\n if analysis_type == \"average\":\n df = df.drop(\"date\", axis=1) # No average for date!\n df = df.mean().to_frame().T\n elif analysis_type == \"sum\":\n df = df.drop(\"date\", axis=1) # No sum for date!\n df = df.sum().to_frame().T\n elif analysis_type == \"trend\":\n df = self._calculate_slope(df)\n else:\n raise ValueError(\n \"The input analysis type has not been defined!\"\n )\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "ActivityGet", "path": "tasks/affect/activity_get.py", "snippet": "class ActivityGet(Affect):\n \"\"\"\n **Description:**\n\n This tasks gets activity affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_activity_get\"\n chat_name: str = \"AffectActivityGet\"\n description: str = (\n \"Get the physical activity parameters for a specific date or \"\n \"a period (if two dates are provided). 
\"\n \"You must Call $affect_analysis$ whenever physical activity \"\n \"analysis (e.g., 'average', 'sum', or 'trend') is needed. DON'T rely on your analysis\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"user ID in string. It can be refered as user, patient, individual, etc. Start with 'par_' following with a number (e.g., 'par_1').\",\n \"start date of the physical activity data in string with the following format: '%Y-%m-%d'\",\n (\n \"end date of the physical activity data in string with the following format: '%Y-%m-%d'.\"\n \"If there is no end date, the value should be an empty string (i.e., '')\"\n ),\n ]\n outputs: List[str] = [\n \"steps_count is the total number of steps registered during the day.\",\n \"rest_time is the time (in minutes) during the day spent resting, i.e. sleeping or lying down.\",\n \"inactive_time is the time (in minutes) during the day spent resting, i.e. sitting or standing still.\",\n \"low_acitivity_time is the (in minutes) during the day with low intensity activity (e.g. household work).\",\n \"medimum_acitivity_time is the (in minutes) during the day with medium intensity activity (e.g. walking).\",\n \"high_acitivity_time is the (in minutes) during the day with high intensity activity (e.g. running).\",\n ]\n\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = True\n #\n file_name: str = \"activity.csv\"\n device_name: str = \"oura\"\n local_dir: str = \"data/affect\"\n\n columns_to_keep: List[str] = [\n \"date\",\n \"steps\",\n \"rest\",\n \"inactive\",\n \"low\",\n \"medium\",\n \"high\",\n ]\n columns_revised: List[str] = [\n \"date\",\n \"steps_count\",\n \"rest_time\",\n \"inactive_time\",\n \"low_acitivity_time\",\n \"medimum_acitivity_time\",\n \"high_acitivity_time\",\n ]\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n user_id = inputs[0].strip()\n full_dir = os.path.join(\n self.local_dir, user_id, self.device_name\n )\n df = self._get_data(\n local_dir=full_dir,\n file_name=self.file_name,\n start_date=inputs[1].strip(),\n end_date=inputs[2].strip(),\n usecols=self.columns_to_keep,\n )\n df.columns = self.columns_revised\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "SleepAnalysis", "path": "tasks/affect/sleep_analysis.py", "snippet": "class SleepAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw sleep affect data for specific patient.\n \"\"\"\n\n name: str = \"affect_sleep_analysis\"\n chat_name: str = \"AffectSleepAnalysis\"\n description: str = (\n \"Performs trend or average analysis on the provided sleep data. You must Call this whenever sleep trend or average is needed.\"\n \"For example, if the user asks for trends (or variations) in data, you must call this task\"\n )\n dependencies: List[str] = [\"affect_sleep_get\"]\n inputs: List[str] = [\n \"datapipe key to the data\",\n \"analysis_type. It can be one of [average, trend].\",\n ]\n outputs: List[str] = [\n (\n \"The analysis result for total_sleep_time. Look for analysis_type to find the type of analysis. \"\n \"total_sleep_time (in minutes) is Total amount of sleep (a.k.a. sleep duration) registered during the sleep period.\"\n ),\n (\n \"The analysis result for awake_duration. Look for analysis_type to find the type of analysis. 
\"\n \"awake_duration (in minutes) is the total amount of awake time registered during the sleep period.\"\n ),\n (\n \"The analysis result for light_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"light_sleep_duration (in minutes) is the total amount of light (N1 or N2) sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for rem_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"rem_sleep_duration (in minutes) is the total amount of REM sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for deep_sleep_duration. Look for analysis_type to find the type of analysis. \"\n \"deep_sleep_duration (in minutes) is the total amount of deep (N3) sleep registered during the sleep period.\"\n ),\n (\n \"The analysis result for sleep_onset_latency. Look for analysis_type to find the type of analysis. sleep_onset_latency (in minutes) \"\n \"is the detected latency from bedtime_start to the beginning of the first five minutes of persistent sleep.\"\n ),\n (\n \"The analysis result for midpoint_time_of_sleep. Look for analysis_type to find the type of analysis. \"\n \"midpoint_time_of_sleep (in minutes) is the time from the start of sleep to the midpoint of sleep. The midpoint ignores awake periods.\"\n ),\n (\n \"The analysis result for sleep_efficiency. Look for analysis_type to find the type of analysis. \"\n \"sleep_efficiency is the percentage of the sleep period spent asleep (100% * sleep duration / time in bed).\"\n ),\n (\n \"The analysis result for average_heart_rate. Look for analysis_type to find the type of analysis. \"\n \"average_heart_rate is the average heart rate registered during the sleep period.\"\n ),\n (\n \"The analysis result for minimum_heart_rate. Look for analysis_type to find the type of analysis. \"\n \"minimum_heart_rate is the lowest heart rate (5 minutes sliding average) registered during the sleep period.\"\n ),\n (\n \"The analysis result for rmssd. Look for analysis_type to find the type of analysis. \"\n \"rmssd is the average Root Mean Square of Successive Differences (RMSSD) registered during the sleep period.\"\n ),\n (\n \"The analysis result for average_breathing_rate. Look for analysis_type to find the type of analysis. \"\n \"average_breathing_rate is the average breathing rate registered during the sleep period.\"\n ),\n (\n \"The analysis result for temperature_variation. Look for analysis_type to find the type of analysis. 
\"\n \"temperature_variation is the skin temperature deviation from the long-term temperature average.\"\n ),\n ]\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = True\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n df = pd.read_json(\n StringIO(inputs[0][\"data\"].strip()), orient=\"records\"\n )\n analysis_type = inputs[1].strip()\n if analysis_type == \"average\":\n df = df.drop(\"date\", axis=1) # No average for date!\n df = df.mean().to_frame().T\n elif analysis_type == \"trend\":\n df = self._calculate_slope(df)\n else:\n raise ValueError(\n \"The input analysis type has not been defined!\"\n )\n df = df.round(2)\n json_out = df.to_json(orient=\"records\")\n return json_out" }, { "identifier": "SleepGet", "path": "tasks/affect/sleep_get.py", "snippet": "class SleepGet(Affect):\r\n \"\"\"\r\n **Description:**\r\n\r\n This tasks gets sleep affect data for specific patient.\r\n \"\"\"\r\n\r\n name: str = \"affect_sleep_get\"\r\n chat_name: str = \"AffectSleepGet\"\r\n description: str = (\r\n \"Get the sleep parameters for a specific date or \"\r\n \"a period (if two dates are provided). \"\r\n \"You must Call $affect_sleep_analysis$ whenever sleep \"\r\n \"analysis (e.g., 'average' or 'trend') is needed. DON'T rely on your analysis\"\r\n )\r\n dependencies: List[str] = []\r\n inputs: List[str] = [\r\n \"user ID in string. It can be refered as user, patient, individual, etc. Start with 'par_' following with a number (e.g., 'par_1').\",\r\n \"start date of the sleep data in string with the following format: '%Y-%m-%d'\",\r\n (\r\n \"end date of the sleep data in string with the following format: '%Y-%m-%d'. \"\r\n \"If there is no end date, the value should be an empty string (i.e., '')\"\r\n ),\r\n ]\r\n outputs: List[str] = [\r\n \"total_sleep_time (in minutes) is Total amount of sleep (a.k.a. sleep duration) registered during the sleep period.\",\r\n \"awake_duration (in minutes) is the total amount of awake time registered during the sleep period.\",\r\n \"light_sleep_duration (in minutes) is the total amount of light (N1 or N2) sleep registered during the sleep period.\",\r\n \"rem_sleep_duration (in minutes) is the total amount of REM sleep registered during the sleep period.\",\r\n \"deep_sleep_duration (in minutes) is the total amount of deep (N3) sleep registered during the sleep period.\",\r\n \"sleep_onset_latency (in minutes) is detected latency from bedtime_start to the beginning of the first five minutes of persistent sleep.\",\r\n \"midpoint_time_of_sleep (in minutes) is the time from the start of sleep to the midpoint of sleep. 
The midpoint ignores awake periods.\",\r\n \"sleep_efficiency is the percentage of the sleep period spent asleep (100% * sleep duration / time in bed).\",\r\n \"average_heart_rate is the average heart rate registered during the sleep period.\",\r\n \"minimum_heart_rate is the lowest heart rate (5 minutes sliding average) registered during the sleep period.\",\r\n \"rmssd is the average Root Mean Square of Successive Differences (RMSSD) registered during the sleep period.\",\r\n \"average_breathing_rate is the average breathing rate registered during the sleep period.\",\r\n \"temperature_variation is the skin temperature deviation from the long-term temperature average.\",\r\n ]\r\n # False if the output should directly passed back to the planner.\r\n # True if it should be stored in datapipe\r\n output_type: bool = True\r\n #\r\n file_name: str = \"sleep.csv\"\r\n device_name: str = \"oura\"\r\n local_dir: str = \"data/affect\"\r\n columns_to_keep: List[str] = [\r\n \"date\",\r\n \"total\",\r\n \"awake\",\r\n \"light\",\r\n \"rem\",\r\n \"deep\",\r\n \"onset_latency\",\r\n \"midpoint_time\",\r\n \"efficiency\",\r\n \"hr_average\",\r\n \"hr_lowest\",\r\n \"rmssd\",\r\n \"breath_average\",\r\n \"temperature_delta\",\r\n ]\r\n columns_revised: List[str] = [\r\n \"date\",\r\n \"total_sleep_time\",\r\n \"awake_duration\",\r\n \"light_sleep_duration\",\r\n \"rem_sleep_duration\",\r\n \"deep_sleep_duration\",\r\n \"sleep_onset_latency\",\r\n \"midpoint_time_of_sleep\",\r\n \"sleep_efficiency\",\r\n \"average_heart_rate\",\r\n \"minimum_heart_rate\",\r\n \"rmssd\",\r\n \"average_breathing_rate\",\r\n \"temperature_variation\",\r\n ]\r\n variables_in_seconds: List[str] = [\r\n \"total_sleep_time\",\r\n \"awake_duration\",\r\n \"light_sleep_duration\",\r\n \"rem_sleep_duration\",\r\n \"deep_sleep_duration\",\r\n \"sleep_onset_latency\",\r\n \"midpoint_time_of_sleep\",\r\n ]\r\n\r\n def _execute(\r\n self,\r\n inputs: List[Any],\r\n ) -> str:\r\n user_id = inputs[0].strip()\r\n full_dir = os.path.join(\r\n self.local_dir, user_id, self.device_name\r\n )\r\n df = self._get_data(\r\n local_dir=full_dir,\r\n file_name=self.file_name,\r\n start_date=inputs[1].strip(),\r\n end_date=inputs[2].strip(),\r\n usecols=self.columns_to_keep,\r\n )\r\n df.columns = self.columns_revised\r\n df = self._convert_seconds_to_minutes(\r\n df, self.variables_in_seconds\r\n )\r\n df = df.round(2)\r\n json_out = df.to_json(orient=\"records\")\r\n return json_out\r" }, { "identifier": "AskUser", "path": "tasks/ask_user.py", "snippet": "class AskUser(BaseTask):\n \"\"\"\n **Description:**\n\n This task is asking question back to the user and stops planning. When needed, the planner will decide to ask question from user\n and use the user's answer to proceed to the planning.\n\n \"\"\"\n\n name: str = \"ask_user\"\n chat_name: str = \"AskUser\"\n description: str = (\n \"Ask user to provide more information or directly answer user's question. \"\n \"You should try your best using other tools before calling this tool.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"The text returned to user. 
It should be relevant and very detailed based on the latest user's Question.\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n return_direct: bool = True\n\n translator: Any = None #: :meta private:\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"Translate query\"\"\"\n if inputs is None:\n return \"\"\n return inputs[0]\n\n def explain(\n self,\n ) -> str:\n return \"This task simply asks user to provide more information or continue interaction.\"" }, { "identifier": "GoogleTranslate", "path": "tasks/google_translator.py", "snippet": "class GoogleTranslate(BaseTask):\n \"\"\"\n **Description:**\n\n This task uses google translate to autmatically convert from the user language to english or vise versa.\n\n \"\"\"\n\n name: str = \"google_translator\"\n chat_name: str = \"GoogleTranslator\"\n description: str = (\n \"Translates queries between different languages.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"text to be translated\",\n \"destination language\",\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n translator: Any = None #: :meta private:\n\n @model_validator(mode=\"before\")\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"\n Validate that api key and python package exists in environment.\n\n Args:\n cls (object): The class itself.\n values (Dict): The dictionary containing the values for validation.\n Return:\n Dict:The original values.\n Raise:\n ImportError: If the 'playwright' package is not installed.\n\n\n \"\"\"\n\n try:\n from googletrans import Translator\n\n values[\"translator\"] = Translator()\n except ImportError:\n raise ValueError(\n \"Could not import googletrans python package. \"\n \"Please install it with `pip install googletrans-py`.\"\n )\n return values\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parse the input string into a list of strings.\n\n Args:\n input (str): Input string to be parsed.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n return input_args.split(\"$#\")\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task.\n\n Args:\n input (str): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n if len(inputs) < 2:\n return \"\", \"\"\n dest = inputs[1] if inputs[1] is not None else \"en\"\n result = self.translator.translate(inputs[0], dest=dest)\n return result.text, result.src\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n \"\"\"\n\n return \"This task uses google translate to translate between languages\"" }, { "identifier": "Click", "path": "tasks/playwright/click.py", "snippet": "class Click(BaseBrowser):\n \"\"\"\n **Description:**\n\n This code defines a class named Click that inherits from the BaseBrowser class.\n The Click class represents a task related to browser interactions, specifically clicking on an element\n identified by a CSS selector using the Playwright library.\n\n \"\"\"\n\n name: str = \"click\"\n chat_name: str = \"Clicker\"\n description: str = (\n \"Click on an element with the given CSS selector\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"CSS selector for the element to click\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def _selector_effective(self, selector: str) -> str:\n 
\"\"\"\n Get the effective CSS selector considering visibility.\n\n Args:\n selector (str): The original CSS selector.\n Return:\n str: The effective CSS selector.\n\n \"\"\"\n\n if not self.visible_only:\n return selector\n return f\"{selector} >> visible=1\"\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the click task by clicking on an element with the provided CSS selector.\n\n Aegs:\n input (str): The input string containing the CSS selector.\n Return:\n str: A message indicating the success or failure of the click operation.\n\n \"\"\"\n selector = inputs[0]\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n # Navigate to the desired webpage before using this tool\n selector_effective = self._selector_effective(\n selector=selector\n )\n from playwright.sync_api import (\n TimeoutError as PlaywrightTimeoutError,\n )\n\n try:\n page.click(\n selector_effective,\n strict=self.playwright_strict,\n timeout=self.playwright_timeout,\n )\n except PlaywrightTimeoutError:\n return f\"Unable to click on element '{selector}'\"\n return f\"Clicked element '{selector}'\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the purpose of the click task.\n\n Return:\n str: A brief explanation of the task.\n\n \"\"\"\n\n return \"This task clicks on an element in an specific url\"" }, { "identifier": "CurrentWebPage", "path": "tasks/playwright/current_page.py", "snippet": "class CurrentWebPage(BaseBrowser):\n \"\"\"\n **Description:**\n\n This code defines a class named CurrentWebPage that inherits from the BaseBrowser class.\n The CurrentWebPage class represents a task related to browser interactions, specifically retrieving the URL of the current web page.\n\n \"\"\"\n\n name: str = \"current_page\"\n chat_name: str = \"CurrentPage\"\n description: str = \"Returns the URL of the current page\"\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the task by retrieving the current page from the synchronous browser using\n the get_current_page function and returning its URL.\n\n Args:\n input (str): The input string (not used in this task).\n Return:\n str: The URL of the current web page.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n return str(page.url)\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provides a brief explanation of the current_page task.\n\n Return:\n str: An explanation of the task.\n\n \"\"\"\n\n return \"This task returns the ulr of the current page.\"" }, { "identifier": "ExtractHyperlinks", "path": "tasks/playwright/extract_hyperlinks.py", "snippet": "class ExtractHyperlinks(BaseBrowser):\n \"\"\"\n **Description:**\n\n This task extracts all hyperlinks from the current webpage.\n \"\"\"\n\n name: str = \"extract_hyperlinks\"\n chat_name: str = \"ExtractHyperLinks\"\n description: str = \"Extract all hyperlinks on the current webpage\"\n dependencies: List[str] = []\n inputs: List[str] = [\n \"Boolean: True/False. 
Return absolute URLs instead of relative URLs.\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n @model_validator(mode=\"before\")\n def check_bs_import(cls, values: dict) -> dict:\n \"\"\"\n Check that the arguments are valid.\n\n Args:\n values (Dict): The current attribute values.\n Return:\n Dict: The updated attribute values.\n Raise:\n ImportError: If 'beautifulsoup4' package is not installed.\n\n \"\"\"\n\n try:\n from bs4 import BeautifulSoup # noqa: F401\n except ImportError:\n raise ImportError(\n \"The 'beautifulsoup4' package is required to use this tool.\"\n \" Please install it with 'pip install beautifulsoup4'.\"\n )\n return values\n\n @staticmethod\n def scrape_page(\n page: Any, html_content: str, absolute_urls: bool\n ) -> str:\n \"\"\"\n Scrape hyperlinks from the current webpage.\n\n Args:\n page (Any): The current webpage.\n html_content (str): The HTML content of the webpage.\n absolute_urls (bool): True if absolute URLs should be returned, False otherwise.\n Return:\n str: JSON string containing the extracted hyperlinks.\n\n\n \"\"\"\n\n from urllib.parse import urljoin\n from bs4 import BeautifulSoup\n\n # Parse the HTML content with BeautifulSoup\n soup = BeautifulSoup(html_content, \"lxml\")\n\n # Find all the anchor elements and extract their href attributes\n anchors = soup.find_all(\"a\")\n if absolute_urls:\n base_url = page.url\n links = [\n urljoin(base_url, anchor.get(\"href\", \"\"))\n for anchor in anchors\n ]\n else:\n links = [anchor.get(\"href\", \"\") for anchor in anchors]\n # Return the list of links as a JSON string\n return json.dumps(links)\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the ExtractHyperlinks task.\n\n Args:\n input (str): The input parameter for the task.\n Return:\n str: JSON string containing the extracted hyperlinks.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n html_content = page.content()\n return self.scrape_page(page, html_content, inputs[0])\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a brief explanation of the ExtractHyperlinks task.\n\n Return:\n str: An explanation of the task.\n\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "ExtractText", "path": "tasks/playwright/extract_text.py", "snippet": "class ExtractText(BaseBrowser):\n \"\"\"\n **Description:**\n\n This task extracts all the text from the current webpage.\n \"\"\"\n\n name: str = \"extract_text\"\n chat_name: str = \"ExtractText\"\n description: str = \"Extract all the text on the current webpage\"\n dependencies: List[str] = [\"navigate\"]\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n @model_validator(mode=\"before\")\n def check_acheck_bs_importrgs(cls, values: dict) -> dict:\n \"\"\"\n Check that the arguments are valid.\n\n Args:\n values (Dict): The current attribute values.\n Return:\n Dict: The updated attribute values.\n Raise:\n ImportError: If 'beautifulsoup4' or 'lxml' packages are not installed.\n\n \"\"\"\n\n try:\n from bs4 import BeautifulSoup # noqa: F401\n except ImportError:\n raise ImportError(\n \"The 'beautifulsoup4' package is required to use this tool.\"\n \" Please install it with 'pip install beautifulsoup4'.\"\n )\n\n try:\n import lxml # noqa: F401\n except ImportError:\n raise 
ImportError(\n \"The 'lxml' package is required to use this tool.\"\n \" Please install it with 'pip install lxml'.\"\n )\n return values\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the ExtractText task.\n\n Args:\n input (str): The input parameter for the task.\n Return:\n str: The extracted text from the current webpage.\n Raise:\n ValueError: If the synchronous browser is not provided.\n\n \"\"\"\n from bs4 import BeautifulSoup\n\n self.validate_url(inputs[0].strip())\n\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n\n page = get_current_page(self.sync_browser)\n response = page.goto(inputs[0])\n status = response.status if response else \"unknown\"\n\n if status == 200:\n html_content = page.content()\n # Parse the HTML content with BeautifulSoup\n soup = BeautifulSoup(html_content, \"lxml\")\n\n return \" \".join(text for text in soup.stripped_strings)\n else:\n return (\n \"Error extracting text. The url is wrong. Try again.\"\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the ExtractText task.\n\n Return:\n str: A brief explanation of the ExtractText task.\n\n\n \"\"\"\n\n return \"This task returns the ulr of the current page.\"" }, { "identifier": "GetElements", "path": "tasks/playwright/get_elements.py", "snippet": "class GetElements(BaseBrowser):\n \"\"\"\n **Description:**\n\n The GetElements class is a subclass of BaseBrowser responsible for retrieving elements\n on the current web page that match a given CSS selector.\n \"\"\"\n\n name: str = \"get_elements\"\n chat_name: str = \"GetElements\"\n description: str = \"Retrieve elements in the current web page matching the given CSS selector\"\n dependencies: List[str] = []\n inputs: List[str] = [\n \"CSS selector, such as '*', 'div', 'p', 'a', #id, .classname\",\n \"Set of attributes to retrieve for each element\",\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n def _get_elements(\n page: SyncPage, selector: str, attributes: Sequence[str]\n ) -> List[dict]:\n \"\"\"\n Get elements matching the given CSS selector.\n\n Args:\n page (SyncPage): The current page.\n selector (str): CSS selector to match elements.\n attributes (Sequence[str]): Set of attributes to retrieve for each element.\n Return:\n List[dict]: A list of dictionaries containing the retrieved elements and their attributes.\n\n\n \"\"\"\n\n elements = page.query_selector_all(selector)\n results = []\n for element in elements:\n result = {}\n for attribute in attributes:\n if attribute == \"innerText\":\n val: Optional[str] = element.inner_text()\n else:\n val = element.get_attribute(attribute)\n if val is not None and val.strip() != \"\":\n result[attribute] = val\n if result:\n results.append(result)\n return results\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Execute the GetElements task.\n\n Args:\n input (str): Input string containing CSS selector and attributes.\n Return:\n str: The JSON-formatted string containing the retrieved elements and their attributes.\n 
Raise:\n ValueError: If the synchronous browser is not provided.\n\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n # Navigate to the desired webpage before using this tool\n results = self._get_elements(page, inputs[0], inputs[1])\n return json.dumps(results, ensure_ascii=False)\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Explain the GetElements task.\n\n Return:\n str: A brief explanation of the GetElements task.\n\n \"\"\"\n\n return \"This task gets the elements.\"" }, { "identifier": "Navigate", "path": "tasks/playwright/navigate.py", "snippet": "class Navigate(BaseBrowser):\n \"\"\"\n **Description:**\n\n This class represents a browser navigation task to a specified URL using Playwright.\n \"\"\"\n\n name: str = \"navigate\"\n chat_name: str = \"Navigate\"\n description: str = \"Navigate a browser to the specified URL\"\n dependencies: List[str] = []\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the navigation action in the browser using Playwright.\n\n Args:\n input (str): The input string containing the URL to navigate to.\n Return:\n str: A message indicating whether the navigation was successful, including the URL and status code if successful,\n or an error message if unsuccessful.\n\n \"\"\"\n self.validate_url(inputs[0].strip())\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n response = page.goto(inputs[0])\n status = response.status if response else \"unknown\"\n return (\n f\"Navigating to {inputs[0]} returned status code {status}\"\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n This method provides an explanation of the task.\n\n Return:\n str: A brief explanation of the task, in this case, \"This task extracts all of the hyperlinks.\"\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "NavigateBack", "path": "tasks/playwright/navigate_back.py", "snippet": "class NavigateBack(BaseBrowser):\n \"\"\"\n **Description:**\n\n This class represents a browser navigation task using Playwright.\n \"\"\"\n\n name: str = \"navigate_back\"\n chat_name: str = \"NavigateBack\"\n description: str = (\n \"Navigate back to the previous page in the browser history\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"url to navigate to\"]\n outputs: List[str] = []\n output_type: bool = False\n\n def validate_url(self, url):\n \"\"\"\n This method validates a given URL by checking if its scheme is either 'http' or 'https'.\n\n Args:\n url (str): The URL to be validated.\n Return:\n str: The validated URL.\n Raise:\n ValueError: If the URL scheme is not 'http' or 'https'.\n\n \"\"\"\n\n parsed_url = urlparse(url)\n if parsed_url.scheme not in (\"http\", \"https\"):\n raise ValueError(\"URL scheme must be 'http' or 'https'\")\n 
return url\n\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n This method executes the navigation back action in the browser using Playwright.\n\n Args:\n input (str): The input string containing the URL to navigate to.\n Return:\n str: A message indicating whether the navigation was successful, including the URL and status code if successful,\n or an error message if unsuccessful.\n\n \"\"\"\n if self.sync_browser is None:\n raise ValueError(\n f\"Synchronous browser not provided to {self.name}\"\n )\n page = get_current_page(self.sync_browser)\n response = page.go_back()\n\n if response:\n return (\n f\"Navigated back to the previous page with URL '{response.url}'.\"\n f\" Status code {response.status}\"\n )\n else:\n return \"Unable to navigate back; no previous page in the history\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n This method provides an explanation of the task.\n\n Return:\n str: A brief explanation of the task, in this case, \"This task extracts all of the hyperlinks.\"\n\n\n \"\"\"\n\n return \"This task extracts all of the hyperlinks.\"" }, { "identifier": "ReadDataPipe", "path": "tasks/read_from_datapipe.py", "snippet": "class ReadDataPipe(BaseTask):\n \"\"\"\n **Description:**\n\n This code reads raw data stored in datapipe. When different tasks are executed, there are situations that the final data is stored\n in the datapipe when the final called task's output_type=True. In these situations, this task is called to retireve the latest stored data\n to be used for final inference.\n \"\"\"\n\n name: str = \"read_from_datapipe\"\n chat_name: str = \"DataPipeReader\"\n description: str = (\n \"Get the stored information from datapipe to be used to answer user query accurately. \"\n \"This should be called when the final answer is in datapipe.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\n \"the datapipe key in the format $datapipe:key$\"\n ]\n outputs: List[str] = []\n output_type: bool = False\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n This simply retrieves data from datapipe.\n\n Args:\n inputs (List[Any]): The datapipe key\n Return:\n str: The raw data along with the instructions.\n\n \"\"\"\n if len(inputs) == 0:\n return \"\"\n return (\n \"The data along with the description for each data is provided. 
\"\n \"Use the data and description to provide a detailed answer regarding the user query.\\n\\n\"\n + json.dumps(inputs[0])\n )\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide an explanation of the task.\n\n Return:\n str: Explanation of the SerpAPI task.\n\n \"\"\"\n return \"This task is to read data from datapipe.\"" }, { "identifier": "SerpAPI", "path": "tasks/serpapi.py", "snippet": "class SerpAPI(BaseTask):\n \"\"\"\n **Description:**\n\n This code defines a class named SerpAPI, which is a specific implementation of the abstract BaseTask class.\n The SerpAPI class represents a task that utilizes the SerpAPI (Google Search API) to perform internet searches\n and retrieve relevant information.\n\n \"\"\"\n\n name: str = \"serpapi\"\n chat_name: str = \"InternetSearchSerp\"\n description: str = (\n \"A low-cost Google Search API.\"\n \"Useful for when you need to answer questions about current events.\"\n )\n dependencies: List[str] = []\n inputs: List[str] = [\"It should be a search query.\"]\n outputs: List[str] = []\n output_type: bool = False\n\n search_engine: Any = None #: :meta private:\n params: Dict = Field(\n default={\n \"engine\": \"google\",\n \"google_domain\": \"google.com\",\n \"gl\": \"us\",\n \"hl\": \"en\",\n }\n )\n serpapi_api_key: Optional[str] = None\n aiosession: Optional[aiohttp.ClientSession] = None\n\n @model_validator(mode=\"before\")\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"\n Validate that api key and python package exists in environment.\n\n Args:\n values (Dict): The dictionary of attribute values.\n Return:\n Dict: The updated dictionary of attribute values.\n Raise:\n ValueError: If the SerpAPI python package is not installed.\n\n \"\"\"\n\n serpapi_api_key = get_from_dict_or_env(\n values, \"serpapi_api_key\", \"SERPAPI_API_KEY\"\n )\n values[\"serpapi_api_key\"] = serpapi_api_key\n try:\n from serpapi import GoogleSearch\n\n values[\"search_engine\"] = GoogleSearch\n except ImportError:\n raise ValueError(\n \"Could not import serpapi python package. \"\n \"Please install it with `pip install google-search-results`.\"\n )\n return values\n\n def get_params(self, query: str) -> Dict[str, str]:\n \"\"\"\n Get parameters for SerpAPI.\n\n Args:\n query (str): The search query.\n Return:\n Dict[str, str]: The parameters for the SerpAPI.\n\n\n \"\"\"\n\n _params = {\n \"api_key\": self.serpapi_api_key,\n \"q\": query,\n }\n params = {**self.params, **_params}\n return params\n\n def results(self, query: str) -> Dict:\n \"\"\"\n Run query through SerpAPI and return the raw result.\n\n Args:\n query (str): The search query.\n Return:\n Dict: The raw result from the SerpAPI.\n\n\n \"\"\"\n\n params = self.get_params(query)\n search = self.search_engine(params)\n res = search.get_dict()\n return res\n\n @staticmethod\n def _process_response(res: Dict) -> str:\n \"\"\"\n Process response from SerpAPI.\n\n Args:\n res (Dict): The raw response from the SerpAPI.\n Return:\n str: Processed information from the SerpAPI response.\n\n \"\"\"\n\n try:\n if \"answer_box\" in res:\n toret = (\n \"url: \"\n + res[\"answer_box\"][\"link\"]\n + \"\\nmetadata: \"\n + res[\"answer_box\"][\"snippet\"]\n )\n else:\n toret = (\n \"url: \"\n + res[\"organic_results\"][0][\"link\"]\n + \"\\nmetadata: \"\n + res[\"organic_results\"][0][\"snippet\"]\n )\n except KeyError:\n return \"Could not get the proper response from the search. 
Try another search query.\"\n return toret\n\n def _execute(\n self,\n inputs: List[Any] = None,\n ) -> str:\n \"\"\"\n Run query through SerpAPI and parse result.\n\n Args:\n input (str): The input, which should be a search query.\n Return:\n str: The parsed result from the SerpAPI.\n\n\n \"\"\"\n if len(inputs) == 0:\n return \"\"\n return self._process_response(self.results(inputs[0]))\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide an explanation of the task.\n\n Return:\n str: Explanation of the SerpAPI task.\n\n \"\"\"\n\n return (\n \"This task searched in the internet using google search engine, returns the url\"\n \"and the first top result of the google search.\"\n )" }, { "identifier": "BaseTask", "path": "tasks/task.py", "snippet": "class BaseTask(BaseModel):\n \"\"\"\n **Description:**\n\n This class is the base implementation for the Tasks. For every new task that you want to create, you should\n inherit from this class and override the attributes and methods based on your task's need. This class defines a base class named BaseTask.\n This class serves as a foundation for defining common properties and behaviors among various tasks in the system.\n\n Attributes:\n name: The name of the task. It should be unique underscore_case to be defined in TaskType. sample_task_name\n chat_name: This is the name that later will be used if needed to mention the tasks inside the chat with the user.\n It should be Camel Case. SampleTaskChatName\n description: The description of the what specifically the task is doing.\n Try to define it as specific as possible to help the Task Planner decide better.\n dependencies: You can put the name of the TaskTypes that this task is dependent on. For example, in stress detection scenario,\n the stress analysis is dependent on the fetch hrv data task. [TaskType.SERPAPI, TASKTYPE.EXTRACT_TEXT]\n inputs: This is the list of descriptions for the inputs that should be provided by the planner.\n For example if your task has two inputs: [\"the first input description\", \"the second input description\"]\n outputs: This is the list of the description of the outputs that the task returns.\n This helps the planner to understand the returned results better and use it as needed.\n For example, if the task returns a list of sleep hours for different sleep states,\n the description helps planner learn which number is related to what state.\n output_type: This indicates if the task result should be stored in the DataPipe or be returned directly to the planner.\n This process will be done in the parse_input and post_execute methods. If needed you can overwrite them.\n return_direct: This indicates if this task should completely interrupt the planning process or not.\n This is needed in cases like when you want to ask a question from user and no further\n planning is needed until the user gives the proper answer (look at ask_user task)\n \"\"\"\n\n name: str\n chat_name: str\n description: str\n dependencies: List[str] = []\n inputs: List[str] = []\n outputs: List[str] = []\n datapipe: DataPipe = None\n # False if the output should directly passed back to the planner.\n # True if it should be stored in datapipe\n output_type: bool = False\n # False if planner should continue. True if after this task the planning should be\n # on pause or stop. 
examples are when you have a task that asks user to provide more information\n return_direct: bool = False\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n @property\n def name(self):\n return self.name\n\n @property\n def dependencies(self):\n return self.dependencies\n\n @property\n def inputs(self):\n return \", \".join(\n [\n f\"{str(i)}-{input}\"\n for i, input in enumerate(self.inputs)\n ]\n )\n\n @abstractmethod\n def _execute(\n self,\n inputs: List[Any],\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task. You should implement this method based on your need.\n This method is called by the **execute** method that provides the parsed inputs to this method.\n\n Args:\n inputs (List[Any]): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n \"\"\"\n\n def _parse_input(\n self,\n input_args: str,\n ) -> List[str]:\n \"\"\"\n Parses the input string into a list of strings. If the input is in format `datapipe:key`,\n the parser will retrieve the data from datapipe before sending it over to the **_execute** method.\n\n Args:\n input_args (str): Input string provided by planner. It should be parsed and return a list of str variables.\n Return:\n List[str]: List of parsed strings. These strings can be converted into desired types inside **_execute** method.\n\n\n \"\"\"\n inputs = input_args.split(\",\")\n return [\n json.loads(\n self.datapipe.retrieve(\n re.search(r\"datapipe:[0-9a-f\\-]{36}\", arg)\n .group()\n .strip()\n .split(\":\")[-1]\n )\n )\n if \"datapipe\" in arg\n else arg.strip()\n for arg in inputs\n ]\n\n def _post_execute(self, result: str = \"\"):\n \"\"\"\n This method is called inside **execute** method after calling **_execute**. The result of **_execute** will be passed to this method\n in case the **output_type** attribute is True, the result will be stored inside the datapipe and the datapipe key is returned to\n the plenner instead of the raw result. This is good practice for times that you have intermediate data (like sleep data over a month)\n and it needs to be passed over to other tasks and the raw result is not immidiately needed.\n This will save a huge amount of tokens and makes sure that the planner will not pass wrong raw data to the tasks.\n\n It is important to note that to make the **DataPipe's** stored data standard and unified, we store the data in the json string\n format that currently contains 'data' and 'description' keys. The 'data' will be the returned data after execution and the 'description'\n is created using the **outputs** attribute of the task. 
Whenever the raw data is returned to the planner, these **outputs** descriptions\n will help the planner understand and learn how to interpret the 'data' to generate the final answer or continue planning.\n\n Args:\n result (str): string containig the task result.\n Return:\n List[str]: List of parsed strings.\n\n \"\"\"\n if self.output_type:\n key = self.datapipe.store(\n json.dumps(\n {\n \"data\": result,\n \"description\": \",\".join(self.outputs),\n }\n )\n )\n return (\n f\"The result of the tool {self.name} is stored in the datapipe with key: $datapipe:{key}$\"\n \" pass this key to other tools to access to the result or call read_from_datapipe to get the raw data.\"\n )\n return result\n\n def execute(self, input_args: str) -> str:\n \"\"\"\n This method is called by the **Orchestrator** which provides the planner provided inputs.\n This method first calls **_parse_input** to parse the inputs and retrieve needed data from the **DataPipe**\n Then **_execute** is called and the parsed inputs are given to this method. Finally the final result of execution is passed to\n **_post_execute** and ith will either be stored inside **DataPipe** or directly returned to the planner to continue planning.\n\n Args:\n input_args (str): Input string provided by planner.\n Return:\n str: The final result of the task execution.\n\n \"\"\"\n inputs = self._parse_input(input_args)\n result = self._execute(inputs)\n return self._post_execute(result)\n\n def get_dict(self) -> str:\n \"\"\"\n Generate a dictionary-like representation of the task.\n\n Return:\n str: String representation of the task dictionary.\n\n\n \"\"\"\n inputs = \",\".join(\n f\"input{i+1}-{word}\" for i, word in enumerate(self.inputs)\n )\n dependencies = \",\".join(\n f\"{i+1}-{word}\"\n for i, word in enumerate(self.dependencies)\n )\n prompt = (\n f\"tool name:{self.name}, description: {self.description}.\"\n )\n if len(self.inputs) > 0:\n prompt += f\"The input to this tool should be comma separated list of data representing: {inputs}\"\n if len(self.dependencies) > 0:\n prompt += f\"\\nThis tool is dependent on the following tools. 
make sure these tools are called first: '{dependencies}'\"\n # prompt += \"\\n\"\n return prompt\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n \"\"\"\n\n return \"\"\"\n Sample Explanation\n \"\"\"" }, { "identifier": "TaskType", "path": "tasks/task_types.py", "snippet": "class TaskType(str, Enum):\n SERPAPI = \"serpapi\"\n CLICK = \"click\"\n GET_CURRENT_PAGE = \"current_page\"\n EXTRACT_HYPERLINKS = \"extract_hyperlinks\"\n EXTRACT_TEXT = \"extract_text\"\n GET_ELEMENTS = \"get_elements\"\n NAVIGATE_BACK = \"navigate_back\"\n NAVIGATE = \"navigate\"\n AFFECT_SLEEP_GET = \"affect_sleep_get\"\n AFFECT_ACTIVITY_GET = \"affect_activity_get\"\n AFFECT_SLEEP_ANALYSIS = \"affect_sleep_analysis\"\n AFFECT_ACTIVITY_ANALYSIS = \"affect_activity_analysis\"\n GOOGLE_TRANSLATE = \"google_translate\"\n ASK_USER = \"ask_user\"\n READ_FROM_DATAPIPE = \"read_from_datapipe\"\n TEST_FILE = \"test_file\"" }, { "identifier": "TestFile", "path": "tasks/test_file.py", "snippet": "class TestFile(BaseTask):\n name: str = \"test_file\"\n chat_name: str = \"TestFile\"\n description: str = \"analyzes the image and returns description.\"\n dependencies: List[str] = []\n inputs: List[str] = [\"the image file name\"]\n outputs: List[str] = []\n output_type: bool = False\n return_direct: bool = True\n\n translator: Any = None #: :meta private:\n\n def parse_input(\n self,\n input: str,\n ) -> List[str]:\n \"\"\"\n Parse the input string into a list of strings.\n\n Args:\n input (str): Input string to be parsed.\n Return:\n List[str]: List of parsed strings.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n return input.split(\"$#\")\n\n def execute(\n self,\n input: str,\n ) -> str:\n \"\"\"\n Abstract method representing the execution of the task.\n\n Args:\n input (str): Input data for the task.\n Return:\n str: Result of the task execution.\n Raise:\n NotImplementedError: Subclasses must implement the execute method.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n self.parse_input(input)\n return \"this image is a classification results of a data\"\n\n def explain(\n self,\n ) -> str:\n \"\"\"\n Provide a sample explanation for the task.\n\n Return:\n str: Sample explanation for the task.\n\n\n\n Example:\n .. code-block:: python\n\n from langchain import ReActChain, OpenAI\n react = ReAct(llm=OpenAI())\n\n \"\"\"\n\n return \"This task simply asks user to provide more information or continue interaction.\"" } ]
from typing import Dict
from typing import Type

from tasks.affect import ActivityAnalysis
from tasks.affect import ActivityGet
from tasks.affect import SleepAnalysis
from tasks.affect import SleepGet
from tasks.ask_user import AskUser
from tasks.google_translator import GoogleTranslate
from tasks.playwright import Click
from tasks.playwright import CurrentWebPage
from tasks.playwright import ExtractHyperlinks
from tasks.playwright import ExtractText
from tasks.playwright import GetElements
from tasks.playwright import Navigate
from tasks.playwright import NavigateBack
from tasks.read_from_datapipe import ReadDataPipe
from tasks.serpapi import SerpAPI
from tasks.task import BaseTask
from tasks.task_types import TaskType
from tasks.test_file import TestFile
14,046
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = {
    TaskType.SERPAPI: SerpAPI,
    TaskType.CLICK: Click,
    TaskType.GET_CURRENT_PAGE: CurrentWebPage,
    TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks,
    TaskType.EXTRACT_TEXT: ExtractText,
    TaskType.GET_ELEMENTS: GetElements,
    TaskType.NAVIGATE_BACK: NavigateBack,
    TaskType.NAVIGATE: Navigate,
    TaskType.AFFECT_SLEEP_GET: SleepGet,
    TaskType.AFFECT_ACTIVITY_GET: ActivityGet,
    TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis,
    TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis,
    TaskType.GOOGLE_TRANSLATE: GoogleTranslate,
    TaskType.ASK_USER: AskUser,
    TaskType.TEST_FILE: TestFile,
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = {
    TaskType.SERPAPI: SerpAPI,
    TaskType.CLICK: Click,
    TaskType.GET_CURRENT_PAGE: CurrentWebPage,
    TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks,
    TaskType.EXTRACT_TEXT: ExtractText,
    TaskType.GET_ELEMENTS: GetElements,
    TaskType.NAVIGATE_BACK: NavigateBack,
    TaskType.NAVIGATE: Navigate,
    TaskType.AFFECT_SLEEP_GET: SleepGet,
    TaskType.AFFECT_ACTIVITY_GET: ActivityGet,
    TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis,
    TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis,
    TaskType.GOOGLE_TRANSLATE: GoogleTranslate,
    TaskType.ASK_USER: AskUser,
    TaskType.TEST_FILE: TestFile,
TaskType.READ_FROM_DATAPIPE: ReadDataPipe,
13
2023-12-02 05:10:44+00:00
16k
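The values above appear to describe one retrieval-augmented line-completion example: a truncated file, the gold next line (`TaskType.READ_FROM_DATAPIPE: ReadDataPipe,`), and an index (13) that lines up with the `ReadDataPipe` entry in the preceding context list, i.e. the class the gold line refers to. The sketch below shows how such a record could be stitched into a completion prompt; it is a minimal sketch, assuming plain-dict records and these field names for illustration only, not the dataset's official schema or tooling.

```python
# Illustrative sketch only (assumption: records are plain dicts with the field
# names used below; no official loader is implied).

record = {
    "next_line": "TaskType.READ_FROM_DATAPIPE: ReadDataPipe,",
    "gold_snippet_index": 13,  # position of the ReadDataPipe entry in "context"
    "context": [{"identifier": "ReadDataPipe",
                 "path": "tasks/read_from_datapipe.py",
                 "snippet": "class ReadDataPipe(BaseTask): ..."}],
    "cropped_code": "TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = {\n"
                    "    TaskType.SERPAPI: SerpAPI,\n"
                    "    # ... truncated before the line to predict",
}


def build_prompt(rec: dict) -> str:
    """Prepend the retrieved gold snippet to the truncated file; a model is
    then asked to produce rec['next_line'] as the continuation."""
    # The toy context list here is shorter than the real 18-entry one, so clamp.
    idx = min(rec["gold_snippet_index"], len(rec["context"]) - 1)
    gold = rec["context"][idx]
    return f"# {gold['path']}\n{gold['snippet']}\n\n{rec['cropped_code']}\n"


print(build_prompt(record))
```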
Czm369/MixPL
projects/Detic_new/detic/centernet_rpn_head.py
[ { "identifier": "CenterNetUpdateHead", "path": "mmdet/models/dense_heads/centernet_update_head.py", "snippet": "class CenterNetUpdateHead(AnchorFreeHead):\n \"\"\"CenterNetUpdateHead is an improved version of CenterNet in CenterNet2.\n Paper link `<https://arxiv.org/abs/2103.07461>`_.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channel in the input feature map.\n regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple\n level points.\n hm_min_radius (int): Heatmap target minimum radius of cls branch.\n Defaults to 4.\n hm_min_overlap (float): Heatmap target minimum overlap of cls branch.\n Defaults to 0.8.\n more_pos_thresh (float): The filtering threshold when the cls branch\n adds more positive samples. Defaults to 0.2.\n more_pos_topk (int): The maximum number of additional positive samples\n added to each gt. Defaults to 9.\n soft_weight_on_reg (bool): Whether to use the soft target of the\n cls branch as the soft weight of the bbox branch.\n Defaults to False.\n loss_cls (:obj:`ConfigDict` or dict): Config of cls loss. Defaults to\n dict(type='GaussianFocalLoss', loss_weight=1.0)\n loss_bbox (:obj:`ConfigDict` or dict): Config of bbox loss. Defaults to\n dict(type='GIoULoss', loss_weight=2.0).\n norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct\n and config norm layer. Defaults to\n ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``.\n train_cfg (:obj:`ConfigDict` or dict, optional): Training config.\n Unused in CenterNet. Reserved for compatibility with\n SingleStageDetector.\n test_cfg (:obj:`ConfigDict` or dict, optional): Testing config\n of CenterNet.\n \"\"\"\n\n def __init__(self,\n num_classes: int,\n in_channels: int,\n regress_ranges: RangeType = ((0, 80), (64, 160), (128, 320),\n (256, 640), (512, INF)),\n hm_min_radius: int = 4,\n hm_min_overlap: float = 0.8,\n more_pos_thresh: float = 0.2,\n more_pos_topk: int = 9,\n soft_weight_on_reg: bool = False,\n loss_cls: ConfigType = dict(\n type='GaussianFocalLoss',\n pos_weight=0.25,\n neg_weight=0.75,\n loss_weight=1.0),\n loss_bbox: ConfigType = dict(\n type='GIoULoss', loss_weight=2.0),\n norm_cfg: OptConfigType = dict(\n type='GN', num_groups=32, requires_grad=True),\n train_cfg: OptConfigType = None,\n test_cfg: OptConfigType = None,\n **kwargs) -> None:\n super().__init__(\n num_classes=num_classes,\n in_channels=in_channels,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n norm_cfg=norm_cfg,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n **kwargs)\n self.soft_weight_on_reg = soft_weight_on_reg\n self.hm_min_radius = hm_min_radius\n self.more_pos_thresh = more_pos_thresh\n self.more_pos_topk = more_pos_topk\n self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap)\n self.sigmoid_clamp = 0.0001\n\n # GaussianFocalLoss must be sigmoid mode\n self.use_sigmoid_cls = True\n self.cls_out_channels = num_classes\n\n self.regress_ranges = regress_ranges\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def _init_predictor(self) -> None:\n \"\"\"Initialize predictor layers of the head.\"\"\"\n self.conv_cls = nn.Conv2d(\n self.feat_channels, self.num_classes, 3, padding=1)\n self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: A tuple of each 
level outputs.\n\n - cls_scores (list[Tensor]): Box scores for each scale level, \\\n each is a 4D-tensor, the channel number is num_classes.\n - bbox_preds (list[Tensor]): Box energies / deltas for each \\\n scale level, each is a 4D-tensor, the channel number is 4.\n \"\"\"\n return multi_apply(self.forward_single, x, self.scales, self.strides)\n\n def forward_single(self, x: Tensor, scale: Scale,\n stride: int) -> Tuple[Tensor, Tensor]:\n \"\"\"Forward features of a single scale level.\n\n Args:\n x (Tensor): FPN feature maps of the specified stride.\n scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize\n the bbox prediction.\n stride (int): The corresponding stride for feature maps.\n\n Returns:\n tuple: scores for each class, bbox predictions of\n input feature maps.\n \"\"\"\n cls_score, bbox_pred, _, _ = super().forward_single(x)\n # scale the bbox_pred of different level\n # float to avoid overflow when enabling FP16\n bbox_pred = scale(bbox_pred).float()\n # bbox_pred needed for gradient computation has been modified\n # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n bbox_pred = bbox_pred.clamp(min=0)\n if not self.training:\n bbox_pred *= stride\n return cls_score, bbox_pred\n\n def loss_by_feat(\n self,\n cls_scores: List[Tensor],\n bbox_preds: List[Tensor],\n batch_gt_instances: InstanceList,\n batch_img_metas: List[dict],\n batch_gt_instances_ignore: OptInstanceList = None\n ) -> Dict[str, Tensor]:\n \"\"\"Calculate the loss based on the features extracted by the detection\n head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level,\n each is a 4D-tensor, the channel number is num_classes.\n bbox_preds (list[Tensor]): Box energies / deltas for each scale\n level, each is a 4D-tensor, the channel number is 4.\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):\n Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n\n Returns:\n dict[str, Tensor]: A dictionary of loss components.\n \"\"\"\n num_imgs = cls_scores[0].size(0)\n assert len(cls_scores) == len(bbox_preds)\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n all_level_points = self.prior_generator.grid_priors(\n featmap_sizes,\n dtype=bbox_preds[0].dtype,\n device=bbox_preds[0].device)\n\n # 1 flatten outputs\n flatten_cls_scores = [\n cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n for cls_score in cls_scores\n ]\n flatten_bbox_preds = [\n bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n for bbox_pred in bbox_preds\n ]\n flatten_cls_scores = torch.cat(flatten_cls_scores)\n flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n\n # repeat points to align with bbox_preds\n flatten_points = torch.cat(\n [points.repeat(num_imgs, 1) for points in all_level_points])\n\n assert (torch.isfinite(flatten_bbox_preds).all().item())\n\n # 2 calc reg and cls branch targets\n cls_targets, bbox_targets = self.get_targets(all_level_points,\n batch_gt_instances)\n\n # 3 add more pos index for cls branch\n featmap_sizes = flatten_points.new_tensor(featmap_sizes)\n pos_inds, cls_labels = self.add_cls_pos_inds(flatten_points,\n flatten_bbox_preds,\n featmap_sizes,\n batch_gt_instances)\n\n # 4 calc cls loss\n if pos_inds is None:\n # num_gts=0\n num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float)\n else:\n num_pos_cls = bbox_preds[0].new_tensor(\n len(pos_inds), dtype=torch.float)\n num_pos_cls = max(reduce_mean(num_pos_cls), 1.0)\n flatten_cls_scores = flatten_cls_scores.sigmoid().clamp(\n min=self.sigmoid_clamp, max=1 - self.sigmoid_clamp)\n cls_loss = self.loss_cls(\n flatten_cls_scores,\n cls_targets,\n pos_inds=pos_inds,\n pos_labels=cls_labels,\n avg_factor=num_pos_cls)\n\n # 5 calc reg loss\n pos_bbox_inds = torch.nonzero(\n bbox_targets.max(dim=1)[0] >= 0).squeeze(1)\n pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds]\n pos_bbox_targets = bbox_targets[pos_bbox_inds]\n\n bbox_weight_map = cls_targets.max(dim=1)[0]\n bbox_weight_map = bbox_weight_map[pos_bbox_inds]\n bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \\\n else torch.ones_like(bbox_weight_map)\n num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0)\n\n if len(pos_bbox_inds) > 0:\n pos_points = flatten_points[pos_bbox_inds]\n pos_decoded_bbox_preds = self.bbox_coder.decode(\n pos_points, pos_bbox_preds)\n pos_decoded_target_preds = self.bbox_coder.decode(\n pos_points, pos_bbox_targets)\n bbox_loss = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds,\n weight=bbox_weight_map,\n avg_factor=num_pos_bbox)\n else:\n bbox_loss = flatten_bbox_preds.sum() * 0\n\n return dict(loss_cls=cls_loss, loss_bbox=bbox_loss)\n\n def get_targets(\n self,\n points: List[Tensor],\n batch_gt_instances: InstanceList,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute classification and bbox targets for points in multiple\n images.\n\n Args:\n points (list[Tensor]): Points of each fpn level, each has shape\n (num_points, 2).\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. 
It usually includes ``bboxes`` and ``labels``\n attributes.\n\n Returns:\n tuple: Targets of each level.\n\n - concat_lvl_labels (Tensor): Labels of all level and batch.\n - concat_lvl_bbox_targets (Tensor): BBox targets of all \\\n level and batch.\n \"\"\"\n assert len(points) == len(self.regress_ranges)\n\n num_levels = len(points)\n # the number of points per img, per lvl\n num_points = [center.size(0) for center in points]\n\n # expand regress ranges to align with points\n expanded_regress_ranges = [\n points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n points[i]) for i in range(num_levels)\n ]\n # concat all levels points and regress ranges\n concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n concat_points = torch.cat(points, dim=0)\n concat_strides = torch.cat([\n concat_points.new_ones(num_points[i]) * self.strides[i]\n for i in range(num_levels)\n ])\n\n # get labels and bbox_targets of each image\n cls_targets_list, bbox_targets_list = multi_apply(\n self._get_targets_single,\n batch_gt_instances,\n points=concat_points,\n regress_ranges=concat_regress_ranges,\n strides=concat_strides)\n\n bbox_targets_list = _transpose(bbox_targets_list, num_points)\n cls_targets_list = _transpose(cls_targets_list, num_points)\n concat_lvl_bbox_targets = torch.cat(bbox_targets_list, 0)\n concat_lvl_cls_targets = torch.cat(cls_targets_list, dim=0)\n return concat_lvl_cls_targets, concat_lvl_bbox_targets\n\n def _get_targets_single(self, gt_instances: InstanceData, points: Tensor,\n regress_ranges: Tensor,\n strides: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Compute classification and bbox targets for a single image.\"\"\"\n num_points = points.size(0)\n num_gts = len(gt_instances)\n gt_bboxes = gt_instances.bboxes\n gt_labels = gt_instances.labels\n\n if num_gts == 0:\n return gt_labels.new_full((num_points,\n self.num_classes),\n self.num_classes), \\\n gt_bboxes.new_full((num_points, 4), -1)\n\n # Calculate the regression tblr target corresponding to all points\n points = points[:, None].expand(num_points, num_gts, 2)\n gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n strides = strides[:, None, None].expand(num_points, num_gts, 2)\n\n bbox_target = bbox2distance(points, gt_bboxes) # M x N x 4\n\n # condition1: inside a gt bbox\n inside_gt_bbox_mask = bbox_target.min(dim=2)[0] > 0 # M x N\n\n # condition2: Calculate the nearest points from\n # the upper, lower, left and right ranges from\n # the center of the gt bbox\n centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2)\n centers_discret = ((centers / strides).int() * strides).float() + \\\n strides / 2\n\n centers_discret_dist = points - centers_discret\n dist_x = centers_discret_dist[..., 0].abs()\n dist_y = centers_discret_dist[..., 1].abs()\n inside_gt_center3x3_mask = (dist_x <= strides[..., 0]) & \\\n (dist_y <= strides[..., 0])\n\n # condition3: limit the regression range for each location\n bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:]\n crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2\n inside_fpn_level_mask = (crit >= regress_ranges[:, [0]]) & \\\n (crit <= regress_ranges[:, [1]])\n bbox_target_mask = inside_gt_bbox_mask & \\\n inside_gt_center3x3_mask & \\\n inside_fpn_level_mask\n\n # Calculate the distance weight map\n gt_center_peak_mask = ((centers_discret_dist**2).sum(dim=2) == 0)\n weighted_dist = ((points - centers)**2).sum(dim=2) # M x N\n weighted_dist[gt_center_peak_mask] = 0\n\n areas = (gt_bboxes[..., 2] - gt_bboxes[..., 0]) * (\n gt_bboxes[..., 3] - 
gt_bboxes[..., 1])\n radius = self.delta**2 * 2 * areas\n radius = torch.clamp(radius, min=self.hm_min_radius**2)\n weighted_dist = weighted_dist / radius\n\n # Calculate bbox_target\n bbox_weighted_dist = weighted_dist.clone()\n bbox_weighted_dist[bbox_target_mask == 0] = INF * 1.0\n min_dist, min_inds = bbox_weighted_dist.min(dim=1)\n bbox_target = bbox_target[range(len(bbox_target)),\n min_inds] # M x N x 4 --> M x 4\n bbox_target[min_dist == INF] = -INF\n\n # Convert to feature map scale\n bbox_target /= strides[:, 0, :].repeat(1, 2)\n\n # Calculate cls_target\n cls_target = self._create_heatmaps_from_dist(weighted_dist, gt_labels)\n\n return cls_target, bbox_target\n\n @torch.no_grad()\n def add_cls_pos_inds(\n self, flatten_points: Tensor, flatten_bbox_preds: Tensor,\n featmap_sizes: Tensor, batch_gt_instances: InstanceList\n ) -> Tuple[Optional[Tensor], Optional[Tensor]]:\n \"\"\"Provide additional adaptive positive samples to the classification\n branch.\n\n Args:\n flatten_points (Tensor): The point after flatten, including\n batch image and all levels. The shape is (N, 2).\n flatten_bbox_preds (Tensor): The bbox predicts after flatten,\n including batch image and all levels. The shape is (N, 4).\n featmap_sizes (Tensor): Feature map size of all layers.\n The shape is (5, 2).\n batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n\n Returns:\n tuple:\n\n - pos_inds (Tensor): Adaptively selected positive sample index.\n - cls_labels (Tensor): Corresponding positive class label.\n \"\"\"\n outputs = self._get_center3x3_region_index_targets(\n batch_gt_instances, featmap_sizes)\n cls_labels, fpn_level_masks, center3x3_inds, \\\n center3x3_bbox_targets, center3x3_masks = outputs\n\n num_gts, total_level, K = cls_labels.shape[0], len(\n self.strides), center3x3_masks.shape[-1]\n\n if num_gts == 0:\n return None, None\n\n # The out-of-bounds index is forcibly set to 0\n # to prevent loss calculation errors\n center3x3_inds[center3x3_masks == 0] = 0\n reg_pred_center3x3 = flatten_bbox_preds[center3x3_inds]\n center3x3_points = flatten_points[center3x3_inds].view(-1, 2)\n\n center3x3_bbox_targets_expand = center3x3_bbox_targets.view(\n -1, 4).clamp(min=0)\n\n pos_decoded_bbox_preds = self.bbox_coder.decode(\n center3x3_points, reg_pred_center3x3.view(-1, 4))\n pos_decoded_target_preds = self.bbox_coder.decode(\n center3x3_points, center3x3_bbox_targets_expand)\n center3x3_bbox_loss = self.loss_bbox(\n pos_decoded_bbox_preds,\n pos_decoded_target_preds,\n None,\n reduction_override='none').view(num_gts, total_level,\n K) / self.loss_bbox.loss_weight\n\n # Invalid index Loss set to infinity\n center3x3_bbox_loss[center3x3_masks == 0] = INF\n\n # 4 is the center point of the sampled 9 points, the center point\n # of gt bbox after discretization.\n # The center point of gt bbox after discretization\n # must be a positive sample, so we force its loss to be set to 0.\n center3x3_bbox_loss.view(-1, K)[fpn_level_masks.view(-1), 4] = 0\n center3x3_bbox_loss = center3x3_bbox_loss.view(num_gts, -1)\n\n loss_thr = torch.kthvalue(\n center3x3_bbox_loss, self.more_pos_topk, dim=1)[0]\n\n loss_thr[loss_thr > self.more_pos_thresh] = self.more_pos_thresh\n new_pos = center3x3_bbox_loss < loss_thr.view(num_gts, 1)\n pos_inds = center3x3_inds.view(num_gts, -1)[new_pos]\n cls_labels = cls_labels.view(num_gts,\n 1).expand(num_gts,\n total_level * K)[new_pos]\n return pos_inds, cls_labels\n\n def _create_heatmaps_from_dist(self, 
weighted_dist: Tensor,\n cls_labels: Tensor) -> Tensor:\n \"\"\"Generate heatmaps of classification branch based on weighted\n distance map.\"\"\"\n heatmaps = weighted_dist.new_zeros(\n (weighted_dist.shape[0], self.num_classes))\n for c in range(self.num_classes):\n inds = (cls_labels == c) # N\n if inds.int().sum() == 0:\n continue\n heatmaps[:, c] = torch.exp(-weighted_dist[:, inds].min(dim=1)[0])\n zeros = heatmaps[:, c] < 1e-4\n heatmaps[zeros, c] = 0\n return heatmaps\n\n def _get_center3x3_region_index_targets(self,\n bacth_gt_instances: InstanceList,\n shapes_per_level: Tensor) -> tuple:\n \"\"\"Get the center (and the 3x3 region near center) locations and target\n of each objects.\"\"\"\n cls_labels = []\n inside_fpn_level_masks = []\n center3x3_inds = []\n center3x3_masks = []\n center3x3_bbox_targets = []\n\n total_levels = len(self.strides)\n batch = len(bacth_gt_instances)\n\n shapes_per_level = shapes_per_level.long()\n area_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1])\n\n # Select a total of 9 positions of 3x3 in the center of the gt bbox\n # as candidate positive samples\n K = 9\n dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0,\n 1]).view(1, 1, K)\n dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1,\n 1]).view(1, 1, K)\n\n regress_ranges = shapes_per_level.new_tensor(self.regress_ranges).view(\n len(self.regress_ranges), 2) # L x 2\n strides = shapes_per_level.new_tensor(self.strides)\n\n start_coord_pre_level = []\n _start = 0\n for level in range(total_levels):\n start_coord_pre_level.append(_start)\n _start = _start + batch * area_per_level[level]\n start_coord_pre_level = shapes_per_level.new_tensor(\n start_coord_pre_level).view(1, total_levels, 1)\n area_per_level = area_per_level.view(1, total_levels, 1)\n\n for im_i in range(batch):\n gt_instance = bacth_gt_instances[im_i]\n gt_bboxes = gt_instance.bboxes\n gt_labels = gt_instance.labels\n num_gts = gt_bboxes.shape[0]\n if num_gts == 0:\n continue\n\n cls_labels.append(gt_labels)\n\n gt_bboxes = gt_bboxes[:, None].expand(num_gts, total_levels, 4)\n expanded_strides = strides[None, :,\n None].expand(num_gts, total_levels, 2)\n expanded_regress_ranges = regress_ranges[None].expand(\n num_gts, total_levels, 2)\n expanded_shapes_per_level = shapes_per_level[None].expand(\n num_gts, total_levels, 2)\n\n # calc reg_target\n centers = ((gt_bboxes[..., [0, 1]] + gt_bboxes[..., [2, 3]]) / 2)\n centers_inds = (centers / expanded_strides).long()\n centers_discret = centers_inds * expanded_strides \\\n + expanded_strides // 2\n\n bbox_target = bbox2distance(centers_discret,\n gt_bboxes) # M x N x 4\n\n # calc inside_fpn_level_mask\n bbox_target_wh = bbox_target[..., :2] + bbox_target[..., 2:]\n crit = (bbox_target_wh**2).sum(dim=2)**0.5 / 2\n inside_fpn_level_mask = \\\n (crit >= expanded_regress_ranges[..., 0]) & \\\n (crit <= expanded_regress_ranges[..., 1])\n\n inside_gt_bbox_mask = bbox_target.min(dim=2)[0] >= 0\n inside_fpn_level_mask = inside_gt_bbox_mask & inside_fpn_level_mask\n inside_fpn_level_masks.append(inside_fpn_level_mask)\n\n # calc center3x3_ind and mask\n expand_ws = expanded_shapes_per_level[..., 1:2].expand(\n num_gts, total_levels, K)\n expand_hs = expanded_shapes_per_level[..., 0:1].expand(\n num_gts, total_levels, K)\n centers_inds_x = centers_inds[..., 0:1]\n centers_inds_y = centers_inds[..., 1:2]\n\n center3x3_idx = start_coord_pre_level + \\\n im_i * area_per_level + \\\n (centers_inds_y + dy) * expand_ws + \\\n (centers_inds_x + dx)\n center3x3_mask = \\\n 
((centers_inds_y + dy) < expand_hs) & \\\n ((centers_inds_y + dy) >= 0) & \\\n ((centers_inds_x + dx) < expand_ws) & \\\n ((centers_inds_x + dx) >= 0)\n\n # recalc center3x3 region reg target\n bbox_target = bbox_target / expanded_strides.repeat(1, 1, 2)\n center3x3_bbox_target = bbox_target[..., None, :].expand(\n num_gts, total_levels, K, 4).clone()\n center3x3_bbox_target[..., 0] += dx\n center3x3_bbox_target[..., 1] += dy\n center3x3_bbox_target[..., 2] -= dx\n center3x3_bbox_target[..., 3] -= dy\n # update center3x3_mask\n center3x3_mask = center3x3_mask & (\n center3x3_bbox_target.min(dim=3)[0] >= 0) # n x L x K\n\n center3x3_inds.append(center3x3_idx)\n center3x3_masks.append(center3x3_mask)\n center3x3_bbox_targets.append(center3x3_bbox_target)\n\n if len(inside_fpn_level_masks) > 0:\n cls_labels = torch.cat(cls_labels, dim=0)\n inside_fpn_level_masks = torch.cat(inside_fpn_level_masks, dim=0)\n center3x3_inds = torch.cat(center3x3_inds, dim=0).long()\n center3x3_bbox_targets = torch.cat(center3x3_bbox_targets, dim=0)\n center3x3_masks = torch.cat(center3x3_masks, dim=0)\n else:\n cls_labels = shapes_per_level.new_zeros(0).long()\n inside_fpn_level_masks = shapes_per_level.new_zeros(\n (0, total_levels)).bool()\n center3x3_inds = shapes_per_level.new_zeros(\n (0, total_levels, K)).long()\n center3x3_bbox_targets = shapes_per_level.new_zeros(\n (0, total_levels, K, 4)).float()\n center3x3_masks = shapes_per_level.new_zeros(\n (0, total_levels, K)).bool()\n return cls_labels, inside_fpn_level_masks, center3x3_inds, \\\n center3x3_bbox_targets, center3x3_masks" }, { "identifier": "unpack_gt_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def unpack_gt_instances(batch_data_samples: SampleList) -> tuple:\n \"\"\"Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based\n on ``batch_data_samples``\n\n Args:\n batch_data_samples (List[:obj:`DetDataSample`]): The Data\n Samples. It usually includes information such as\n `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.\n\n Returns:\n tuple:\n\n - batch_gt_instances (list[:obj:`InstanceData`]): Batch of\n gt_instance. It usually includes ``bboxes`` and ``labels``\n attributes.\n - batch_gt_instances_ignore (list[:obj:`InstanceData`]):\n Batch of gt_instances_ignore. 
It includes ``bboxes`` attribute\n data that is ignored during training and testing.\n Defaults to None.\n - batch_img_metas (list[dict]): Meta information of each image,\n e.g., image size, scaling factor, etc.\n \"\"\"\n batch_gt_instances = []\n batch_gt_instances_ignore = []\n batch_img_metas = []\n for data_sample in batch_data_samples:\n batch_img_metas.append(data_sample.metainfo)\n batch_gt_instances.append(data_sample.gt_instances)\n if 'ignored_instances' in data_sample:\n batch_gt_instances_ignore.append(data_sample.ignored_instances)\n else:\n batch_gt_instances_ignore.append(None)\n\n return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas" }, { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "SampleList", "path": "mmdet/structures/det_data_sample.py", "snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):" }, { "identifier": "bbox2distance", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def bbox2distance(points: Tensor,\n bbox: Tensor,\n max_dis: Optional[float] = None,\n eps: float = 0.1) -> Tensor:\n \"\"\"Decode bounding box based on distances.\n\n Args:\n points (Tensor): Shape (n, 2) or (b, n, 2), [x, y].\n bbox (Tensor): Shape (n, 4) or (b, n, 4), \"xyxy\" format\n max_dis (float, optional): Upper bound of the distance.\n eps (float): a small value to ensure target < max_dis, instead <=\n\n Returns:\n Tensor: Decoded distances.\n \"\"\"\n left = points[..., 0] - bbox[..., 0]\n top = points[..., 1] - bbox[..., 1]\n right = bbox[..., 2] - points[..., 0]\n bottom = bbox[..., 3] - points[..., 1]\n if max_dis is not None:\n left = left.clamp(min=0, max=max_dis - eps)\n top = top.clamp(min=0, max=max_dis - eps)\n right = right.clamp(min=0, max=max_dis - eps)\n bottom = bottom.clamp(min=0, max=max_dis - eps)\n return torch.stack([left, top, right, bottom], -1)" }, { "identifier": "reduce_mean", "path": "mmdet/utils/dist_utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "ConfigType", "path": "mmdet/utils/typing_utils.py", "snippet": "" }, { "identifier": "IOULoss", "path": "projects/Detic_new/detic/iou_loss.py", "snippet": "class 
IOULoss(nn.Module):\n\n def __init__(self, loc_loss_type='iou'):\n super(IOULoss, self).__init__()\n self.loc_loss_type = loc_loss_type\n\n def forward(self, pred, target, weight=None, reduction='sum'):\n pred_left = pred[:, 0]\n pred_top = pred[:, 1]\n pred_right = pred[:, 2]\n pred_bottom = pred[:, 3]\n\n target_left = target[:, 0]\n target_top = target[:, 1]\n target_right = target[:, 2]\n target_bottom = target[:, 3]\n\n target_aera = (target_left + target_right) * (\n target_top + target_bottom)\n pred_aera = (pred_left + pred_right) * (pred_top + pred_bottom)\n\n w_intersect = torch.min(pred_left, target_left) + torch.min(\n pred_right, target_right)\n h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(\n pred_top, target_top)\n\n g_w_intersect = torch.max(pred_left, target_left) + torch.max(\n pred_right, target_right)\n g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(\n pred_top, target_top)\n ac_uion = g_w_intersect * g_h_intersect\n\n area_intersect = w_intersect * h_intersect\n area_union = target_aera + pred_aera - area_intersect\n\n ious = (area_intersect + 1.0) / (area_union + 1.0)\n gious = ious - (ac_uion - area_union) / ac_uion\n if self.loc_loss_type == 'iou':\n losses = -torch.log(ious)\n elif self.loc_loss_type == 'linear_iou':\n losses = 1 - ious\n elif self.loc_loss_type == 'giou':\n losses = 1 - gious\n else:\n raise NotImplementedError\n\n if weight is not None:\n losses = losses * weight\n else:\n losses = losses\n\n if reduction == 'sum':\n return losses.sum()\n elif reduction == 'batch':\n return losses.sum(dim=[1])\n elif reduction == 'none':\n return losses\n else:\n raise NotImplementedError" } ]
import copy
import torch
import torch.nn as nn
from typing import Dict, List, Optional, Sequence, Tuple
from mmcv.cnn import Scale
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.dense_heads import CenterNetUpdateHead
from mmdet.models.utils import unpack_gt_instances
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2distance
from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean)
from .iou_loss import IOULoss
10823
# F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) return cls_score, bbox_pred # score aligned, box larger def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = cls_scores[0].size(0) assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) # 1 flatten outputs flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) assert (torch.isfinite(flatten_bbox_preds).all().item()) # 2 calc reg and cls branch targets cls_targets, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) # 3 pos index for cls branch featmap_sizes = flatten_points.new_tensor(featmap_sizes) if self.more_pos: pos_inds, cls_labels = self.add_cls_pos_inds( flatten_points, flatten_bbox_preds, featmap_sizes, batch_gt_instances) else: pos_inds = self._get_label_inds(batch_gt_instances, batch_img_metas, featmap_sizes) # 4 calc cls loss if pos_inds is None: # num_gts=0 num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float) else: num_pos_cls = bbox_preds[0].new_tensor( len(pos_inds), dtype=torch.float) num_pos_cls = max(reduce_mean(num_pos_cls), 1.0) cat_agn_cls_targets = cls_targets.max(dim=1)[0] # M cls_pos_loss, cls_neg_loss = self.loss_cls( flatten_cls_scores.squeeze(1), cat_agn_cls_targets, pos_inds, num_pos_cls) # 5 calc reg loss pos_bbox_inds = torch.nonzero( bbox_targets.max(dim=1)[0] >= 0).squeeze(1) pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds] pos_bbox_targets = bbox_targets[pos_bbox_inds] bbox_weight_map = cls_targets.max(dim=1)[0] bbox_weight_map = bbox_weight_map[pos_bbox_inds] bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \ else torch.ones_like(bbox_weight_map) num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0) if len(pos_bbox_inds) > 0: bbox_loss = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, bbox_weight_map, reduction='sum') / num_pos_bbox else: bbox_loss = flatten_bbox_preds.sum() * 0 return dict( loss_bbox=bbox_loss, loss_cls_pos=cls_pos_loss, loss_cls_neg=cls_neg_loss) def loss_and_predict( self, x: Tuple[Tensor],
# Copyright (c) OpenMMLab. All rights reserved. # from .heatmap_focal_loss import binary_heatmap_focal_loss_jit INF = 1000000000 RangeType = Sequence[Tuple[int, int]] @MODELS.register_module() class CenterNetRPNHead(CenterNetUpdateHead): """CenterNetUpdateHead is an improved version of CenterNet in CenterNet2. Paper link `<https://arxiv.org/abs/2103.07461>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channel in the input feature map. regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. hm_min_radius (int): Heatmap target minimum radius of cls branch. Defaults to 4. hm_min_overlap (float): Heatmap target minimum overlap of cls branch. Defaults to 0.8. more_pos_thresh (float): The filtering threshold when the cls branch adds more positive samples. Defaults to 0.2. more_pos_topk (int): The maximum number of additional positive samples added to each gt. Defaults to 9. soft_weight_on_reg (bool): Whether to use the soft target of the cls branch as the soft weight of the bbox branch. Defaults to False. loss_cls (:obj:`ConfigDict` or dict): Config of cls loss. Defaults to dict(type='GaussianFocalLoss', loss_weight=1.0) loss_bbox (:obj:`ConfigDict` or dict): Config of bbox loss. Defaults to dict(type='GIoULoss', loss_weight=2.0). norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config norm layer. Defaults to ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Unused in CenterNet. Reserved for compatibility with SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CenterNet. """ def __init__(self, num_classes: int, in_channels: int, regress_ranges: RangeType = ((0, 80), (64, 160), (128, 320), (256, 640), (512, INF)), hm_min_radius: int = 4, hm_min_overlap: float = 0.8, more_pos: bool = False, more_pos_thresh: float = 0.2, more_pos_topk: int = 9, soft_weight_on_reg: bool = False, not_clamp_box: bool = False, loss_cls: ConfigType = dict( type='HeatmapFocalLoss', alpha=0.25, beta=4.0, gamma=2.0, pos_weight=1.0, neg_weight=1.0, sigmoid_clamp=1e-4, ignore_high_fp=-1.0, loss_weight=1.0, ), loss_bbox: ConfigType = dict( type='GIoULoss', loss_weight=2.0), norm_cfg: OptConfigType = dict( type='GN', num_groups=32, requires_grad=True), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, **kwargs) -> None: super().__init__( num_classes=num_classes, in_channels=in_channels, # loss_bbox=loss_bbox, loss_cls=loss_cls, norm_cfg=norm_cfg, train_cfg=train_cfg, test_cfg=test_cfg, **kwargs) self.soft_weight_on_reg = soft_weight_on_reg self.hm_min_radius = hm_min_radius self.more_pos_thresh = more_pos_thresh self.more_pos_topk = more_pos_topk self.more_pos = more_pos self.not_clamp_box = not_clamp_box self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap) self.loss_bbox = IOULoss('giou') # GaussianFocalLoss must be sigmoid mode self.use_sigmoid_cls = True self.cls_out_channels = num_classes self.regress_ranges = regress_ranges self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def _init_layers(self) -> None: """Initialize layers of the head.""" self._init_reg_convs() self._init_predictor() def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. 
scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps. Returns: tuple: scores for each class, bbox predictions of input feature maps. """ for m in self.reg_convs: x = m(x) cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) return cls_score, bbox_pred # score aligned, box larger def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = cls_scores[0].size(0) assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) # 1 flatten outputs flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) assert (torch.isfinite(flatten_bbox_preds).all().item()) # 2 calc reg and cls branch targets cls_targets, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) # 3 pos index for cls branch featmap_sizes = flatten_points.new_tensor(featmap_sizes) if self.more_pos: pos_inds, cls_labels = self.add_cls_pos_inds( flatten_points, flatten_bbox_preds, featmap_sizes, batch_gt_instances) else: pos_inds = self._get_label_inds(batch_gt_instances, batch_img_metas, featmap_sizes) # 4 calc cls loss if pos_inds is None: # num_gts=0 num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float) else: num_pos_cls = bbox_preds[0].new_tensor( len(pos_inds), dtype=torch.float) num_pos_cls = max(reduce_mean(num_pos_cls), 1.0) cat_agn_cls_targets = cls_targets.max(dim=1)[0] # M cls_pos_loss, cls_neg_loss = self.loss_cls( flatten_cls_scores.squeeze(1), cat_agn_cls_targets, pos_inds, num_pos_cls) # 5 calc reg loss pos_bbox_inds = torch.nonzero( bbox_targets.max(dim=1)[0] >= 0).squeeze(1) pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds] pos_bbox_targets = bbox_targets[pos_bbox_inds] 
bbox_weight_map = cls_targets.max(dim=1)[0] bbox_weight_map = bbox_weight_map[pos_bbox_inds] bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \ else torch.ones_like(bbox_weight_map) num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0) if len(pos_bbox_inds) > 0: bbox_loss = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, bbox_weight_map, reduction='sum') / num_pos_bbox else: bbox_loss = flatten_bbox_preds.sum() * 0 return dict( loss_bbox=bbox_loss, loss_cls_pos=cls_pos_loss, loss_cls_neg=cls_neg_loss) def loss_and_predict( self, x: Tuple[Tensor],
batch_data_samples: SampleList,
3
2023-11-30 08:58:00+00:00
16k
SEU-ProactiveSecurity-Group/MalPurifier
examples/md_nn_test.py
[ { "identifier": "Dataset", "path": "core/defense/dataset.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n def __init__(self, seed=0, device='cuda', feature_ext_args=None):\n \"\"\"\n 为机器学习模型学习构建数据集。\n \n :param seed: 随机种子\n :param device: 设备类型,'cuda' 或 'cpu'\n :param feature_ext_args: 提取特征的参数\n \"\"\"\n \n # 设置随机种子,并确保随机性在不同库之间是一致的\n self.seed = seed\n random.seed(self.seed)\n np.random.seed(self.seed)\n torch.manual_seed(self.seed)\n \n # 设置PyTorch的默认数据类型为float32\n torch.set_default_dtype(torch.float32)\n \n # 初始化简化类的临时数据存储\n self.temp_data = utils.SimplifyClass(Manager())\n \n # 设定使用的设备\n self.device = device\n\n # 根据提供的参数初始化特征提取器\n self.feature_ext_args = feature_ext_args\n if feature_ext_args is None:\n self.feature_extractor = Apk2features(config.get('metadata', 'naive_data_pool'),\n config.get('dataset', 'intermediate'))\n else:\n assert isinstance(feature_ext_args, dict)\n self.feature_extractor = Apk2features(config.get('metadata', 'naive_data_pool'),\n config.get('dataset', 'intermediate'),\n **feature_ext_args)\n\n # 分割数据集为训练、验证和测试集\n data_saving_path = os.path.join(config.get('dataset', 'intermediate'), 'dataset.idx')\n \n # 检查是否已保存了分割数据,且不需要更新\n if os.path.exists(data_saving_path) and (not self.feature_extractor.update):\n (self.train_dataset, self.validation_dataset, self.test_dataset) = utils.read_pickle(data_saving_path)\n\n # # 计算良性和恶意apk的数量\n # benign_train = np.sum(self.train_dataset[1] == 0)\n # malicious_train = np.sum(self.train_dataset[1] == 1)\n\n # benign_val = np.sum(self.validation_dataset[1] == 0)\n # malicious_val = np.sum(self.validation_dataset[1] == 1)\n\n # benign_test = np.sum(self.test_dataset[1] == 0)\n # malicious_test = np.sum(self.test_dataset[1] == 1)\n\n # # 打印数据量\n # total_data = len(self.train_dataset[0]) + len(self.validation_dataset[0]) + len(self.test_dataset[0])\n # print(f\"总数据量: {total_data}\")\n # print(f\"训练数据量: {len(self.train_dataset[0])} (良性: {benign_train}, 恶意: {malicious_train})\")\n # print(f\"验证数据量: {len(self.validation_dataset[0])} (良性: {benign_val}, 恶意: {malicious_val})\")\n # print(f\"测试数据量: {len(self.test_dataset[0])} (良性: {benign_test}, 恶意: {malicious_test})\")\n\n # 更新数据路径\n def path_tran(data_paths):\n return np.array(\n [os.path.join(config.get('metadata', 'naive_data_pool'),\n os.path.splitext(os.path.basename(name))[0] + self.feature_extractor.file_ext) for \n name in data_paths])\n\n self.train_dataset = (path_tran(self.train_dataset[0]), self.train_dataset[1])\n self.validation_dataset = (path_tran(self.validation_dataset[0]), self.validation_dataset[1])\n self.test_dataset = (path_tran(self.test_dataset[0]), self.test_dataset[1])\n else:\n # 预处理恶意软件和良性软件的APK文件,并获取其特征路径\n mal_feature_paths = self.apk_preprocess(config.get('dataset', 'malware_dir'))\n ben_feature_paths = self.apk_preprocess(config.get('dataset', 'benware_dir'))\n feature_paths = mal_feature_paths + ben_feature_paths\n \n # 根据恶意软件和良性软件的数量生成标签\n gt_labels = np.zeros((len(mal_feature_paths) + len(ben_feature_paths)), dtype=np.int32)\n gt_labels[:len(mal_feature_paths)] = 1\n \n # 根据特征路径和标签分割数据\n self.train_dataset, self.validation_dataset, self.test_dataset = self.data_split(feature_paths, gt_labels)\n \n # 保存分割后的数据\n utils.dump_pickle((self.train_dataset, self.validation_dataset, self.test_dataset), data_saving_path)\n\n # 获取特征词汇表和大小\n self.vocab, _1, _2 = self.feature_extractor.get_vocab(*self.train_dataset)\n self.vocab_size = len(self.vocab)\n \n # 获取非API的数量\n self.non_api_size = self.feature_extractor.get_non_api_size(self.vocab)\n \n # 
获取类别数量\n self.n_classes = np.unique(self.train_dataset[1]).size\n\n\n def data_split(self, feature_paths, labels):\n \"\"\"\n 将数据分为训练、验证和测试集。\n\n :param feature_paths: 特征文件的路径列表。\n :param labels: 对应的标签列表。\n :return: (训练数据, 训练标签), (验证数据, 验证标签), (测试数据, 测试标签)\n \"\"\"\n \n # 确保特征文件路径数量与标签数量相同\n assert len(feature_paths) == len(labels)\n \n # 初始化训练、验证和测试集的文件名列表为None\n train_dn, validation_dn, test_dn = None, None, None\n \n # 定义数据集切分文件的路径\n data_split_path = os.path.join(config.get('dataset', 'dataset_dir'), 'tr_te_va_split.name')\n \n # 检查数据切分文件是否存在\n if os.path.exists(data_split_path):\n train_dn, val_dn, test_dn = utils.read_pickle(data_split_path)\n\n # 如果任何文件名列表为空\n if (train_dn is None) or (validation_dn is None) or (test_dn is None):\n # 从特征文件路径中提取文件名\n data_names = [os.path.splitext(os.path.basename(path))[0] for path in feature_paths]\n \n # 分割数据为训练和测试集,20%为测试集\n train_dn, test_dn = train_test_split(data_names, test_size=0.2, random_state=self.seed, shuffle=True)\n \n # 从训练集中进一步分割出验证集,25%为验证集\n train_dn, validation_dn = train_test_split(train_dn, test_size=0.25, random_state=self.seed, shuffle=True)\n \n # 将切分结果保存为pickle文件\n utils.dump_pickle((train_dn, validation_dn, test_dn), path=data_split_path)\n\n # 根据提供的文件名列表查询路径\n def query_path(_data_names):\n return np.array(\n [path for path in feature_paths if os.path.splitext(os.path.basename(path))[0] in _data_names])\n\n # 根据提供的文件名列表查询对应的指示器(布尔列表)\n def query_indicator(_data_names):\n return [True if os.path.splitext(os.path.basename(path))[0] in _data_names else False for path in\n feature_paths]\n\n # 查询训练、验证和测试数据的路径\n train_data = query_path(train_dn)\n val_data = query_path(validation_dn)\n test_data = query_path(test_dn)\n \n # 为确保数据与标签一致,随机打乱训练数据和标签\n random.seed(self.seed)\n random.shuffle(train_data)\n train_y = labels[query_indicator(train_dn)]\n random.seed(self.seed)\n random.shuffle(train_y)\n \n # 查询训练、验证和测试数据的标签\n val_y = labels[query_indicator(validation_dn)]\n test_y = labels[query_indicator(test_dn)]\n \n # 返回切分的数据和标签\n return (train_data, train_y), (val_data, val_y), (test_data, test_y)\n\n\n def apk_preprocess(self, apk_paths, labels=None, update_feature_extraction=False):\n \"\"\"\n APK 文件的预处理。\n \n :param apk_paths: APK文件路径列表。\n :param labels: APK文件对应的标签列表,可以为None。\n :param update_feature_extraction: 是否更新特征提取器的状态。\n :return: 处理后的特征路径,和可选的标签。\n \"\"\"\n \n # 保存特征提取器的当前更新状态\n old_status = self.feature_extractor.update\n \n # 将特征提取器的更新状态设置为提供的参数值\n self.feature_extractor.update = update_feature_extraction\n \n # 如果没有提供标签\n if labels is None:\n # 使用特征提取器从apk_paths中提取特征\n feature_paths = self.feature_extractor.feature_extraction(apk_paths)\n \n # 恢复特征提取器的原始状态\n self.feature_extractor.update = old_status\n \n # 返回特征路径\n return feature_paths\n else:\n # 确保apk文件的数量与标签的数量相匹配\n assert len(apk_paths) == len(labels), \\\n '不匹配的数据形状 {} vs. 
{}'.format(len(apk_paths), len(labels))\n \n # 使用特征提取器从apk_paths中提取特征\n feature_paths = self.feature_extractor.feature_extraction(apk_paths)\n \n labels_ = []\n for i, feature_path in enumerate(feature_paths):\n # 获取不带扩展名的文件名\n fname = os.path.splitext(os.path.basename(feature_path))[0]\n \n # 确保当前文件名在对应的apk路径中\n if fname in apk_paths[i]:\n # 添加对应的标签到labels_列表中\n labels_.append(labels[i])\n \n # 恢复特征提取器的原始状态\n self.feature_extractor.update = old_status\n \n # 返回特征路径和对应的标签\n return feature_paths, np.array(labels_)\n\n\n def feature_preprocess(self, feature_paths):\n raise NotImplementedError\n # self.feature_extractor.update_cg(feature_paths)\n\n\n def feature_api_rpst_sum(self, api_feat_representation_list):\n \"\"\"\n 对API表示进行求和\n :param api_feat_representation_list: 一个稀疏矩阵列表\n \"\"\"\n \n # 确保输入是一个列表\n assert isinstance(api_feat_representation_list, list), \"期望输入是一个列表。\"\n \n # 如果列表不为空\n if len(api_feat_representation_list) > 0:\n # 确保列表中的第一个元素是 csr_matrix 类型的稀疏矩阵\n assert isinstance(api_feat_representation_list[0], csr_matrix)\n else:\n # 如果列表为空,则返回一个全为0的矩阵\n return np.zeros(shape=(self.vocab_size - self.non_api_size, self.vocab_size - self.non_api_size),\n dtype=np.float)\n \n # 将第一个稀疏矩阵转为密集型矩阵,并转换为浮点类型\n adj_array = np.asarray(api_feat_representation_list[0].todense()).astype(np.float32)\n \n # 遍历列表中的其余稀疏矩阵\n for sparse_mat in api_feat_representation_list[1:]:\n # 将稀疏矩阵转为密集型矩阵,转换为浮点类型,并与之前的结果进行相加\n adj_array += np.asarray(sparse_mat.todense()).astype(np.float32)\n \n # 将最终结果中的所有值限制在[0,1]之间\n return np.clip(adj_array, a_min=0, a_max=1)\n\n\n def get_numerical_input(self, feature_path, label):\n \"\"\"\n loading features for given a feature path\n # results:\n # --->> mapping feature path to numerical representations\n # --->> features: 1d array, and a list of sparse matrices\n # --->> label: scalar\n \"\"\"\n feature_vector, label = self.feature_extractor.feature2ipt(feature_path, label,\n self.vocab,\n None)\n return feature_vector, label\n\n\n def get_input_producer(self, feature_paths, y, batch_size, name='train', use_cache=False):\n \"\"\"\n 获取输入生产器,返回一个 DataLoader 对象。\n \n :param feature_paths: 特征路径列表。\n :param y: 标签。\n :param batch_size: 每个批次的数据数量。\n :param name: 使用场景名称,默认为'train'。\n :param use_cache: 是否使用缓存,默认为False。\n :return: 返回一个 DataLoader 对象。\n \"\"\"\n \n # 定义 DataLoader 的参数\n params = {\n 'batch_size': batch_size,\n 'num_workers': self.feature_ext_args['proc_number'],\n 'shuffle': False\n }\n \n # 如果是训练过程,则使用用户设定的缓存值;否则,不使用缓存\n use_cache = use_cache if name == 'train' else False\n \n # 创建 DataLoader,它会使用自定义的 DatasetTorch 数据集对象\n # worker_init_fn 参数用于为每个工作线程设定一个随机种子,确保数据的打乱是随机的\n return torch.utils.data.DataLoader(\n DatasetTorch(feature_paths, y, self, name=name, use_cache=use_cache),\n worker_init_fn=lambda x: np.random.seed(torch.randint(0, 2**31, [1,])[0] + x),\n **params\n )\n\n\n def clear_up(self):\n self.temp_data.reset()\n\n @staticmethod\n def get_modification(adv_x, x, idx, sp=True):\n # 确认adv_x和x是numpy.ndarray类型或torch.Tensor类型的实例\n assert isinstance(adv_x, (np.ndarray, torch.Tensor))\n assert isinstance(x, (np.ndarray, torch.Tensor))\n \n # 计算对抗样本和原始样本之间的差异\n x_mod = adv_x - x\n \n # 根据索引idx选择对应的元素\n if isinstance(x_mod, np.ndarray):\n x_mod = np.array([x_mod[i, idx[i]] for i in range(x.shape[0])])\n else:\n x_mod = torch.stack([x_mod[i, idx[i]] for i in range(x.shape[0])])\n \n # 判断是否需要转为稀疏表示\n if sp:\n # 如果x_mod是torch.Tensor,那么将其转换为稀疏表示并移到cpu上\n # 如果x_mod是numpy.ndarray,那么先将其转换为torch.Tensor,然后转换为稀疏表示并移到cpu上\n if isinstance(x_mod, torch.Tensor):\n return 
x_mod.to_sparse().cpu().unbind(dim=0)\n else:\n return torch.tensor(x_mod, dtype=torch.int).to_sparse().cpu().unbind(dim=0)\n else:\n # 如果不需要转为稀疏表示,那么直接将其移到cpu上或者分割为numpy数组\n if isinstance(x_mod, torch.Tensor):\n return x_mod.cpu().unbind(dim=0)\n else:\n return np.split(x_mod, x_mod.shape[0], axis=0)\n\n\n @staticmethod\n def modification_integ(x_mod_integrated, x_mod):\n # 确认x_mod_integrated和x_mod是列表类型的实例\n assert isinstance(x_mod_integrated, list) and isinstance(x_mod, list)\n \n # 如果x_mod_integrated为空列表,则返回x_mod\n if len(x_mod_integrated) == 0:\n return x_mod\n \n # 确认x_mod_integrated和x_mod的长度相同\n assert len(x_mod_integrated) == len(x_mod)\n \n # 遍历x_mod和x_mod_integrated中的每个元素\n for i in range(len(x_mod)):\n # 确认当前x_mod中的元素不在GPU上,\n # 因为在GPU上的Tensor进行list相加操作的时候是列表拼接,而在CPU上则是张量之间的加法\n assert not x_mod[i].is_cuda\n \n # 更新x_mod_integrated中的元素\n x_mod_integrated[i] += x_mod[i]\n \n # 返回更新后的x_mod_integrated\n return x_mod_integrated" }, { "identifier": "MalwareDetectionDNN", "path": "core/defense/md_dnn.py", "snippet": "class MalwareDetectionDNN(nn.Module):\n def __init__(self, input_size, n_classes, device='cpu', name='DNN', **kwargs):\n \"\"\"\n 初始化恶意软件检测器\n\n 参数:\n ----------\n @param input_size: 整数,输入向量的维度数量。\n @param n_classes: 整数,表示分类的数量,例如二分类问题中n=2。\n @param device: 字符串,可以是'cpu'或'cuda',表示模型应该在CPU还是GPU上运行。\n @param name: 字符串,用于命名模型。\n \"\"\"\n super(MalwareDetectionDNN, self).__init__() # 调用父类初始化\n self.input_size = input_size # 定义输入尺寸\n self.n_classes = n_classes # 定义分类数量\n self.device = device # 定义运行设备\n self.name = name # 定义模型名称\n\n self.parse_args(**kwargs) # 解析额外参数\n\n self.dense_layers = [] # 初始化一个空的密集层列表\n \n # 检查是否至少有一个隐藏层\n if len(self.dense_hidden_units) >= 1:\n # 添加第一个密集层\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[0]))\n else:\n # 如果没有隐藏层,抛出异常\n raise ValueError(\"Expect at least one hidden layer.\")\n\n # 为每一对连续的隐藏单元添加一个密集层\n for i in range(len(self.dense_hidden_units[0:-1])):\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[i], \n self.dense_hidden_units[i + 1]))\n \n # 添加最后一个连接到输出层的密集层\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[-1], self.n_classes))\n \n # 将密集层添加到模型中以进行跟踪\n for idx_i, dense_layer in enumerate(self.dense_layers):\n self.add_module('nn_model_layer_{}'.format(idx_i), dense_layer)\n\n # 根据参数选择使用SELU或ReLU激活函数\n if self.smooth:\n self.activation_func = F.selu # 使用SELU激活函数\n else:\n self.activation_func = F.relu # 使用ReLU激活函数\n\n # 定义模型的保存路径\n self.model_save_path = path.join(config.get('experiments', 'md_dnn') + '_' + self.name,\n 'model.pth')\n \n # 日志中打印模型的结构信息\n logger.info('========================================dnn model architecture===============================')\n logger.info(self)\n logger.info('===============================================end==========================================')\n\n\n def parse_args(self,\n dense_hidden_units=None,\n dropout=0.6,\n alpha_=0.2,\n smooth=False,\n **kwargs\n ):\n \"\"\"\n 解析并设置网络的超参数。\n\n 参数:\n ----------\n dense_hidden_units : list, 可选\n 网络中每个隐藏层的单元数。如果没有指定,则默认为两个隐藏层,每层200个单元。\n dropout : float, 可选\n dropout正则化的比率,默认为0.6。\n alpha_ : float, 可选\n 某些激活函数的参数,默认为0.2。\n smooth : bool, 可选\n 是否使用平滑的激活函数,默认为False。\n **kwargs : dict\n 其他超参数。\n \"\"\"\n\n # 如果用户没有指定隐藏层,使用默认的配置\n if dense_hidden_units is None:\n self.dense_hidden_units = [200, 200]\n # 如果用户指定了一个列表,使用它\n elif isinstance(dense_hidden_units, list):\n self.dense_hidden_units = dense_hidden_units\n # 否则抛出一个异常\n else:\n raise TypeError(\"Expect a list of hidden units.\")\n\n # 设置dropout, 
alpha和smooth参数\n self.dropout = dropout\n self.alpha_ = alpha_\n self.smooth = smooth\n\n # 从kwargs中获取并设置proc_number\n self.proc_number = kwargs.get('proc_number', None) # 如果不存在,则返回None\n\n # 如果还有其他参数,记录警告,因为这些参数可能是未知的\n if len(kwargs) > 0:\n logger.warning(\"Unknown hyper-parameters {}\".format(str(kwargs)))\n\n\n def forward(self, x):\n \"\"\"\n 使输入数据 x 通过神经网络\n \n 参数\n ----------\n @param x: 2D张量,特征表示\n \"\"\"\n # 遍历神经网络的每一层,除了最后一层\n for dense_layer in self.dense_layers[:-1]:\n x = self.activation_func(dense_layer(x)) # 使用激活函数处理每一层的输出\n\n # 对处理过的数据进行 dropout 操作,用于防止过拟合\n latent_representation = F.dropout(x, self.dropout, training=self.training)\n \n # 用最后一层进行处理,得到logits(未归一化的预测或分类得分)\n logits = self.dense_layers[-1](latent_representation)\n return logits\n\n def inference(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n\n # 将所有批次的置信度垂直堆叠成一个张量\n confidences = torch.vstack(confidences)\n # 将所有批次的真实标签连接成一个张量\n gt_labels = torch.cat(gt_labels, dim=0)\n \n return confidences, gt_labels\n\n def inference_dae(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n \n return confidences, gt_labels\n\n\n def get_important_attributes(self, test_data_producer, target_label=1):\n \"\"\"\n 使用集成梯度(Integrated Gradients)方法获取重要的属性/特征\n\n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n @param target_label: 目标标签,默认为1\n \n 返回值\n ----------\n 返回重要的属性/特征\n \"\"\"\n attributions = [] # 存储属性或特征的重要性得分\n gt_labels = [] # 存储真实标签\n\n # 定义一个使用集成梯度方法的包装器\n def _ig_wrapper(_x):\n logits = self.forward(_x)\n return F.softmax(logits, dim=-1)\n\n # 初始化集成梯度对象\n ig = IntegratedGradients(_ig_wrapper)\n\n # 遍历测试数据集\n for i, (x, y) in enumerate(test_data_producer):\n # 将数据和标签转移到指定的设备上\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 使x能够计算梯度\n x.requires_grad = True\n # 定义基线,用于集成梯度的计算\n baseline = torch.zeros_like(x, dtype=torch.double, device=self.device)\n # 计算属性的重要性\n attribution_bs = ig.attribute(x,\n baselines=baseline,\n target=target_label)\n # 将所有批次的属性垂直堆叠\n attribution = torch.hstack(attribution_bs)\n # 保存得到的属性重要性得分和真实标签\n attributions.append(attribution.clone().detach().cpu().numpy())\n gt_labels.append(y.clone().detach().cpu().numpy())\n # 将真实标签保存为.npy文件\n np.save('./labels', np.concatenate(gt_labels))\n \n return np.vstack(attributions)\n\n\n def 
inference_batch_wise(self, x):\n \"\"\"\n 仅支持恶意软件样本的批量推理\n \n 参数\n ----------\n @param x: 输入数据的张量\n \n 返回值\n ----------\n 返回推理的置信度和标签\n \"\"\"\n # 确保输入是一个张量\n assert isinstance(x, torch.Tensor)\n \n # 获得模型的输出\n logit = self.forward(x)\n \n # 返回每个样本的置信度和一个与logit形状相同的全1数组(表示恶意软件样本)\n return torch.softmax(logit, dim=-1).detach().cpu().numpy(), np.ones((logit.size()[0],))\n\n\n def predict(self, test_data_producer, indicator_masking=True):\n \"\"\"\n 预测标签并进行评估\n\n 参数\n --------\n @param test_data_producer: torch.DataLoader, 用于生成测试数据的数据加载器\n \"\"\"\n # 进行评估\n confidence, y_true = self.inference(test_data_producer)\n y_pred = confidence.argmax(1).cpu().numpy() # 预测标签\n y_true = y_true.cpu().numpy() # 真实标签\n \n # print(\"y_true.shape:\", y_true.shape)\n # print(\"y_pred.shape:\", y_pred.shape)\n \n # 使用sklearn的评估指标进行评估\n from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score\n accuracy = accuracy_score(y_true, y_pred)\n b_accuracy = balanced_accuracy_score(y_true, y_pred)\n \n MSG = \"The accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(accuracy * 100))\n \n MSG = \"The balanced accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(b_accuracy * 100))\n\n # 检查数据中是否存在缺失的类别\n if np.any([np.all(y_true == i) for i in range(self.n_classes)]):\n logger.warning(\"class absent.\")\n return\n\n # 计算混淆矩阵\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n fpr = fp / float(tn + fp) # 计算假阳性率\n fnr = fn / float(tp + fn) # 计算假阴性率\n f1 = f1_score(y_true, y_pred, average='binary') # 计算F1分数\n\n print(\"Other evaluation metrics we may need:\")\n MSG = \"False Negative Rate (FNR) is {:.5f}%、False Positive Rate (FPR) is {:.5f}%, F1 score is {:.5f}%\"\n logger.info(MSG.format(fnr * 100, fpr * 100, f1 * 100))\n\n\n def customize_loss(self, logits, gt_labels, representation=None, mini_batch_idx=None):\n \"\"\"\n 自定义损失函数\n\n 参数\n --------\n @param logits: Tensor, 模型的输出\n @param gt_labels: Tensor, 真实的标签\n @param representation: Tensor, 可选参数,表示特征表示\n @param mini_batch_idx: Int, 可选参数,表示小批次的索引\n \n 返回值\n --------\n 返回交叉熵损失\n \"\"\"\n return F.cross_entropy(logits, gt_labels)\n\n\n def fit(self, train_data_producer, validation_data_producer, epochs=100, lr=0.005, weight_decay=0., weight_sampling=0.5, verbose=True):\n \"\"\"\n 训练恶意软件检测器,根据验证集上的交叉熵损失选择最佳模型。\n\n 参数\n ----------\n @param train_data_producer: 对象, 用于生成一批训练数据的迭代器\n @param validation_data_producer: 对象, 用于生成验证数据的迭代器\n @param epochs: 整数, 训练的周期数\n @param lr: 浮点数, Adam优化器的学习率\n @param weight_decay: 浮点数, 惩罚因子\n @param verbose: 布尔值, 是否显示详细的日志\n \"\"\"\n # 初始化优化器\n optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)\n best_avg_acc = 0. # 记录验证集上的最佳准确率\n best_epoch = 0 # 记录最佳准确率对应的周期\n total_time = 0. 
# 总的训练时间\n\n # 获取训练数据批次的数量\n nbatches = len(train_data_producer)\n \n # 进行指定次数的训练周期\n for i in range(epochs):\n # 设置模型为训练模式\n self.train()\n # 初始化列表用于保存每批数据的损失值和准确率\n losses, accuracies = [], []\n\n # 对每个训练数据批次进行遍历\n for idx_batch, (x_train, y_train) in enumerate(train_data_producer):\n # 将数据转移到指定的计算设备(例如GPU或CPU)\n x_train, y_train = utils.to_device(x_train.double(), y_train.long(), self.device)\n\n # 记录开始训练的时间\n start_time = time.time()\n\n # 清空之前累积的梯度\n optimizer.zero_grad() \n \n # 对输入数据进行前向传播\n logits = self.forward(x_train) \n \n # 根据模型的输出和真实标签计算损失\n loss_train = self.customize_loss(logits, y_train) \n\n # 对损失进行反向传播\n loss_train.backward()\n \n # 使用优化器更新模型参数\n optimizer.step()\n\n # 计算训练这批数据所花费的总时间\n total_time += time.time() - start_time\n \n # 计算这批数据上的准确率\n acc_train = (logits.argmax(1) == y_train).sum().item() / x_train.size()[0]\n \n # 将时间转换为分钟和秒\n mins, secs = int(total_time / 60), int(total_time % 60)\n \n # 将这批数据的损失和准确率加入到列表中\n losses.append(loss_train.item())\n accuracies.append(acc_train)\n\n # 如果开启了详细输出模式,显示当前训练进度和这批数据上的损失和准确率\n if verbose:\n logger.info(f'小批次: {i * nbatches + idx_batch + 1}/{epochs * nbatches} | 训练时间为 {mins:.0f} 分钟, {secs} 秒。')\n logger.info(f'训练损失(小批次级别): {losses[-1]:.4f} | 训练精度: {acc_train * 100:.2f}')\n\n\n self.eval() # 将模型设置为评估模式\n avg_acc_val = []\n\n with torch.no_grad(): # 确保在评估模式下不进行梯度的计算\n for x_val, y_val in validation_data_producer:\n # 将数据移动到指定设备(例如GPU或CPU)上,并确保数据的类型为双精度浮点数和长整型\n x_val, y_val = utils.to_device(x_val.double(), y_val.long(), self.device)\n \n # 使用模型进行前向传播,得到输出结果\n logits = self.forward(x_val)\n \n # 计算验证数据上的准确率\n acc_val = (logits.argmax(1) == y_val).sum().item() / x_val.size()[0]\n \n # 保存每一批验证数据的准确率\n avg_acc_val.append(acc_val)\n \n # 计算所有验证数据的平均准确率\n avg_acc_val = np.mean(avg_acc_val)\n\n # 如果当前周期的验证精度超过之前的最佳验证精度\n if avg_acc_val >= best_avg_acc:\n # 更新最佳验证精度\n best_avg_acc = avg_acc_val\n best_epoch = i\n \n # 检查模型保存路径是否存在,如果不存在,则创建\n if not path.exists(self.model_save_path):\n utils.mkdir(path.dirname(self.model_save_path))\n \n # 保存当前的模型参数\n torch.save(self.state_dict(), self.model_save_path)\n \n # 如果开启了详细输出模式,显示模型保存路径\n if verbose:\n print(f'模型保存在路径: {self.model_save_path}')\n\n # 如果开启了详细输出模式,显示训练损失、训练精度、验证精度和最佳验证精度\n if verbose:\n logger.info(f'训练损失(周期级别): {np.mean(losses):.4f} | 训练精度: {np.mean(accuracies) * 100:.2f}')\n logger.info(f'验证精度: {avg_acc_val * 100:.2f} | 最佳验证精度: {best_avg_acc * 100:.2f} 在第 {best_epoch} 个周期')\n\n def load(self):\n \"\"\"\n 从磁盘加载模型参数\n \"\"\"\n self.load_state_dict(torch.load(self.model_save_path))" }, { "identifier": "save_args", "path": "tools/utils.py", "snippet": "def save_args(fout, args):\n if isinstance(args, str):\n dump_txt(args, fout, mode='w')\n elif isinstance(args, dict):\n args_str = build_kwargs(args.keys(), args)\n dump_txt(args_str, fout, mode='w')\n else:\n raise TypeError(\"Expected str or dict.\")" }, { "identifier": "get_group_args", "path": "tools/utils.py", "snippet": "def get_group_args(args, args_parser, title):\n \"\"\"\n 从给定的 argparse.ArgumentParser 对象中获取指定组的参数值,并以字典形式返回。\n\n Args:\n - args (argparse.Namespace): 已解析的命令行参数对象。\n - args_parser (argparse.ArgumentParser): 命令行参数解析器对象。\n - title (str): 目标参数组的标题。\n\n Returns:\n - dict: 包含目标参数组中参数名及其对应的值的字典。\n \"\"\"\n import argparse\n\n # 确保传入的参数 args 和 args_parser 是 argparse.Namespace 和 argparse.ArgumentParser 类型\n assert isinstance(args, argparse.Namespace) and isinstance(args_parser, argparse.ArgumentParser)\n\n # 遍历 args_parser 中的所有参数组\n for group in args_parser._action_groups:\n # 如果找到了指定标题的参数组,则返回该组中指定参数名及其对应的值\n if 
group.title == title:\n return {action.dest: getattr(args, action.dest, None) for action in group._group_actions}\n else:\n # 否则继续查找下一个参数组\n continue\n\n # 如果未找到目标参数组,则返回空字典\n return {}" }, { "identifier": "to_tensor", "path": "tools/utils.py", "snippet": "def to_tensor(feature_x=None, labels=None, device='cpu'):\n \"\"\"Convert features, labels from array or sparse matrix to\n torch Tensor.\n code is adapted from: https://github.com/deeprobust/DeepRobust/graph/utils.py\n Parameters\n ----------\n adj : scipy.sparse.csr_matrix\n the adjacency matrix.\n features : scipy.sparse.csr_matrix\n node features\n labels : numpy.array\n node labels\n device : str\n 'cpu' or 'cuda'\n \"\"\"\n\n def _to_torch_tensor(mat):\n if sp.issparse(mat):\n mat = sparse_mx_to_torch_sparse_tensor(mat)\n elif isinstance(mat, torch.Tensor):\n pass\n else:\n mat = torch.DoubleTensor(mat)\n return mat\n\n feature_x = _to_torch_tensor(feature_x).to(device)\n if labels is None:\n return feature_x\n else:\n labels = torch.LongTensor(labels).to(device)\n return feature_x, labels" }, { "identifier": "dump_pickle", "path": "tools/utils.py", "snippet": "def dump_pickle(data, path, use_gzip=False):\n print(\"tr_te_va_split path:\", path)\n if not os.path.exists(os.path.dirname(path)):\n mkdir(os.path.dirname(path))\n if not use_gzip:\n with open(path, 'wb') as wr:\n pkl.dump(data, wr)\n else:\n with gzip.open(path, 'wb') as wr:\n pkl.dump(data, wr)\n return True" }, { "identifier": "read_pickle", "path": "tools/utils.py", "snippet": "def read_pickle(path, use_gzip=False):\n if os.path.isfile(path):\n if not use_gzip:\n with open(path, 'rb') as fr:\n return pkl.load(fr)\n else:\n with gzip.open(path, 'rb') as fr:\n return pkl.load(fr)\n else:\n raise IOError(\"The {0} is not been found.\".format(path))" } ]
import os.path as path
import argparse
import time
import numpy
from core.defense import Dataset
from core.defense import MalwareDetectionDNN
from tools.utils import save_args, get_group_args, to_tensor, dump_pickle, read_pickle
11388
# 使用未来版本特性,确保代码在Python2和Python3中有一致的行为 from __future__ import absolute_import from __future__ import division from __future__ import print_function # 导入所需的库 # 导入自定义模块 # 初始化argparse对象,用于解析命令行参数 cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # 定义与特征提取相关的命令行参数 feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for features extraction.') # 特征提取的线程数量 feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # 表示每个应用的smali文件的最大数量 feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum number of vocabulary size') # 词汇的最大数量 feature_argparse.add_argument('--update', action='store_true', help='Whether update the existed features.') # 是否更新已存在的特征 # 定义与检测器相关的命令行参数 detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether use cuda enable gpu or cpu.') # 是否使用CUDA启用GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # 随机种子 detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # 密集隐藏单元列表 detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # dropout率 detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # leaky-relu或elu的斜率系数 detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # 在GAT层使用平滑激活函数elu detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # mini-batch大小 detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # 训练的epoch数 detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # 初始学习率 detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # 权重衰减系数 # 定义与数据集相关的命令行参数 dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cache data or not.') # 是否使用缓存数据 # 定义与模式相关的命令行参数 mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # 学习模型或测试模型的模式 mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # 测试模型名称的后缀日期 # 定义主函数 def _main(): args = cmd_md.parse_args() # 根据参数创建数据集
# 使用未来版本特性,确保代码在Python2和Python3中有一致的行为 from __future__ import absolute_import from __future__ import division from __future__ import print_function # 导入所需的库 # 导入自定义模块 # 初始化argparse对象,用于解析命令行参数 cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # 定义与特征提取相关的命令行参数 feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for features extraction.') # 特征提取的线程数量 feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # 表示每个应用的smali文件的最大数量 feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum number of vocabulary size') # 词汇的最大数量 feature_argparse.add_argument('--update', action='store_true', help='Whether update the existed features.') # 是否更新已存在的特征 # 定义与检测器相关的命令行参数 detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether use cuda enable gpu or cpu.') # 是否使用CUDA启用GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # 随机种子 detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # 密集隐藏单元列表 detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # dropout率 detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # leaky-relu或elu的斜率系数 detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # 在GAT层使用平滑激活函数elu detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # mini-batch大小 detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # 训练的epoch数 detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # 初始学习率 detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # 权重衰减系数 # 定义与数据集相关的命令行参数 dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cache data or not.') # 是否使用缓存数据 # 定义与模式相关的命令行参数 mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # 学习模型或测试模型的模式 mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # 测试模型名称的后缀日期 # 定义主函数 def _main(): args = cmd_md.parse_args() # 根据参数创建数据集
dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature'))
0
2023-11-27 02:00:23+00:00
16k
Vali-98/XTTS-RVC-UI
rvc.py
[ { "identifier": "SynthesizerTrnMs256NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n 
segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers 
= n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = 
upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, max_len=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec((z * x_mask)[:, :, :max_len], g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "VC", "path": "vc_infer_pipeline.py", "snippet": "class VC(object):\n def __init__(self, tgt_sr, config):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n\n # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)\n def get_optimal_torch_device(self, index: int = 0) -> torch.device:\n # Get cuda device\n if torch.cuda.is_available():\n return torch.device(\n f\"cuda:{index % torch.cuda.device_count()}\"\n ) # Very fast\n elif torch.backends.mps.is_available():\n return torch.device(\"mps\")\n # Insert an else here to grab \"xla\" devices if available. TO DO later. Requires the torch_xla.core.xla_model library\n # Else wise return the \"cpu\" as a torch device,\n return torch.device(\"cpu\")\n\n # Fork Feature: Compute f0 with the crepe method\n def get_f0_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n p_len,\n hop_length=160, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. 
Lower hop lengths means more pitch accuracy but longer inference time.\n model=\"full\", # Either use crepe-tiny \"tiny\" or crepe \"full\". Default is full\n ):\n x = x.astype(\n np.float32\n ) # fixes the F.conv2D exception. We needed to convert double to float.\n x /= np.quantile(np.abs(x), 0.999)\n torch_device = self.get_optimal_torch_device()\n audio = torch.from_numpy(x).to(torch_device, copy=True)\n audio = torch.unsqueeze(audio, dim=0)\n if audio.ndim == 2 and audio.shape[0] > 1:\n audio = torch.mean(audio, dim=0, keepdim=True).detach()\n audio = audio.detach()\n print(\"Initiating prediction with a crepe_hop_length of: \" + str(hop_length))\n pitch: Tensor = torchcrepe.predict(\n audio,\n self.sr,\n hop_length,\n f0_min,\n f0_max,\n model,\n batch_size=hop_length * 2,\n device=torch_device,\n pad=True,\n )\n p_len = p_len or x.shape[0] // hop_length\n # Resize the pitch for final f0\n source = np.array(pitch.squeeze(0).cpu().float().numpy())\n source[source < 0.001] = np.nan\n target = np.interp(\n np.arange(0, len(source) * p_len, len(source)) / p_len,\n np.arange(0, len(source)),\n source,\n )\n f0 = np.nan_to_num(target)\n return f0 # Resized f0\n\n def get_f0_official_crepe_computation(\n self,\n x,\n f0_min,\n f0_max,\n model=\"full\",\n ):\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n return f0\n\n # Fork Feature: Compute pYIN f0 method\n def get_f0_pyin_computation(self, x, f0_min, f0_max):\n y, sr = librosa.load(\"saudio/Sidney.wav\", self.sr, mono=True)\n f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)\n f0 = f0[1:] # Get rid of extra first frame\n return f0\n\n # Fork Feature: Acquire median hybrid f0 estimation calculation\n def get_f0_hybrid_computation(\n self,\n methods_str,\n input_audio_path,\n x,\n f0_min,\n f0_max,\n p_len,\n filter_radius,\n crepe_hop_length,\n time_step,\n ):\n # Get various f0 methods from input to use in the computation stack\n s = methods_str\n s = s.split(\"hybrid\")[1]\n s = s.replace(\"[\", \"\").replace(\"]\", \"\")\n methods = s.split(\"+\")\n f0_computation_stack = []\n\n print(\"Calculating f0 pitch estimations for methods: %s\" % str(methods))\n x = x.astype(np.float32)\n x /= np.quantile(np.abs(x), 0.999)\n # Get f0 calculations for all methods specified\n for method in methods:\n f0 = None\n if method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)\n f0 = f0[1:] # Get rid of extra first frame\n elif method == \"crepe-tiny\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, \"tiny\")\n f0 = f0[1:] # Get rid of extra first frame\n elif method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length\n )\n elif method 
== \"mangio-crepe-tiny\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length, \"tiny\"\n )\n elif method == \"harvest\":\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n f0 = f0[1:] # Get rid of first frame.\n elif method == \"dio\": # Potentially buggy?\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n f0 = f0[1:]\n # elif method == \"pyin\": Not Working just yet\n # f0 = self.get_f0_pyin_computation(x, f0_min, f0_max)\n # Push method to the stack\n f0_computation_stack.append(f0)\n\n for fc in f0_computation_stack:\n print(len(fc))\n\n print(\"Calculating hybrid median f0 from the stack of: %s\" % str(methods))\n f0_median_hybrid = None\n if len(f0_computation_stack) == 1:\n f0_median_hybrid = f0_computation_stack[0]\n else:\n f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)\n return f0_median_hybrid\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n crepe_hop_length,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"dio\": # Potentially Buggy?\n f0, t = pyworld.dio(\n x.astype(np.double),\n fs=self.sr,\n f0_ceil=f0_max,\n f0_floor=f0_min,\n frame_period=10,\n )\n f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max)\n elif f0_method == \"crepe-tiny\":\n f0 = self.get_f0_official_crepe_computation(x, f0_min, f0_max, \"tiny\")\n elif f0_method == \"mangio-crepe\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length\n )\n elif f0_method == \"mangio-crepe-tiny\":\n f0 = self.get_f0_crepe_computation(\n x, f0_min, f0_max, p_len, crepe_hop_length, \"tiny\"\n )\n elif f0_method == \"rmvpe\":\n if hasattr(self, \"model_rmvpe\") == False:\n from rmvpe import RMVPE\n\n self.model_rmvpe = RMVPE(\n './models/rmvpe.pt', is_half=self.is_half, device=self.device\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n elif \"hybrid\" in f0_method:\n # Perform hybrid median pitch estimation\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = self.get_f0_hybrid_computation(\n f0_method,\n input_audio_path,\n x,\n f0_min,\n f0_max,\n p_len,\n filter_radius,\n crepe_hop_length,\n time_step,\n )\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = 
np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak = f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int_)\n\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch != None and pitchf != None:\n feats0 = feats.clone()\n if (\n isinstance(index, type(None)) == False\n and isinstance(big_npy, type(None)) == False\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch != None and pitchf != None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch != None and pitchf != None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch != None and pitchf != None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n if pitch != None and pitchf != None:\n audio1 = (\n (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])\n .data.cpu()\n .float()\n .numpy()\n )\n else:\n audio1 = (\n (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()\n )\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[0] += t1 - t0\n times[2] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n # 
file_big_npy,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n crepe_hop_length,\n f0_file=None,\n ):\n if (\n file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index) == True\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += audio_pad[i : i - self.window]\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n np.abs(audio_sum[t - self.t_query : t + self.t_query])\n == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\") == True:\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n crepe_hop_length,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if self.device == \"mps\":\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[1] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if resample_sr >= 16000 and tgt_sr != resample_sr:\n audio_opt = librosa.resample(\n 
audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
from multiprocessing import cpu_count
from pathlib import Path
from fairseq import checkpoint_utils
from scipy.io import wavfile
from infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from vc_infer_pipeline import VC
import torch
import librosa
import numpy as np
11,030
class Config:
    def __init__(self, device, is_half):
        self.device = device
        self.is_half = is_half
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("16 series/10 series P40 forced single precision")
                self.is_half = False
            else:
                self.gpu_name = None
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 2:
                print('Not enough VRAM to load models (Probably)')
                self.device = 'cpu'
        elif torch.backends.mps.is_available():
            print("No supported N-card found, use MPS for inference")
            self.device = "mps"
        else:
            print("No supported N-card found, use CPU for inference")
            self.device = "cpu"
        if self.n_cpu == 0:
            self.n_cpu = cpu_count()
        if self.is_half:
            # 6G memory config
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # 5G memory config
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41
        if self.gpu_mem != None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32
        return x_pad, x_query, x_center, x_max

def load_hubert(device, is_half, model_path):
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', )
    hubert = models[0]
    hubert = hubert.to(device)
    if is_half:
        hubert = hubert.half()
    else:
        hubert = hubert.float()
    hubert.eval()
    return hubert

def get_vc(device, is_half, config, model_path):
    cpt = torch.load(model_path, map_location='cpu')
    if "config" not in cpt or "weight" not in cpt:
        raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.')
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
    if_f0 = cpt.get("f0", 1)
    version = cpt.get("version", "v1")
    if version == "v1":
        if if_f0 == 1:
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
0
2023-11-30 08:47:28+00:00
16k
ubc-vision/nf-soft-mining
examples/utils.py
[ { "identifier": "OccGridEstimator", "path": "nerfacc/estimators/occ_grid.py", "snippet": "class OccGridEstimator(AbstractEstimator):\n \"\"\"Occupancy grid transmittance estimator for spatial skipping.\n\n References: \"Instant Neural Graphics Primitives.\"\n\n Args:\n roi_aabb: The axis-aligned bounding box of the region of interest. Useful for mapping\n the 3D space to the grid.\n resolution: The resolution of the grid. If an integer is given, the grid is assumed to\n be a cube. Otherwise, a list or a tensor of shape (3,) is expected. Default: 128.\n levels: The number of levels of the grid. Default: 1.\n \"\"\"\n\n DIM: int = 3\n\n def __init__(\n self,\n roi_aabb: Union[List[int], Tensor],\n resolution: Union[int, List[int], Tensor] = 128,\n levels: int = 1,\n **kwargs,\n ) -> None:\n super().__init__()\n\n if \"contraction_type\" in kwargs:\n raise ValueError(\n \"`contraction_type` is not supported anymore for nerfacc >= 0.4.0.\"\n )\n\n # check the resolution is legal\n if isinstance(resolution, int):\n resolution = [resolution] * self.DIM\n if isinstance(resolution, (list, tuple)):\n resolution = torch.tensor(resolution, dtype=torch.int32)\n assert isinstance(resolution, Tensor), f\"Invalid type: {resolution}!\"\n assert resolution.shape[0] == self.DIM, f\"Invalid shape: {resolution}!\"\n\n # check the roi_aabb is legal\n if isinstance(roi_aabb, (list, tuple)):\n roi_aabb = torch.tensor(roi_aabb, dtype=torch.float32)\n assert isinstance(roi_aabb, Tensor), f\"Invalid type: {roi_aabb}!\"\n assert roi_aabb.shape[0] == self.DIM * 2, f\"Invalid shape: {roi_aabb}!\"\n\n # multiple levels of aabbs\n aabbs = torch.stack(\n [_enlarge_aabb(roi_aabb, 2**i) for i in range(levels)], dim=0\n )\n\n # total number of voxels\n self.cells_per_lvl = int(resolution.prod().item())\n self.levels = levels\n\n # Buffers\n self.register_buffer(\"resolution\", resolution) # [3]\n self.register_buffer(\"aabbs\", aabbs) # [n_aabbs, 6]\n self.register_buffer(\n \"occs\", torch.zeros(self.levels * self.cells_per_lvl)\n )\n self.register_buffer(\n \"binaries\",\n torch.zeros([levels] + resolution.tolist(), dtype=torch.bool),\n )\n\n # Grid coords & indices\n grid_coords = _meshgrid3d(resolution).reshape(\n self.cells_per_lvl, self.DIM\n )\n self.register_buffer(\"grid_coords\", grid_coords, persistent=False)\n grid_indices = torch.arange(self.cells_per_lvl)\n self.register_buffer(\"grid_indices\", grid_indices, persistent=False)\n\n @torch.no_grad()\n def sampling(\n self,\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # sigma/alpha function for skipping invisible space\n sigma_fn: Optional[Callable] = None,\n alpha_fn: Optional[Callable] = None,\n near_plane: float = 0.0,\n far_plane: float = 1e10,\n t_min: Optional[Tensor] = None, # [n_rays]\n t_max: Optional[Tensor] = None, # [n_rays]\n # rendering options\n render_step_size: float = 1e-3,\n early_stop_eps: float = 1e-4,\n alpha_thre: float = 0.0,\n stratified: bool = False,\n cone_angle: float = 0.0,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Sampling with spatial skipping.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: Ray origins of shape (n_rays, 3).\n rays_d: Normalized ray directions of shape (n_rays, 3).\n sigma_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `sigma_fn`. 
It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation density values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n alpha_fn: Optional. If provided, the marching will skip the invisible space\n by evaluating the density along the ray with `alpha_fn`. It should be a\n function that takes in samples {t_starts (N,), t_ends (N,),\n ray indices (N,)} and returns the post-activation opacity values (N,).\n You should only provide either `sigma_fn` or `alpha_fn`.\n near_plane: Optional. Near plane distance. Default: 0.0.\n far_plane: Optional. Far plane distance. Default: 1e10.\n t_min: Optional. Per-ray minimum distance. Tensor with shape (n_rays).\n If profided, the marching will start from maximum of t_min and near_plane.\n t_max: Optional. Per-ray maximum distance. Tensor with shape (n_rays).\n If profided, the marching will stop by minimum of t_max and far_plane.\n render_step_size: Step size for marching. Default: 1e-3.\n early_stop_eps: Early stop threshold for skipping invisible space. Default: 1e-4.\n alpha_thre: Alpha threshold for skipping empty space. Default: 0.0.\n stratified: Whether to use stratified sampling. Default: False.\n cone_angle: Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n\n Returns:\n A tuple of {LongTensor, Tensor, Tensor}:\n\n - **ray_indices**: Ray index of each sample. IntTensor with shape (n_samples).\n - **t_starts**: Per-sample start distance. Tensor with shape (n_samples,).\n - **t_ends**: Per-sample end distance. Tensor with shape (n_samples,).\n\n Examples:\n\n .. code-block:: python\n\n >>> ray_indices, t_starts, t_ends = grid.sampling(\n >>> rays_o, rays_d, render_step_size=1e-3)\n >>> t_mid = (t_starts + t_ends) / 2.0\n >>> sample_locs = rays_o[ray_indices] + t_mid * rays_d[ray_indices]\n\n \"\"\"\n\n near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane)\n far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane)\n\n if t_min is not None:\n near_planes = torch.clamp(near_planes, min=t_min)\n if t_max is not None:\n far_planes = torch.clamp(far_planes, max=t_max)\n\n if stratified:\n near_planes += torch.rand_like(near_planes) * render_step_size\n intervals, samples, _ = traverse_grids(\n rays_o,\n rays_d,\n self.binaries,\n self.aabbs,\n near_planes=near_planes,\n far_planes=far_planes,\n step_size=render_step_size,\n cone_angle=cone_angle,\n )\n t_starts = intervals.vals[intervals.is_left]\n t_ends = intervals.vals[intervals.is_right]\n ray_indices = samples.ray_indices\n packed_info = samples.packed_info\n\n # skip invisible space\n if (alpha_thre > 0.0 or early_stop_eps > 0.0) and (\n sigma_fn is not None or alpha_fn is not None\n ):\n alpha_thre = min(alpha_thre, self.occs.mean().item())\n\n # Compute visibility of the samples, and filter out invisible samples\n if sigma_fn is not None:\n if t_starts.shape[0] != 0:\n sigmas = sigma_fn(t_starts, t_ends, ray_indices)\n else:\n sigmas = torch.empty((0,), device=t_starts.device)\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! 
Got {}\".format(sigmas.shape)\n masks = render_visibility_from_density(\n t_starts=t_starts,\n t_ends=t_ends,\n sigmas=sigmas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n elif alpha_fn is not None:\n if t_starts.shape[0] != 0:\n alphas = alpha_fn(t_starts, t_ends, ray_indices)\n else:\n alphas = torch.empty((0,), device=t_starts.device)\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n masks = render_visibility_from_alpha(\n alphas=alphas,\n packed_info=packed_info,\n early_stop_eps=early_stop_eps,\n alpha_thre=alpha_thre,\n )\n ray_indices, t_starts, t_ends = (\n ray_indices[masks],\n t_starts[masks],\n t_ends[masks],\n )\n return ray_indices, t_starts, t_ends\n\n @torch.no_grad()\n def update_every_n_steps(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 1e-2,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n n: int = 16,\n ) -> None:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n step: Current training step.\n occ_eval_fn: A function that takes in sample locations :math:`(N, 3)` and\n returns the occupancy values :math:`(N, 1)` at those locations.\n occ_thre: Threshold used to binarize the occupancy grid. Default: 1e-2.\n ema_decay: The decay rate for EMA updates. Default: 0.95.\n warmup_steps: Sample all cells during the warmup stage. After the warmup\n stage we change the sampling strategy to 1/4 uniformly sampled cells\n together with 1/4 occupied cells. Default: 256.\n n: Update the grid every n steps. Default: 16.\n \"\"\"\n if not self.training:\n raise RuntimeError(\n \"You should only call this function only during training. \"\n \"Please call _update() directly if you want to update the \"\n \"field during inference.\"\n )\n if step % n == 0 and self.training:\n self._update(\n step=step,\n occ_eval_fn=occ_eval_fn,\n occ_thre=occ_thre,\n ema_decay=ema_decay,\n warmup_steps=warmup_steps,\n )\n\n # adapted from https://github.com/kwea123/ngp_pl/blob/master/models/networks.py\n @torch.no_grad()\n def mark_invisible_cells(\n self,\n K: Tensor,\n c2w: Tensor,\n width: int,\n height: int,\n near_plane: float = 0.0,\n chunk: int = 32**3,\n ) -> None:\n \"\"\"Mark the cells that aren't covered by the cameras with density -1.\n Should only be executed once before training starts.\n\n Args:\n K: Camera intrinsics of shape (N, 3, 3) or (1, 3, 3).\n c2w: Camera to world poses of shape (N, 3, 4) or (N, 4, 4).\n width: Image width in pixels\n height: Image height in pixels\n near_plane: Near plane distance\n chunk: The chunk size to split the cells (to avoid OOM)\n \"\"\"\n assert K.dim() == 3 and K.shape[1:] == (3, 3)\n assert c2w.dim() == 3 and (\n c2w.shape[1:] == (3, 4) or c2w.shape[1:] == (4, 4)\n )\n assert K.shape[0] == c2w.shape[0] or K.shape[0] == 1\n\n N_cams = c2w.shape[0]\n w2c_R = c2w[:, :3, :3].transpose(2, 1) # (N_cams, 3, 3)\n w2c_T = -w2c_R @ c2w[:, :3, 3:] # (N_cams, 3, 1)\n\n lvl_indices = self._get_all_cells()\n for lvl, indices in enumerate(lvl_indices):\n grid_coords = self.grid_coords[indices]\n\n for i in range(0, len(indices), chunk):\n x = grid_coords[i : i + chunk] / (self.resolution - 1)\n indices_chunk = indices[i : i + chunk]\n # voxel coordinates [0, 1]^3 -> world\n xyzs_w = (\n self.aabbs[lvl, :3]\n + x * (self.aabbs[lvl, 3:] - self.aabbs[lvl, :3])\n ).T\n xyzs_c = w2c_R @ xyzs_w + w2c_T # (N_cams, 3, chunk)\n uvd = K @ xyzs_c # (N_cams, 3, chunk)\n uv = uvd[:, :2] / uvd[:, 2:] # (N_cams, 2, chunk)\n 
in_image = (\n (uvd[:, 2] >= 0)\n & (uv[:, 0] >= 0)\n & (uv[:, 0] < width)\n & (uv[:, 1] >= 0)\n & (uv[:, 1] < height)\n )\n covered_by_cam = (\n uvd[:, 2] >= near_plane\n ) & in_image # (N_cams, chunk)\n # if the cell is visible by at least one camera\n count = covered_by_cam.sum(0) / N_cams\n\n too_near_to_cam = (\n uvd[:, 2] < near_plane\n ) & in_image # (N, chunk)\n # if the cell is too close (in front) to any camera\n too_near_to_any_cam = too_near_to_cam.any(0)\n # a valid cell should be visible by at least one camera and not too close to any camera\n valid_mask = (count > 0) & (~too_near_to_any_cam)\n\n cell_ids_base = lvl * self.cells_per_lvl\n self.occs[cell_ids_base + indices_chunk] = torch.where(\n valid_mask, 0.0, -1.0\n )\n\n @torch.no_grad()\n def _get_all_cells(self) -> List[Tensor]:\n \"\"\"Returns all cells of the grid.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + self.grid_indices\n indices = self.grid_indices[self.occs[cell_ids] >= 0.0]\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _sample_uniform_and_occupied_cells(self, n: int) -> List[Tensor]:\n \"\"\"Samples both n uniform and occupied cells.\"\"\"\n lvl_indices = []\n for lvl in range(self.levels):\n uniform_indices = torch.randint(\n self.cells_per_lvl, (n,), device=self.device\n )\n # filter out the cells with -1 density (non-visible to any camera)\n cell_ids = lvl * self.cells_per_lvl + uniform_indices\n uniform_indices = uniform_indices[self.occs[cell_ids] >= 0.0]\n occupied_indices = torch.nonzero(self.binaries[lvl].flatten())[:, 0]\n if n < len(occupied_indices):\n selector = torch.randint(\n len(occupied_indices), (n,), device=self.device\n )\n occupied_indices = occupied_indices[selector]\n indices = torch.cat([uniform_indices, occupied_indices], dim=0)\n lvl_indices.append(indices)\n return lvl_indices\n\n @torch.no_grad()\n def _update(\n self,\n step: int,\n occ_eval_fn: Callable,\n occ_thre: float = 0.01,\n ema_decay: float = 0.95,\n warmup_steps: int = 256,\n ) -> None:\n \"\"\"Update the occ field in the EMA way.\"\"\"\n # sample cells\n if step < warmup_steps:\n lvl_indices = self._get_all_cells()\n else:\n N = self.cells_per_lvl // 4\n lvl_indices = self._sample_uniform_and_occupied_cells(N)\n\n for lvl, indices in enumerate(lvl_indices):\n # infer occupancy: density * step_size\n grid_coords = self.grid_coords[indices]\n x = (\n grid_coords + torch.rand_like(grid_coords, dtype=torch.float32)\n ) / self.resolution\n # voxel coordinates [0, 1]^3 -> world\n x = self.aabbs[lvl, :3] + x * (\n self.aabbs[lvl, 3:] - self.aabbs[lvl, :3]\n )\n occ = occ_eval_fn(x).squeeze(-1)\n # ema update\n cell_ids = lvl * self.cells_per_lvl + indices\n self.occs[cell_ids] = torch.maximum(\n self.occs[cell_ids] * ema_decay, occ\n )\n # suppose to use scatter max but emperically it is almost the same.\n # self.occs, _ = scatter_max(\n # occ, indices, dim=0, out=self.occs * ema_decay\n # )\n thre = torch.clamp(self.occs[self.occs >= 0].mean(), max=occ_thre)\n self.binaries = (self.occs > thre).view(self.binaries.shape)" }, { "identifier": "PropNetEstimator", "path": "nerfacc/estimators/prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n \"\"\"Proposal network transmittance estimator.\n\n References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n Args:\n optimizer: The optimizer to use for the proposal networks.\n scheduler: The 
learning rate scheduler to use for the proposal networks.\n \"\"\"\n\n def __init__(\n self,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n ) -> None:\n super().__init__()\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.prop_cache: List = []\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\"uniform\", \"lindisp\"] = \"lindisp\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Note:\n When `requires_grad` is `True`, the gradients are allowed to flow\n through the proposal networks, and the outputs of the proposal\n networks are cached to update them later when calling `update_every_n_steps()`\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for level_fn, level_samples in zip(prop_sigma_fns, prop_samples):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(\n t_starts, t_ends, sigmas\n )\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[:, :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs))\n\n intervals, _ = importance_sampling(\n intervals, cdfs, num_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. 
Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[:, :1])], dim=-1)\n cdfs = cdfs.detach()\n\n loss = 0.0\n while self.prop_cache:\n prop_intervals, prop_cdfs = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" }, { "identifier": "ray_aabb_intersect", "path": "nerfacc/grid.py", "snippet": "@torch.no_grad()\ndef ray_aabb_intersect(\n rays_o: Tensor,\n rays_d: Tensor,\n aabbs: Tensor,\n near_plane: float = -float(\"inf\"),\n far_plane: float = float(\"inf\"),\n miss_value: float = float(\"inf\"),\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Ray-AABB intersection.\n\n Args:\n rays_o: (n_rays, 3) Ray origins.\n rays_d: (n_rays, 3) Normalized ray directions.\n aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.\n near_plane: Optional. Near plane. Default to -infinity.\n far_plane: Optional. Far plane. Default to infinity.\n miss_value: Optional. 
Value to use for tmin and tmax when there is no intersection.\n Default to infinity.\n\n Returns:\n A tuple of {Tensor, Tensor, BoolTensor}:\n\n - **t_mins**: (n_rays, m) tmin for each ray-AABB pair.\n - **t_maxs**: (n_rays, m) tmax for each ray-AABB pair.\n - **hits**: (n_rays, m) whether each ray-AABB pair intersects.\n \"\"\"\n assert rays_o.ndim == 2 and rays_o.shape[-1] == 3\n assert rays_d.ndim == 2 and rays_d.shape[-1] == 3\n assert aabbs.ndim == 2 and aabbs.shape[-1] == 6\n t_mins, t_maxs, hits = _C.ray_aabb_intersect(\n rays_o.contiguous(),\n rays_d.contiguous(),\n aabbs.contiguous(),\n near_plane,\n far_plane,\n miss_value,\n )\n return t_mins, t_maxs, hits" }, { "identifier": "traverse_grids", "path": "nerfacc/grid.py", "snippet": "@torch.no_grad()\ndef traverse_grids(\n # rays\n rays_o: Tensor, # [n_rays, 3]\n rays_d: Tensor, # [n_rays, 3]\n # grids\n binaries: Tensor, # [m, resx, resy, resz]\n aabbs: Tensor, # [m, 6]\n # options\n near_planes: Optional[Tensor] = None, # [n_rays]\n far_planes: Optional[Tensor] = None, # [n_rays]\n step_size: Optional[float] = 1e-3,\n cone_angle: Optional[float] = 0.0,\n traverse_steps_limit: Optional[int] = None,\n over_allocate: Optional[bool] = False,\n rays_mask: Optional[Tensor] = None, # [n_rays]\n # pre-compute intersections\n t_sorted: Optional[Tensor] = None, # [n_rays, n_grids * 2]\n t_indices: Optional[Tensor] = None, # [n_rays, n_grids * 2]\n hits: Optional[Tensor] = None, # [n_rays, n_grids]\n) -> Tuple[RayIntervals, RaySamples, Tensor]:\n \"\"\"Ray Traversal within Multiple Grids.\n\n Note:\n This function is not differentiable to any inputs.\n\n Args:\n rays_o: (n_rays, 3) Ray origins.\n rays_d: (n_rays, 3) Normalized ray directions.\n binary_grids: (m, resx, resy, resz) Multiple binary grids with the same resolution.\n aabbs: (m, 6) Axis-aligned bounding boxes {xmin, ymin, zmin, xmax, ymax, zmax}.\n near_planes: Optional. (n_rays,) Near planes for the traversal to start. Default to 0.\n far_planes: Optional. (n_rays,) Far planes for the traversal to end. Default to infinity.\n step_size: Optional. Step size for ray traversal. Default to 1e-3.\n cone_angle: Optional. Cone angle for linearly-increased step size. 0. means\n constant step size. Default: 0.0.\n traverse_steps_limit: Optional. Maximum number of samples per ray.\n over_allocate: Optional. Whether to over-allocate the memory for the outputs.\n rays_mask: Optional. (n_rays,) Skip some rays if given.\n t_sorted: Optional. (n_rays, n_grids * 2) Pre-computed sorted t values for each ray-grid pair. Default to None.\n t_indices: Optional. (n_rays, n_grids * 2) Pre-computed sorted t indices for each ray-grid pair. Default to None.\n hits: Optional. (n_rays, n_grids) Pre-computed hit flags for each ray-grid pair. 
Default to None.\n\n Returns:\n A :class:`RayIntervals` object containing the intervals of the ray traversal, and\n a :class:`RaySamples` object containing the samples within each interval.\n t :class:`Tensor` of shape (n_rays,) containing the terminated t values for each ray.\n \"\"\"\n\n if near_planes is None:\n near_planes = torch.zeros_like(rays_o[:, 0])\n if far_planes is None:\n far_planes = torch.full_like(rays_o[:, 0], float(\"inf\"))\n\n if rays_mask is None:\n rays_mask = torch.ones_like(rays_o[:, 0], dtype=torch.bool)\n if traverse_steps_limit is None:\n traverse_steps_limit = -1\n if over_allocate:\n assert (\n traverse_steps_limit > 0\n ), \"traverse_steps_limit must be set if over_allocate is True.\"\n\n if t_sorted is None or t_indices is None or hits is None:\n # Compute ray aabb intersection for all levels of grid. [n_rays, m]\n t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, aabbs)\n # Sort the t values for each ray. [n_rays, m]\n t_sorted, t_indices = torch.sort(\n torch.cat([t_mins, t_maxs], dim=-1), dim=-1\n )\n\n # Traverse the grids.\n intervals, samples, termination_planes = _C.traverse_grids(\n # rays\n rays_o.contiguous(), # [n_rays, 3]\n rays_d.contiguous(), # [n_rays, 3]\n rays_mask.contiguous(), # [n_rays]\n # grids\n binaries.contiguous(), # [m, resx, resy, resz]\n aabbs.contiguous(), # [m, 6]\n # intersections\n t_sorted.contiguous(), # [n_rays, m * 2]\n t_indices.contiguous(), # [n_rays, m * 2]\n hits.contiguous(), # [n_rays, m]\n # options\n near_planes.contiguous(), # [n_rays]\n far_planes.contiguous(), # [n_rays]\n step_size,\n cone_angle,\n True,\n True,\n True,\n traverse_steps_limit,\n over_allocate,\n )\n return (\n RayIntervals._from_cpp(intervals),\n RaySamples._from_cpp(samples),\n termination_planes,\n )" }, { "identifier": "accumulate_along_rays_", "path": "nerfacc/volrend.py", "snippet": "def accumulate_along_rays_(\n weights: Tensor,\n values: Optional[Tensor] = None,\n ray_indices: Optional[Tensor] = None,\n outputs: Optional[Tensor] = None,\n) -> None:\n \"\"\"Accumulate volumetric values along the ray.\n\n Inplace version of :func:`accumulate_along_rays`.\n \"\"\"\n if values is None:\n src = weights[..., None]\n else:\n assert values.dim() == weights.dim() + 1\n assert weights.shape == values.shape[:-1]\n src = weights[..., None] * values\n if ray_indices is not None:\n assert weights.dim() == 1, \"weights must be flattened\"\n assert (\n outputs.dim() == 2 and outputs.shape[-1] == src.shape[-1]\n ), \"outputs must be of shape (n_rays, D)\"\n outputs.index_add_(0, ray_indices, src)\n else:\n outputs.add_(src.sum(dim=-2))" }, { "identifier": "render_weight_from_density", "path": "nerfacc/volrend.py", "snippet": "def render_weight_from_density(\n t_starts: Tensor,\n t_ends: Tensor,\n sigmas: Tensor,\n packed_info: Optional[Tensor] = None,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n prefix_trans: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Compute rendering weights :math:`w_i` from density :math:`\\\\sigma_i` and interval :math:`\\\\delta_i`.\n\n .. math::\n w_i = T_i(1 - exp(-\\\\sigma_i\\delta_i)), \\\\quad\\\\textrm{where}\\\\quad T_i = exp(-\\\\sum_{j=1}^{i-1}\\\\sigma_j\\delta_j)\n\n This function supports both batched and flattened input tensor. For flattened input tensor, either\n (`packed_info`) or (`ray_indices` and `n_rays`) should be provided.\n\n Args:\n t_starts: The start time of the samples. 
Tensor with shape (all_samples,) or (n_rays, n_samples).\n t_ends: The end time of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).\n sigmas: The density values of the samples. Tensor with shape (all_samples,) or (n_rays, n_samples).\n packed_info: A tensor of shape (n_rays, 2) that specifies the start and count\n of each chunk in the flattened samples, with in total n_rays chunks.\n Useful for flattened input.\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n prefix_trans: The pre-computed transmittance of the samples. Tensor with shape (all_samples,).\n\n Returns:\n The rendering weights, transmittance and opacities, both with the same shape as `sigmas`.\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device=\"cuda\")\n >>> t_ends = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], device=\"cuda\")\n >>> sigmas = torch.tensor([0.4, 0.8, 0.1, 0.8, 0.1, 0.0, 0.9], device=\"cuda\")\n >>> ray_indices = torch.tensor([0, 0, 0, 1, 1, 2, 2], device=\"cuda\")\n >>> weights, transmittance, alphas = render_weight_from_density(\n >>> t_starts, t_ends, sigmas, ray_indices=ray_indices)\n weights: [0.33, 0.37, 0.03, 0.55, 0.04, 0.00, 0.59]\n transmittance: [1.00, 0.67, 0.30, 1.00, 0.45, 1.00, 1.00]\n alphas: [0.33, 0.55, 0.095, 0.55, 0.095, 0.00, 0.59]\n\n \"\"\"\n trans, alphas = render_transmittance_from_density(\n t_starts, t_ends, sigmas, packed_info, ray_indices, n_rays, prefix_trans\n )\n weights = trans * alphas\n return weights, trans, alphas" }, { "identifier": "rendering", "path": "nerfacc/volrend.py", "snippet": "def rendering(\n # ray marching results\n t_starts: Tensor,\n t_ends: Tensor,\n ray_indices: Optional[Tensor] = None,\n n_rays: Optional[int] = None,\n # radiance field\n rgb_sigma_fn: Optional[Callable] = None,\n rgb_alpha_fn: Optional[Callable] = None,\n # rendering options\n render_bkgd: Optional[Tensor] = None,\n) -> Tuple[Tensor, Tensor, Tensor, Dict]:\n \"\"\"Render the rays through the radience field defined by `rgb_sigma_fn`.\n\n This function is differentiable to the outputs of `rgb_sigma_fn` so it can\n be used for gradient-based optimization. It supports both batched and flattened input tensor.\n For flattened input tensor, both `ray_indices` and `n_rays` should be provided.\n\n\n Note:\n Either `rgb_sigma_fn` or `rgb_alpha_fn` should be provided.\n\n Warning:\n This function is not differentiable to `t_starts`, `t_ends` and `ray_indices`.\n\n Args:\n t_starts: Per-sample start distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n t_ends: Per-sample end distance. Tensor with shape (n_rays, n_samples) or (all_samples,).\n ray_indices: Ray indices of the flattened samples. LongTensor with shape (all_samples).\n n_rays: Number of rays. Only useful when `ray_indices` is provided.\n rgb_sigma_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and density\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n rgb_alpha_fn: A function that takes in samples {t_starts, t_ends,\n ray indices} and returns the post-activation rgb (..., 3) and opacity\n values (...,). The shape `...` is the same as the shape of `t_starts`.\n render_bkgd: Background color. 
Tensor with shape (3,).\n\n Returns:\n Ray colors (n_rays, 3), opacities (n_rays, 1), depths (n_rays, 1) and a dict\n containing extra intermediate results (e.g., \"weights\", \"trans\", \"alphas\")\n\n Examples:\n\n .. code-block:: python\n\n >>> t_starts = torch.tensor([0.1, 0.2, 0.1, 0.2, 0.3], device=\"cuda:0\")\n >>> t_ends = torch.tensor([0.2, 0.3, 0.2, 0.3, 0.4], device=\"cuda:0\")\n >>> ray_indices = torch.tensor([0, 0, 1, 1, 1], device=\"cuda:0\")\n >>> def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n >>> # This is a dummy function that returns random values.\n >>> rgbs = torch.rand((t_starts.shape[0], 3), device=\"cuda:0\")\n >>> sigmas = torch.rand((t_starts.shape[0],), device=\"cuda:0\")\n >>> return rgbs, sigmas\n >>> colors, opacities, depths, extras = rendering(\n >>> t_starts, t_ends, ray_indices, n_rays=2, rgb_sigma_fn=rgb_sigma_fn)\n >>> print(colors.shape, opacities.shape, depths.shape)\n torch.Size([2, 3]) torch.Size([2, 1]) torch.Size([2, 1])\n >>> extras.keys()\n dict_keys(['weights', 'alphas', 'trans'])\n\n \"\"\"\n if ray_indices is not None:\n assert (\n t_starts.shape == t_ends.shape == ray_indices.shape\n ), \"Since nerfacc 0.5.0, t_starts, t_ends and ray_indices must have the same shape (N,). \"\n\n if rgb_sigma_fn is None and rgb_alpha_fn is None:\n raise ValueError(\n \"At least one of `rgb_sigma_fn` and `rgb_alpha_fn` should be specified.\"\n )\n\n # Query sigma/alpha and color with gradients\n if rgb_sigma_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n sigmas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n sigmas.shape == t_starts.shape\n ), \"sigmas must have shape of (N,)! Got {}\".format(sigmas.shape)\n # Rendering: compute weights.\n weights, trans, alphas = render_weight_from_density(\n t_starts,\n t_ends,\n sigmas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"alphas\": alphas,\n \"trans\": trans,\n \"sigmas\": sigmas,\n \"rgbs\": rgbs,\n }\n elif rgb_alpha_fn is not None:\n if t_starts.shape[0] != 0:\n rgbs, alphas = rgb_alpha_fn(t_starts, t_ends, ray_indices)\n else:\n rgbs = torch.empty((0, 3), device=t_starts.device)\n alphas = torch.empty((0,), device=t_starts.device)\n assert rgbs.shape[-1] == 3, \"rgbs must have 3 channels, got {}\".format(\n rgbs.shape\n )\n assert (\n alphas.shape == t_starts.shape\n ), \"alphas must have shape of (N,)! Got {}\".format(alphas.shape)\n # Rendering: compute weights.\n weights, trans = render_weight_from_alpha(\n alphas,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n extras = {\n \"weights\": weights,\n \"trans\": trans,\n \"rgbs\": rgbs,\n \"alphas\": alphas,\n }\n\n # Rendering: accumulate rgbs, opacities, and depths along the rays.\n colors = accumulate_along_rays(\n weights, values=rgbs, ray_indices=ray_indices, n_rays=n_rays\n )\n opacities = accumulate_along_rays(\n weights, values=None, ray_indices=ray_indices, n_rays=n_rays\n )\n depths = accumulate_along_rays(\n weights,\n values=(t_starts + t_ends)[..., None] / 2.0,\n ray_indices=ray_indices,\n n_rays=n_rays,\n )\n depths = depths / opacities.clamp_min(torch.finfo(rgbs.dtype).eps)\n\n # Background composition.\n if render_bkgd is not None:\n colors = colors + render_bkgd * (1.0 - opacities)\n\n return colors, opacities, depths, extras" } ]
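For orientation, the ray_aabb_intersect snippet in the context above returns per ray-box entry/exit times via the standard slab test. Below is a rough, CPU-only reference of that test; the function name is illustrative, directions are assumed to have no exactly-zero components, and the real nerfacc kernel additionally applies near_plane/far_plane clipping and fills misses with miss_value.

import torch

def ray_aabb_intersect_reference(rays_o, rays_d, aabbs):
    # rays_o, rays_d: (n_rays, 3); aabbs: (m, 6) as {xmin, ymin, zmin, xmax, ymax, zmax}
    # Assumes no direction component is exactly zero (the CUDA kernel handles that case).
    inv_d = 1.0 / rays_d
    lo = (aabbs[None, :, :3] - rays_o[:, None, :]) * inv_d[:, None, :]  # (n_rays, m, 3)
    hi = (aabbs[None, :, 3:] - rays_o[:, None, :]) * inv_d[:, None, :]  # (n_rays, m, 3)
    t_mins = torch.minimum(lo, hi).amax(dim=-1)  # latest entry across the three slabs
    t_maxs = torch.maximum(lo, hi).amin(dim=-1)  # earliest exit across the three slabs
    hits = (t_maxs >= t_mins) & (t_maxs >= 0)
    return t_mins, t_maxs, hits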
import random import numpy as np import torch from typing import Optional, Sequence from typing import Literal from typing_extensions import Literal from datasets.utils import Rays, namedtuple_map from torch.utils.data._utils.collate import collate, default_collate_fn_map from nerfacc.estimators.occ_grid import OccGridEstimator from nerfacc.estimators.prop_net import PropNetEstimator from nerfacc.grid import ray_aabb_intersect, traverse_grids from nerfacc.volrend import ( accumulate_along_rays_, render_weight_from_density, rendering, )
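The imports above pull in accumulate_along_rays_ and rendering; a rough CPU-only sketch of the per-ray accumulation those helpers perform for flattened samples follows. The helper name and shapes are illustrative assumptions; the lines mirror the accumulation and background composition shown in the rendering snippet of the context.

import torch

def composite_reference(weights, rgbs, t_starts, t_ends, ray_indices, n_rays, render_bkgd=None):
    # weights, t_starts, t_ends: (all_samples,); rgbs: (all_samples, 3); ray_indices: (all_samples,) long
    colors = torch.zeros(n_rays, 3)
    opacities = torch.zeros(n_rays, 1)
    depths = torch.zeros(n_rays, 1)
    colors.index_add_(0, ray_indices, weights[:, None] * rgbs)      # sum_i w_i * rgb_i per ray
    opacities.index_add_(0, ray_indices, weights[:, None])          # sum_i w_i per ray
    midpoints = (t_starts + t_ends)[:, None] / 2.0
    depths.index_add_(0, ray_indices, weights[:, None] * midpoints)
    depths = depths / opacities.clamp_min(torch.finfo(rgbs.dtype).eps)  # expected depth
    if render_bkgd is not None:
        colors = colors + render_bkgd * (1.0 - opacities)           # background composition
    return colors, opacities, depths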
13,479
@torch.no_grad() def render_image_with_occgrid_test( max_samples: int, # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, early_stop_eps: float = 1e-4, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = rays.origins[ray_indices] t_dirs = rays.viewdirs[ray_indices] positions = ( t_origins + t_dirs * (t_starts[:, None] + t_ends[:, None]) / 2.0 ) if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) device = rays.origins.device opacity = torch.zeros(num_rays, 1, device=device) depth = torch.zeros(num_rays, 1, device=device) rgb = torch.zeros(num_rays, 3, device=device) ray_mask = torch.ones(num_rays, device=device).bool() # 1 for synthetic scenes, 4 for real scenes min_samples = 1 if cone_angle == 0 else 4 iter_samples = total_samples = 0 rays_o = rays.origins rays_d = rays.viewdirs near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane) far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane) t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, estimator.aabbs) n_grids = estimator.binaries.size(0) if n_grids > 1: t_sorted, t_indices = torch.sort(torch.cat([t_mins, t_maxs], -1), -1) else: t_sorted = torch.cat([t_mins, t_maxs], -1) t_indices = torch.arange( 0, n_grids * 2, device=t_mins.device, dtype=torch.int64 ).expand(num_rays, n_grids * 2) opc_thre = 1 - early_stop_eps while iter_samples < max_samples: n_alive = ray_mask.sum().item() if n_alive == 0: break # the number of samples to add on each ray n_samples = max(min(num_rays // n_alive, 64), min_samples) iter_samples += n_samples # ray marching (intervals, samples, termination_planes) = traverse_grids( # rays rays_o, # [n_rays, 3] rays_d, # [n_rays, 3] # grids estimator.binaries, # [m, resx, resy, resz] estimator.aabbs, # [m, 6] # options near_planes, # [n_rays] far_planes, # [n_rays] render_step_size, cone_angle, n_samples, True, ray_mask, # pre-compute intersections t_sorted, # [n_rays, m*2] t_indices, # [n_rays, m*2] hits, # [n_rays, m] ) t_starts = intervals.vals[intervals.is_left] t_ends = intervals.vals[intervals.is_right] ray_indices = samples.ray_indices[samples.is_valid] packed_info = samples.packed_info # get rgb and sigma from radiance field rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices) # volume rendering using native cuda scan
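For intuition, the marching loop above sizes each pass by the number of still-alive rays; a tiny numeric illustration of the budget rule n_samples = max(min(num_rays // n_alive, 64), min_samples), with made-up counts:

num_rays, min_samples = 4096, 1   # min_samples would be 4 when cone_angle > 0
for n_alive in (4096, 1024, 64, 3):
    n_samples = max(min(num_rays // n_alive, 64), min_samples)
    print(n_alive, n_samples)     # -> 1, 4, 64, 64 samples per ray per pass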
""" Copyright (c) 2022 Ruilong Li, UC Berkeley. """ try: except ImportError: NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, # test options test_chunk_size: int = 8192, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) sigmas = radiance_field.query_density(positions, t) else: sigmas = radiance_field.query_density(positions) return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) ray_indices, t_starts, t_ends = estimator.sampling( chunk_rays.origins, chunk_rays.viewdirs, sigma_fn=sigma_fn, near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, stratified=radiance_field.training, cone_angle=cone_angle, alpha_thre=alpha_thre, ) rgb, opacity, depth, extras = rendering( t_starts, t_ends, ray_indices, n_rays=chunk_rays.origins.shape[0], rgb_sigma_fn=rgb_sigma_fn, render_bkgd=render_bkgd, ) chunk_results = [rgb, opacity, depth, len(t_starts)] results.append(chunk_results) colors, opacities, depths, n_rendering_samples = [ torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r for r in zip(*results) ] return ( colors.view((*rays_shape[:-1], -1)), opacities.view((*rays_shape[:-1], -1)), depths.view((*rays_shape[:-1], -1)), sum(n_rendering_samples), ) def render_image_with_propnet( # scene radiance_field: torch.nn.Module, proposal_networks: Sequence[torch.nn.Module], estimator: PropNetEstimator, rays: Rays, # rendering options num_samples: int, num_samples_per_prop: Sequence[int], near_plane: Optional[float] = None, far_plane: Optional[float] = None, sampling_type: Literal["uniform", "lindisp"] = "lindisp", opaque_bkgd: bool = True, render_bkgd: Optional[torch.Tensor] = None, # 
train options proposal_requires_grad: bool = False, # test options test_chunk_size: int = 8192, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def prop_sigma_fn(t_starts, t_ends, proposal_network): t_origins = chunk_rays.origins[..., None, :] t_dirs = chunk_rays.viewdirs[..., None, :].detach() positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0 sigmas = proposal_network(positions) if opaque_bkgd: sigmas[..., -1, :] = torch.inf return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[..., None, :] t_dirs = chunk_rays.viewdirs[..., None, :].repeat_interleave( t_starts.shape[-1], dim=-2 ) positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0 rgb, sigmas = radiance_field(positions, t_dirs) if opaque_bkgd: sigmas[..., -1, :] = torch.inf return rgb, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) t_starts, t_ends = estimator.sampling( prop_sigma_fns=[ lambda *args: prop_sigma_fn(*args, p) for p in proposal_networks ], prop_samples=num_samples_per_prop, num_samples=num_samples, n_rays=chunk_rays.origins.shape[0], near_plane=near_plane, far_plane=far_plane, sampling_type=sampling_type, stratified=radiance_field.training, requires_grad=proposal_requires_grad, ) rgb, opacity, depth, extra = rendering( t_starts, t_ends, ray_indices=None, n_rays=None, rgb_sigma_fn=rgb_sigma_fn, render_bkgd=render_bkgd, ) ray_indices = torch.arange(0, t_starts.shape[0], device=t_starts.device).repeat_interleave(t_starts.shape[1]).flatten() unique_indices, inverse = torch.unique(ray_indices, sorted=True, return_inverse=True) binnums = torch.bincount(inverse) start_positions = torch.cat([torch.tensor([0], dtype=torch.int, device=ray_indices.device), torch.cumsum(binnums, 0)[:-1]]) ray_a = torch.stack([unique_indices.int(), start_positions.int(), binnums], dim=1) chunk_results = [rgb, opacity, depth, extra, ray_a] results.append(chunk_results) colors, opacities, depths, extras, rays_a = collate( results, collate_fn_map={ **default_collate_fn_map, torch.Tensor: lambda x, **_: torch.cat(x, 0), }, ) distkwarg = {"ws": extras['weights'].flatten(), "deltas": (t_ends - t_starts).flatten(), "ts": t_starts.flatten(), "rays_a": rays_a} return ( colors.view((*rays_shape[:-1], -1)), opacities.view((*rays_shape[:-1], -1)), depths.view((*rays_shape[:-1], -1)), extras, distkwarg ) @torch.no_grad() def render_image_with_occgrid_test( max_samples: int, # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, early_stop_eps: float = 1e-4, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = rays.origins[ray_indices] 
t_dirs = rays.viewdirs[ray_indices] positions = ( t_origins + t_dirs * (t_starts[:, None] + t_ends[:, None]) / 2.0 ) if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) device = rays.origins.device opacity = torch.zeros(num_rays, 1, device=device) depth = torch.zeros(num_rays, 1, device=device) rgb = torch.zeros(num_rays, 3, device=device) ray_mask = torch.ones(num_rays, device=device).bool() # 1 for synthetic scenes, 4 for real scenes min_samples = 1 if cone_angle == 0 else 4 iter_samples = total_samples = 0 rays_o = rays.origins rays_d = rays.viewdirs near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane) far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane) t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, estimator.aabbs) n_grids = estimator.binaries.size(0) if n_grids > 1: t_sorted, t_indices = torch.sort(torch.cat([t_mins, t_maxs], -1), -1) else: t_sorted = torch.cat([t_mins, t_maxs], -1) t_indices = torch.arange( 0, n_grids * 2, device=t_mins.device, dtype=torch.int64 ).expand(num_rays, n_grids * 2) opc_thre = 1 - early_stop_eps while iter_samples < max_samples: n_alive = ray_mask.sum().item() if n_alive == 0: break # the number of samples to add on each ray n_samples = max(min(num_rays // n_alive, 64), min_samples) iter_samples += n_samples # ray marching (intervals, samples, termination_planes) = traverse_grids( # rays rays_o, # [n_rays, 3] rays_d, # [n_rays, 3] # grids estimator.binaries, # [m, resx, resy, resz] estimator.aabbs, # [m, 6] # options near_planes, # [n_rays] far_planes, # [n_rays] render_step_size, cone_angle, n_samples, True, ray_mask, # pre-compute intersections t_sorted, # [n_rays, m*2] t_indices, # [n_rays, m*2] hits, # [n_rays, m] ) t_starts = intervals.vals[intervals.is_left] t_ends = intervals.vals[intervals.is_right] ray_indices = samples.ray_indices[samples.is_valid] packed_info = samples.packed_info # get rgb and sigma from radiance field rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices) # volume rendering using native cuda scan
weights, _, alphas = render_weight_from_density(
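The call begun in the line above computes w_i = T_i (1 - exp(-sigma_i * delta_i)) with T_i = exp(-sum_{j<i} sigma_j * delta_j), as documented in the context. A minimal batched PyTorch sketch of that formula is below; the name and batched-only scope are assumptions, since the real function also handles flattened inputs, packed_info/ray_indices, and prefix transmittance.

import torch

def render_weight_from_density_reference(t_starts, t_ends, sigmas):
    # Batched case only: all tensors are (n_rays, n_samples).
    deltas = t_ends - t_starts
    alphas = 1.0 - torch.exp(-sigmas * deltas)  # per-sample opacity
    sdelta = sigmas * deltas
    # Transmittance before sample i: exclusive cumulative sum of sigma * delta
    trans = torch.exp(-torch.cumsum(
        torch.cat([torch.zeros_like(sdelta[..., :1]), sdelta[..., :-1]], dim=-1), dim=-1))
    weights = trans * alphas
    return weights, trans, alphas

t_starts = torch.tensor([[0.0, 1.0, 2.0]])
t_ends = torch.tensor([[1.0, 2.0, 3.0]])
sigmas = torch.tensor([[0.4, 0.8, 0.1]])
weights, trans, alphas = render_weight_from_density_reference(t_starts, t_ends, sigmas)
print(weights)  # ~[[0.33, 0.37, 0.03]], matching the docstring example for the first ray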
5
2023-11-27 22:12:55+00:00
16k
facebookresearch/SOC-matching
main.py
[ { "identifier": "get_folder_name", "path": "SOC_matching/utils.py", "snippet": "def get_folder_name(cfg):\n folder_name = (\n cfg.method.algorithm\n + \"_\"\n + cfg.method.setting\n + \"_\"\n + str(cfg.method.lmbd)\n + \"_\"\n + str(cfg.method.T)\n + \"_\"\n + str(cfg.method.num_steps)\n + \"_\"\n + str(cfg.method.use_warm_start)\n + \"_\"\n + str(cfg.method.seed)\n + \"_\"\n + str(cfg.optim.batch_size)\n + \"_\"\n + str(cfg.optim.M_lr)\n + \"_\"\n + str(cfg.optim.nabla_V_lr)\n )\n return folder_name" }, { "identifier": "get_file_name", "path": "SOC_matching/utils.py", "snippet": "def get_file_name(folder_name, num_iterations=0, last=False):\n if last:\n return folder_name + \"/last.pkl\"\n file_name = str(num_iterations)\n print(f\"folder_name: {folder_name}\")\n return folder_name + \"/\" + file_name + \".pkl\"" }, { "identifier": "control_objective", "path": "SOC_matching/utils.py", "snippet": "def control_objective(\n sde, x0, ts, lmbd, batch_size, total_n_samples=65536, verbose=False\n):\n n_batches = int(total_n_samples // batch_size)\n effective_n_samples = n_batches * batch_size\n for k in range(n_batches):\n state0 = x0.repeat(batch_size, 1)\n (\n _,\n _,\n _,\n _,\n log_path_weight_deterministic,\n _,\n log_terminal_weight,\n _,\n ) = stochastic_trajectories(\n sde,\n state0,\n ts.to(state0),\n lmbd,\n verbose=verbose,\n )\n if k == 0:\n ctrl_losses = -lmbd * (log_path_weight_deterministic + log_terminal_weight)\n else:\n ctrl_loss = -lmbd * (log_path_weight_deterministic + log_terminal_weight)\n ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)\n if k % 32 == 31:\n print(f\"Batch {k+1}/{n_batches} done\")\n return torch.mean(ctrl_losses), torch.std(ctrl_losses) / np.sqrt(\n effective_n_samples - 1\n )" }, { "identifier": "save_results", "path": "SOC_matching/utils.py", "snippet": "def save_results(results, folder_name, file_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n with open(file_name, \"wb\") as f:\n pickle.dump(results, f)" }, { "identifier": "compute_EMA", "path": "SOC_matching/utils.py", "snippet": "def compute_EMA(value, EMA_value, EMA_coeff=0.01, itr=0):\n itr_avg = int(np.floor(1 / EMA_coeff))\n if itr == 0:\n return value\n elif itr <= itr_avg:\n return (value + itr * EMA_value) / (itr + 1)\n else:\n return EMA_coeff * value + (1 - EMA_coeff) * EMA_value" }, { "identifier": "normalization_constant", "path": "SOC_matching/utils.py", "snippet": "def normalization_constant(\n sde, x0, ts, cfg, n_batches_normalization=512, ground_truth_control=None\n):\n log_weights_list = []\n weights_list = []\n\n if ground_truth_control is not None:\n norm_sqd_diff_mean = 0\n for k in range(n_batches_normalization):\n (\n states,\n _,\n _,\n _,\n log_path_weight_deterministic,\n log_path_weight_stochastic,\n log_terminal_weight,\n controls,\n ) = stochastic_trajectories(\n sde,\n x0,\n ts.to(x0),\n cfg.method.lmbd,\n )\n log_weights = (\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n log_weights_list.append(log_weights)\n weights = torch.exp(\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n weights_list.append(weights)\n\n if ground_truth_control is not None:\n gt_controls = ground_truth_control(ts, states, t_is_tensor=True)[\n :-1, :, :\n ].detach()\n norm_sqd_diff = torch.sum(\n (gt_controls - controls) ** 2\n * weights.unsqueeze(0).unsqueeze(2)\n / (gt_controls.shape[0] * gt_controls.shape[1])\n )\n norm_sqd_diff_mean += norm_sqd_diff\n if k % 32 == 31:\n 
print(f\"Batch {k+1}/{n_batches_normalization} done\")\n if ground_truth_control is not None:\n norm_sqd_diff_mean = norm_sqd_diff_mean / n_batches_normalization\n else:\n norm_sqd_diff_mean = None\n\n log_weights = torch.stack(log_weights_list, dim=1)\n weights = torch.stack(weights_list, dim=1)\n\n print(\n f\"Average and std. dev. of log_weights for all batches: {torch.mean(log_weights)} {torch.std(log_weights)}\"\n )\n\n normalization_const = torch.mean(weights)\n normalization_const_std_error = torch.std(weights) / np.sqrt(\n weights.shape[0] * weights.shape[1] - 1\n )\n return normalization_const, normalization_const_std_error, norm_sqd_diff_mean" }, { "identifier": "SOC_Solver", "path": "SOC_matching/method.py", "snippet": "class SOC_Solver(nn.Module):\n noise_type = \"diagonal\"\n sde_type = \"ito\"\n\n def __init__(\n self,\n neural_sde,\n x0,\n ut,\n T=1.0,\n num_steps=100,\n lmbd=1.0,\n d=2,\n sigma=torch.eye(2),\n ):\n super().__init__()\n self.dim = neural_sde.dim\n self.neural_sde = neural_sde\n self.x0 = x0\n self.ut = ut\n self.T = T\n self.ts = torch.linspace(0, T, num_steps + 1).to(x0.device)\n self.num_steps = num_steps\n self.dt = T / num_steps\n self.lmbd = lmbd\n self.d = d\n self.y0 = torch.nn.Parameter(torch.randn(1, device=x0.device))\n self.sigma = sigma\n\n def control(self, t0, x0):\n x0 = x0.reshape(-1, self.dim)\n t0_expanded = t0.reshape(-1, 1).expand(x0.shape[0], 1)\n tx = torch.cat([t0_expanded, x0], dim=-1)\n nabla_V = self.neural_sde.nabla_V(tx)\n learned_control = -torch.einsum(\n \"ij,bj->bi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n return learned_control\n\n def control_objective(self, batch_size, total_n_samples=65536):\n n_batches = int(total_n_samples // batch_size)\n effective_n_samples = n_batches * batch_size\n for k in range(n_batches):\n state0 = self.x0.repeat(batch_size, 1)\n (\n states,\n _,\n _,\n _,\n log_path_weight_deterministic,\n _,\n log_terminal_weight,\n _,\n ) = utils.stochastic_trajectories(\n self.neural_sde,\n state0,\n self.ts.to(state0),\n self.lmbd,\n )\n if k == 0:\n ctrl_losses = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n trajectory = states\n else:\n ctrl_loss = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n ctrl_losses = torch.cat((ctrl_losses, ctrl_loss), 0)\n if k % 32 == 31:\n print(f\"Batch {k+1}/{n_batches} done\")\n return (\n torch.mean(ctrl_losses),\n torch.std(ctrl_losses) / np.sqrt(effective_n_samples - 1),\n trajectory,\n )\n\n def loss(\n self,\n batch_size,\n compute_L2_error=False,\n optimal_control=None,\n compute_control_objective=False,\n algorithm=\"SOCM_const_M\",\n add_weights=False,\n total_n_samples=65536,\n verbose=False,\n u_warm_start=None,\n use_warm_start=True,\n use_stopping_time=False,\n ):\n\n state0 = self.x0.repeat(batch_size, 1)\n d = state0.shape[1]\n detach = algorithm != \"rel_entropy\"\n (\n states,\n noises,\n stop_indicators,\n fractional_timesteps,\n log_path_weight_deterministic,\n log_path_weight_stochastic,\n log_terminal_weight,\n controls,\n ) = utils.stochastic_trajectories(\n self.neural_sde,\n state0,\n self.ts.to(state0),\n self.lmbd,\n detach=detach,\n )\n unsqueezed_stop_indicators = stop_indicators.unsqueeze(2)\n weight = torch.exp(\n log_path_weight_deterministic\n + log_path_weight_stochastic\n + log_terminal_weight\n )\n\n if algorithm == \"rel_entropy\":\n ctrl_losses = -self.lmbd * (\n log_path_weight_deterministic + log_terminal_weight\n )\n objective = torch.mean(ctrl_losses)\n weight = 
weight.detach()\n learned_control = controls.detach()\n else:\n ts_repeat = self.ts.unsqueeze(1).unsqueeze(2).repeat(1, states.shape[1], 1)\n tx = torch.cat([ts_repeat, states], dim=-1)\n tx_reshape = torch.reshape(tx, (-1, tx.shape[2]))\n\n # Evaluate nabla_V\n nabla_V = self.neural_sde.nabla_V(tx_reshape)\n nabla_V = torch.reshape(nabla_V, states.shape)\n\n if u_warm_start and use_warm_start:\n sigma_inverse_transpose = torch.transpose(\n torch.inverse(self.sigma), 0, 1\n )\n u_warm_start_eval = u_warm_start(self.ts, states).detach()\n nabla_V = nabla_V - torch.einsum(\n \"ij,abj->abi\", sigma_inverse_transpose, u_warm_start_eval\n )\n\n if algorithm == \"SOCM_const_M\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n least_squares_target_integrand_term_1 = (\n self.neural_sde.nabla_f(self.ts[0], states)\n )[:-1, :, :]\n least_squares_target_integrand_term_2 = -np.sqrt(self.lmbd) * torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n least_squares_target_integrand_term_3 = -torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n least_squares_target_terminal = self.neural_sde.nabla_g(states[-1, :, :])\n\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_1[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_2[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_3_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_3[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_3\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n\n cumulative_sum_least_squares_term_1 = torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n )\n cumulative_sum_least_squares_term_2 = torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n )\n cumulative_sum_least_squares_term_3 = torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n ).unsqueeze(0) - torch.cumsum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n )\n least_squares_target = (\n cumulative_sum_least_squares_term_1\n + cumulative_sum_least_squares_term_2\n + cumulative_sum_least_squares_term_3\n + least_squares_target_terminal.unsqueeze(0)\n )\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), least_squares_target\n )\n\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM_exp\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n exp_factor = torch.exp(-self.gamma * self.ts)\n identity = torch.eye(d).to(self.x0.device)\n 
least_squares_target_integrand_term_1 = (\n exp_factor.unsqueeze(1).unsqueeze(2)\n * self.neural_sde.nabla_f(self.ts[0], states)\n )[:-1, :, :]\n least_squares_target_integrand_term_2 = exp_factor[:-1].unsqueeze(\n 1\n ).unsqueeze(2) * (\n -np.sqrt(self.lmbd)\n * torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :]\n + self.gamma * identity,\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n )\n least_squares_target_integrand_term_3 = exp_factor[:-1].unsqueeze(\n 1\n ).unsqueeze(2) * (\n -torch.einsum(\n \"abij,abj->abi\",\n self.neural_sde.nabla_b(self.ts[0], states)[:-1, :, :, :]\n + self.gamma * identity,\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n )\n least_squares_target_terminal = torch.exp(\n -self.gamma * (self.T - self.ts)\n ).unsqueeze(1).unsqueeze(2) * self.neural_sde.nabla_g(\n states[-1, :, :]\n ).unsqueeze(\n 0\n )\n\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_1[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_2[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n least_squares_target_integrand_term_3_times_dt = torch.cat(\n (\n torch.zeros_like(\n least_squares_target_integrand_term_3[0, :, :]\n ).unsqueeze(0),\n least_squares_target_integrand_term_3\n * dts.unsqueeze(1).unsqueeze(2),\n ),\n 0,\n )\n\n inv_exp_factor = 1 / exp_factor\n cumsum_least_squares_term_1 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(least_squares_target_integrand_term_1_times_dt, dim=0)\n )\n cumsum_least_squares_term_2 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=0\n )\n )\n cumsum_least_squares_term_3 = inv_exp_factor.unsqueeze(1).unsqueeze(2) * (\n torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=0\n ).unsqueeze(0)\n - torch.cumsum(least_squares_target_integrand_term_3_times_dt, dim=0)\n )\n\n least_squares_target = (\n cumsum_least_squares_term_1\n + cumsum_least_squares_term_2\n + cumsum_least_squares_term_3\n + least_squares_target_terminal\n )\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), least_squares_target\n )\n\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM\":\n sigma_inverse_transpose = torch.transpose(torch.inverse(self.sigma), 0, 1)\n identity = torch.eye(d).to(self.x0.device)\n\n if use_stopping_time:\n sum_M = lambda t, s, stopping_timestep_values: self.neural_sde.M(\n t, s, stopping_timestep_values\n ).sum(dim=0)\n\n derivative_M_0 = functorch.jacrev(sum_M, argnums=1)\n derivative_M = lambda t, s, stopping_timestep_values: torch.transpose(\n torch.transpose(\n torch.transpose(\n derivative_M_0(t, s, stopping_timestep_values), 2, 3\n ),\n 1,\n 2,\n ),\n 0,\n 
1,\n )\n\n M_evals = torch.zeros(len(self.ts), len(self.ts), batch_size, d, d).to(\n self.ts.device\n )\n derivative_M_evals = torch.zeros(\n len(self.ts), len(self.ts), batch_size, d, d\n ).to(self.ts.device)\n\n else:\n sum_M = lambda t, s: self.neural_sde.M(t, s).sum(dim=0)\n\n derivative_M_0 = functorch.jacrev(sum_M, argnums=1)\n derivative_M = lambda t, s: torch.transpose(\n torch.transpose(derivative_M_0(t, s), 1, 2), 0, 1\n )\n\n M_evals = torch.zeros(len(self.ts), len(self.ts), d, d).to(\n self.ts.device\n )\n derivative_M_evals = torch.zeros(len(self.ts), len(self.ts), d, d).to(\n self.ts.device\n )\n\n if use_stopping_time:\n stopping_function_output_int = (self.neural_sde.Phi(states) > 0).to(\n torch.int\n )\n stopping_timestep = (\n torch.sum(stopping_function_output_int, dim=0) - 1\n ) / (len(self.ts) - 1)\n stopping_timestep_vector = []\n\n s_vector = []\n t_vector = []\n for k, t in enumerate(self.ts):\n s_vector.append(\n torch.linspace(t, self.T, self.num_steps + 1 - k).to(self.ts.device)\n )\n t_vector.append(\n t * torch.ones(self.num_steps + 1 - k).to(self.ts.device)\n )\n if use_stopping_time:\n stopping_timestep_vector.append(\n stopping_timestep.unsqueeze(0).repeat(self.num_steps + 1 - k, 1)\n )\n s_vector = torch.cat(s_vector)\n t_vector = torch.cat(t_vector)\n if use_stopping_time:\n stopping_timestep_vector = torch.cat(stopping_timestep_vector, dim=0)\n M_evals_all = self.neural_sde.M(\n t_vector, s_vector, stopping_timestep_vector\n )\n derivative_M_evals_all = torch.nan_to_num(\n derivative_M(t_vector, s_vector, stopping_timestep_vector)\n )\n counter = 0\n for k, t in enumerate(self.ts):\n M_evals[k, k:, :, :, :] = M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :, :\n ]\n derivative_M_evals[k, k:, :, :, :] = derivative_M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :, :\n ]\n counter += self.num_steps + 1 - k\n else:\n M_evals_all = self.neural_sde.M(\n t_vector,\n s_vector,\n )\n derivative_M_evals_all = derivative_M(\n t_vector,\n s_vector,\n )\n counter = 0\n for k, t in enumerate(self.ts):\n M_evals[k, k:, :, :] = M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :\n ]\n derivative_M_evals[k, k:, :, :] = derivative_M_evals_all[\n counter : (counter + self.num_steps + 1 - k), :, :\n ]\n counter += self.num_steps + 1 - k\n\n if use_stopping_time:\n least_squares_target_integrand_term_1 = torch.einsum(\n \"ijmkl,jml->ijmk\",\n M_evals,\n self.neural_sde.nabla_f(self.ts, states),\n )[:, :-1, :, :]\n else:\n least_squares_target_integrand_term_1 = torch.einsum(\n \"ijkl,jml->ijmk\",\n M_evals,\n self.neural_sde.nabla_f(self.ts, states),\n )[:, :-1, :, :]\n\n if use_stopping_time:\n M_nabla_b_term = (\n torch.einsum(\n \"ijmkl,jmln->ijmkn\",\n M_evals,\n self.neural_sde.nabla_b(self.ts, states),\n )\n - derivative_M_evals\n )\n least_squares_target_integrand_term_2 = -np.sqrt(\n self.lmbd\n ) * torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n else:\n M_nabla_b_term = torch.einsum(\n \"ijkl,jmln->ijmkn\",\n M_evals,\n self.neural_sde.nabla_b(self.ts, states),\n ) - derivative_M_evals.unsqueeze(2)\n least_squares_target_integrand_term_2 = -np.sqrt(\n self.lmbd\n ) * torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, noises),\n )\n\n least_squares_target_integrand_term_3 = -torch.einsum(\n \"ijmkn,jmn->ijmk\",\n M_nabla_b_term[:, :-1, :, :, :],\n 
torch.einsum(\"ij,abj->abi\", sigma_inverse_transpose, controls),\n )\n\n if use_stopping_time:\n M_evals_final = M_evals[:, -1, :, :, :]\n least_squares_target_terminal = torch.einsum(\n \"imkl,ml->imk\",\n M_evals_final,\n self.neural_sde.nabla_g(states[-1, :, :]),\n )\n else:\n M_evals_final = M_evals[:, -1, :, :]\n least_squares_target_terminal = torch.einsum(\n \"ikl,ml->imk\",\n M_evals_final,\n self.neural_sde.nabla_g(states[-1, :, :]),\n )\n\n if use_stopping_time:\n least_squares_target_integrand_term_1_times_dt = (\n least_squares_target_integrand_term_1\n * fractional_timesteps.unsqueeze(0).unsqueeze(3)\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = (\n least_squares_target_integrand_term_2\n * torch.sqrt(fractional_timesteps).unsqueeze(0).unsqueeze(3)\n )\n least_squares_target_integrand_term_3_times_dt = (\n least_squares_target_integrand_term_3\n * fractional_timesteps.unsqueeze(0).unsqueeze(3)\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n least_squares_target_integrand_term_1_times_dt = (\n least_squares_target_integrand_term_1\n * dts.unsqueeze(1).unsqueeze(2).unsqueeze(0)\n )\n least_squares_target_integrand_term_2_times_sqrt_dt = (\n least_squares_target_integrand_term_2\n * torch.sqrt(dts).unsqueeze(1).unsqueeze(2)\n )\n least_squares_target_integrand_term_3_times_dt = (\n least_squares_target_integrand_term_3 * dts.unsqueeze(1).unsqueeze(2)\n )\n\n cumsum_least_squares_term_1 = torch.sum(\n least_squares_target_integrand_term_1_times_dt, dim=1\n )\n cumsum_least_squares_term_2 = torch.sum(\n least_squares_target_integrand_term_2_times_sqrt_dt, dim=1\n )\n cumsum_least_squares_term_3 = torch.sum(\n least_squares_target_integrand_term_3_times_dt, dim=1\n )\n\n least_squares_target = (\n cumsum_least_squares_term_1\n + cumsum_least_squares_term_2\n + cumsum_least_squares_term_3\n + least_squares_target_terminal\n )\n\n if use_stopping_time:\n control_learned = -unsqueezed_stop_indicators * torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -unsqueezed_stop_indicators * torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n least_squares_target,\n )\n else:\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n control_target = -torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n least_squares_target,\n )\n\n if use_stopping_time:\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (torch.sum(stop_indicators))\n else:\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n if algorithm == \"SOCM_adjoint\":\n nabla_f_evals = self.neural_sde.nabla_f(self.ts, states)\n nabla_b_evals = self.neural_sde.nabla_b(self.ts, states)\n nabla_g_evals = self.neural_sde.nabla_g(states[-1, :, :])\n\n # print(f'nabla_b_evals.shape: {nabla_b_evals.shape}')\n\n a_vectors = torch.zeros_like(states)\n a = nabla_g_evals\n a_vectors[-1, :, :] = a\n\n for k in range(1,len(self.ts)):\n # a += self.dt * (nabla_f_evals[-1-k, :, :] + torch.einsum(\"mkl,ml->mk\", nabla_b_evals[-1-k, :, :, :], a))\n a += self.dt * ((nabla_f_evals[-1-k, :, :] + nabla_f_evals[-k, :, :]) / 2 + torch.einsum(\"mkl,ml->mk\", (nabla_b_evals[-1-k, :, :, :] + nabla_b_evals[-k, :, :, :]) / 2, a))\n a_vectors[-1-k, :, :] = a\n\n control_learned = -torch.einsum(\n \"ij,...j->...i\", torch.transpose(self.sigma, 0, 1), nabla_V\n 
)\n control_target = -torch.einsum(\n \"ij,...j->...i\",\n torch.transpose(self.sigma, 0, 1),\n a_vectors,\n )\n objective = torch.sum(\n (control_learned - control_target) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n ) / (states.shape[0] * states.shape[1])\n\n elif algorithm == \"cross_entropy\":\n learned_controls = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n integrand_term_1 = -(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * controls, dim=2\n )\n integrand_term_2 = (1 / (2 * self.lmbd)) * torch.sum(\n learned_controls**2, dim=2\n )[:-1, :]\n deterministic_integrand = integrand_term_1 + integrand_term_2\n stochastic_integrand = -np.sqrt(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * noises, dim=2\n )\n\n if use_stopping_time:\n deterministic_integrand_times_dt = (\n deterministic_integrand * fractional_timesteps\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n fractional_timesteps\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n deterministic_integrand_times_dt = (\n deterministic_integrand * dts.unsqueeze(1)\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n dts\n ).unsqueeze(1)\n\n deterministic_term = torch.sum(deterministic_integrand_times_dt, dim=0)\n stochastic_term = torch.sum(stochastic_integrand_times_sqrt_dt, dim=0)\n\n objective = torch.mean((deterministic_term + stochastic_term) * weight)\n\n elif (\n algorithm == \"variance\"\n or algorithm == \"log-variance\"\n or algorithm == \"moment\"\n ):\n learned_controls = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n integrand_term_1 = -(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * controls, dim=2\n )\n integrand_term_2 = (1 / (2 * self.lmbd)) * torch.sum(\n learned_controls**2, dim=2\n )[:-1, :]\n integrand_term_3 = (\n -(1 / self.lmbd) * self.neural_sde.f(self.ts[0], states)[:-1, :]\n )\n deterministic_integrand = (\n integrand_term_1 + integrand_term_2 + integrand_term_3\n )\n stochastic_integrand = -np.sqrt(1 / self.lmbd) * torch.sum(\n learned_controls[:-1, :, :] * noises, dim=2\n )\n if use_stopping_time:\n deterministic_integrand = (\n deterministic_integrand * stop_indicators[:-1, :]\n )\n stochastic_integrand = stochastic_integrand * stop_indicators[:-1, :]\n\n if use_stopping_time:\n deterministic_integrand_times_dt = (\n deterministic_integrand * fractional_timesteps\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n fractional_timesteps\n )\n else:\n dts = self.ts[1:] - self.ts[:-1]\n deterministic_integrand_times_dt = (\n deterministic_integrand * dts.unsqueeze(1)\n )\n stochastic_integrand_times_sqrt_dt = stochastic_integrand * torch.sqrt(\n dts\n ).unsqueeze(1)\n\n deterministic_term = torch.sum(deterministic_integrand_times_dt, dim=0)\n stochastic_term = torch.sum(stochastic_integrand_times_sqrt_dt, dim=0)\n g_term = -(1 / self.lmbd) * self.neural_sde.g(states[-1, :, :])\n if algorithm == \"log-variance\":\n sum_terms = deterministic_term + stochastic_term + g_term\n elif algorithm == \"variance\":\n sum_terms = torch.exp(deterministic_term + stochastic_term + g_term)\n elif algorithm == \"moment\":\n sum_terms = deterministic_term + stochastic_term + g_term + self.y0\n\n if add_weights:\n weight_2 = weight\n else:\n weight_2 = torch.ones_like(weight)\n if algorithm == \"log-variance\" or algorithm == \"variance\":\n objective = (\n len(sum_terms)\n / (len(sum_terms) - 1)\n * (\n torch.mean(sum_terms**2 * 
weight_2)\n - torch.mean(sum_terms * weight_2) ** 2\n )\n )\n elif algorithm == \"moment\":\n objective = torch.mean(sum_terms**2 * weight_2)\n\n if compute_L2_error:\n if algorithm == \"rel_entropy\":\n target_control = optimal_control(self.ts, states, t_is_tensor=True)[\n :-1, :, :\n ].detach()\n else:\n target_control = optimal_control(self.ts, states, t_is_tensor=True)\n if algorithm != \"rel_entropy\":\n learned_control = -torch.einsum(\n \"ij,abj->abi\", torch.transpose(self.sigma, 0, 1), nabla_V\n )\n norm_sqd_diff = torch.sum(\n (target_control - learned_control) ** 2\n * weight.unsqueeze(0).unsqueeze(2)\n / (target_control.shape[0] * target_control.shape[1])\n )\n else:\n norm_sqd_diff = None\n\n if compute_control_objective:\n ctrl_loss_mean, ctrl_loss_std_err, trajectory = self.control_objective(\n batch_size, total_n_samples=total_n_samples\n )\n else:\n ctrl_loss_mean = None\n ctrl_loss_std_err = None\n trajectory = None\n\n if verbose:\n # To print amount of memory used in GPU\n nvidia_smi.nvmlInit()\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)\n # card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate\n info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n print(\"Total memory:\", info.total / 1048576, \"MiB\")\n print(\"Free memory:\", info.free / 1048576, \"MiB\")\n print(\"Used memory:\", info.used / 1048576, \"MiB\")\n nvidia_smi.nvmlShutdown()\n\n return (\n objective,\n norm_sqd_diff,\n ctrl_loss_mean,\n ctrl_loss_std_err,\n trajectory,\n torch.mean(weight),\n torch.std(weight),\n stop_indicators,\n )" }, { "identifier": "define_variables", "path": "SOC_matching/experiment_settings/settings.py", "snippet": "def define_variables(cfg, ts):\n if (\n cfg.method.setting == \"OU_quadratic_easy\"\n or cfg.method.setting == \"OU_quadratic_hard\"\n ):\n if cfg.method.d == 2:\n x0 = torch.tensor([0.4, 0.6]).to(cfg.method.device)\n else:\n x0 = 0.5 * torch.randn(cfg.method.d).to(cfg.method.device)\n print(f\"x0: {x0}\")\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n if cfg.method.setting == \"OU_quadratic_hard\":\n A = 1.0 * torch.eye(cfg.method.d).to(cfg.method.device)\n P = 1.0 * torch.eye(cfg.method.d).to(cfg.method.device)\n Q = 0.5 * torch.eye(cfg.method.d).to(cfg.method.device)\n elif cfg.method.setting == \"OU_quadratic_easy\":\n A = 0.2 * torch.eye(cfg.method.d).to(cfg.method.device)\n P = 0.2 * torch.eye(cfg.method.d).to(cfg.method.device)\n Q = 0.1 * torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, A=A, P=P, Q=Q)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, A=A, P=P, Q=Q\n )\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"OU_linear\":\n x0 = torch.zeros(cfg.method.d).to(cfg.method.device)\n nu = 0.1\n xi = nu * torch.randn(cfg.method.d, cfg.method.d).to(cfg.method.device)\n omega = torch.ones(cfg.method.d).to(cfg.method.device)\n A = -torch.eye(cfg.method.d).to(cfg.method.device) + xi\n sigma = torch.eye(cfg.method.d).to(cfg.method.device) + xi\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, omega=omega, A=A)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, omega=omega, A=A\n )\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"double_well\":\n print(f\"double_well\")\n x0 = 
torch.zeros(cfg.method.d).to(cfg.method.device)\n\n kappa_i = 5\n nu_i = 3\n kappa = torch.ones(cfg.method.d).to(cfg.method.device)\n nu = torch.ones(cfg.method.d).to(cfg.method.device)\n kappa[0] = kappa_i\n kappa[1] = kappa_i\n kappa[2] = kappa_i\n nu[0] = nu_i\n nu[1] = nu_i\n nu[2] = nu_i\n\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(cfg, ts, x0, sigma=sigma, kappa=kappa, nu=nu)\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg, ts, x0, u_warm_start, sigma=sigma, kappa=kappa, nu=nu\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"molecular_dynamics\":\n print(f\"molecular_dynamics\")\n x0 = -torch.ones(cfg.method.d).to(cfg.method.device)\n\n kappa = torch.ones(cfg.method.d).to(cfg.method.device)\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(\n cfg,\n ts,\n x0,\n sigma=sigma,\n kappa=kappa,\n )\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg,\n ts,\n x0,\n u_warm_start,\n sigma=sigma,\n kappa=kappa,\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start\n\n elif cfg.method.setting == \"multiagent_8\":\n print(f\"multiagent_8\")\n x0 = torch.tensor(\n [\n -4.0,\n 4.5,\n -7.0,\n 4.5,\n -4.0,\n 1.5,\n -7.0,\n 1.5,\n -4.0,\n -1.5,\n -7.0,\n -1.5,\n -4.0,\n -4.5,\n -7.0,\n -4.5,\n ]\n ).to(cfg.method.device)\n\n g_center = torch.tensor(\n [\n 4.0,\n 4.5,\n 7.0,\n 4.5,\n 4.0,\n 1.5,\n 7.0,\n 1.5,\n 4.0,\n -1.5,\n 7.0,\n -1.5,\n 4.0,\n -4.5,\n 7.0,\n -4.5,\n ]\n ).to(cfg.method.device)\n g_coeff = 2.00\n f_coeff = 0.05\n\n sigma = torch.eye(cfg.method.d).to(cfg.method.device)\n\n optimal_sde = ground_truth_control(\n cfg,\n ts,\n x0,\n sigma=sigma,\n g_center=g_center,\n g_coeff=g_coeff,\n f_coeff=f_coeff,\n )\n u_warm_start = set_warm_start(cfg, optimal_sde, x0, sigma)\n neural_sde = define_neural_sde(\n cfg,\n ts,\n x0,\n u_warm_start,\n sigma=sigma,\n g_center=g_center,\n g_coeff=g_coeff,\n f_coeff=f_coeff,\n )\n\n return x0, sigma, optimal_sde, neural_sde, u_warm_start" } ]
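A tiny usage sketch of compute_EMA from the context above, assuming the SOC_matching package is importable and with made-up loss values: for the first roughly 1/EMA_coeff iterations it reduces to a plain running mean, after which it switches to the usual exponential moving average update.

from SOC_matching.utils import compute_EMA

EMA_loss, EMA_coeff = 0.0, 0.01
for itr, loss in enumerate([2.0, 1.5, 1.0, 0.5]):
    EMA_loss = compute_EMA(loss, EMA_loss, EMA_coeff=EMA_coeff, itr=itr)
    print(itr, EMA_loss)  # 2.0, 1.75, 1.5, 1.25 -- running mean while itr <= 1/EMA_coeff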
import torch import sys import logging import os import time import json import hydra import traceback from tqdm.notebook import tqdm from omegaconf import DictConfig from SOC_matching.utils import ( get_folder_name, get_file_name, control_objective, save_results, compute_EMA, normalization_constant, ) from SOC_matching.method import ( SOC_Solver, ) from SOC_matching.experiment_settings.settings import define_variables
11,786
logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg) file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations) EMA_loss = 0 EMA_norm_sqd_diff = 0 EMA_coeff = 0.01 EMA_weight_mean_coeff = 0.002 x0, sigma, optimal_sde, neural_sde, u_warm_start = define_variables(cfg, ts) if optimal_sde is not None: ground_truth_control = optimal_sde.u else: ground_truth_control = None state0 = x0.repeat(cfg.optim.batch_size, 1) ########### Compute normalization constant and control L2 error for initial control ############ print( f"Estimating normalization constant and control L2 error for initial control..." ) ( normalization_const, normalization_const_std_error, norm_sqd_diff_mean, ) = normalization_constant( neural_sde, state0, ts, cfg, n_batches_normalization=512, ground_truth_control=ground_truth_control, ) print( f"Normalization_constant (mean and std. error): {normalization_const:5.8E} {normalization_const_std_error:5.8E}" ) if ground_truth_control is not None: print( f"Control L2 error for initial control: {norm_sqd_diff_mean / normalization_const}" ) ########### Compute control loss for optimal control ############ if optimal_sde is not None: ( optimal_control_objective_mean, optimal_control_objective_std_error, ) = control_objective( optimal_sde, x0, ts, cfg.method.lmbd, cfg.optim.batch_size, total_n_samples=cfg.method.n_samples_control, verbose=False, ) print( f"Optimal control loss mean: {optimal_control_objective_mean:5.10f}, Optimal control loss std. error: {optimal_control_objective_std_error:5.10f}" )
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory. log = logging.getLogger(__name__) @hydra.main(version_base=None, config_path="configs", config_name="soc") def main(cfg: DictConfig): logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg) file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations) EMA_loss = 0 EMA_norm_sqd_diff = 0 EMA_coeff = 0.01 EMA_weight_mean_coeff = 0.002 x0, sigma, optimal_sde, neural_sde, u_warm_start = define_variables(cfg, ts) if optimal_sde is not None: ground_truth_control = optimal_sde.u else: ground_truth_control = None state0 = x0.repeat(cfg.optim.batch_size, 1) ########### Compute normalization constant and control L2 error for initial control ############ print( f"Estimating normalization constant and control L2 error for initial control..." ) ( normalization_const, normalization_const_std_error, norm_sqd_diff_mean, ) = normalization_constant( neural_sde, state0, ts, cfg, n_batches_normalization=512, ground_truth_control=ground_truth_control, ) print( f"Normalization_constant (mean and std. error): {normalization_const:5.8E} {normalization_const_std_error:5.8E}" ) if ground_truth_control is not None: print( f"Control L2 error for initial control: {norm_sqd_diff_mean / normalization_const}" ) ########### Compute control loss for optimal control ############ if optimal_sde is not None: ( optimal_control_objective_mean, optimal_control_objective_std_error, ) = control_objective( optimal_sde, x0, ts, cfg.method.lmbd, cfg.optim.batch_size, total_n_samples=cfg.method.n_samples_control, verbose=False, ) print( f"Optimal control loss mean: {optimal_control_objective_mean:5.10f}, Optimal control loss std. error: {optimal_control_objective_std_error:5.10f}" )
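For reference, the learned control that SOC_Solver.control (shown in the context above) applies is u(t, x) = -sigma^T nabla_V(t, x). Below is a minimal standalone sketch of that parameterization; the helper name and the toy MLP standing in for the neural SDE's nabla_V network are assumptions.

import torch

def learned_control(nabla_V, sigma, t0, x0):
    # x0: (batch, d); t0: scalar tensor; sigma: (d, d)
    x0 = x0.reshape(-1, x0.shape[-1])
    tx = torch.cat([t0.reshape(-1, 1).expand(x0.shape[0], 1), x0], dim=-1)  # (batch, d + 1)
    grad_V = nabla_V(tx)                                                     # (batch, d)
    return -torch.einsum("ij,bj->bi", sigma.T, grad_V)                       # u = -sigma^T grad V

d = 2
nabla_V = torch.nn.Sequential(torch.nn.Linear(d + 1, 64), torch.nn.ReLU(), torch.nn.Linear(64, d))
u = learned_control(nabla_V, torch.eye(d), torch.tensor(0.3), torch.randn(8, d))
print(u.shape)  # torch.Size([8, 2])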
soc_solver = SOC_Solver(
6
2023-12-04 20:26:18+00:00
16k
yiwenlu66/learning-qp
src/modules/qp_unrolled_network.py
[ { "identifier": "QPSolver", "path": "src/modules/qp_solver.py", "snippet": "class QPSolver(nn.Module):\n \"\"\"\n Solve QP problem:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n where x in R^n, b in R^m.\n \"\"\"\n def __init__(self, device, n, m,\n P=None, Pinv=None, H=None,\n alpha=1, beta=1,\n preconditioner=None, warm_starter=None,\n is_warm_starter_trainable=False,\n keep_X=True,\n symmetric_constraint=False,\n buffered=False,\n ):\n \"\"\"\n Initialize the QP solver.\n\n device: PyTorch device\n\n n, m: dimensions of decision variable x and constraint vector b\n\n P, Pinv, H: Optional matrices that define the QP. If not provided, must be supplied during forward pass. At most one of P and Pinv can be specified.\n\n alpha, beta: Parameters of the PDHG algorithm\n\n preconditioner: Optional preconditioner module\n\n warm_starter: Optional warm start module\n\n is_warm_starter_trainable: Flag for training the warm starter\n\n keep_X: Flag for keeping the primal-dual variable history\n\n symmetric_constraint: Flag for making the inequality constraint symmetric; when True, the constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0.\n\n buffered: Flag for indicating whether the problem is modeled with the buffer variable \\epsilon. When True, it is assumed that the first (n-1) decision variables are the original x, and the last decision variable is \\epsilon; in this case, if symmetric constraint is enabled, then the projection is done as follows:\n 1. Project epsilon to [0, +\\infty)\n 2. Project H_x x + b_x to [-1 - eps, 1 + eps]\n\n Note: Assumes that H is full column rank when m >= n, and full row rank otherwise.\n \"\"\"\n super().__init__()\n self.device = device\n self.n = n\n self.m = m\n create_tensor = lambda t: (torch.tensor(t, dtype=torch.float, device=device).unsqueeze(0) if t is not None else None) if type(t) != torch.Tensor else t.unsqueeze(0)\n assert (P is None) or (Pinv is None), \"At most one of P and Pinv can be specified\"\n self.bP = create_tensor(P) # (1, n, n)\n self.bPinv = create_tensor(Pinv) # (1, n, n)\n self.bH = create_tensor(H) # (1, m, n)\n self.alpha = alpha\n self.beta = beta\n if preconditioner is None:\n # Use dummy preconditioner which gives D=I/beta\n self.preconditioner = Preconditioner(device, n, m, P=P, Pinv=Pinv, H=H, beta=beta, dummy=True)\n else:\n self.preconditioner = preconditioner\n self.warm_starter = warm_starter\n self.is_warm_starter_trainable = is_warm_starter_trainable\n self.keep_X = keep_X\n self.symmetric_constraint = symmetric_constraint\n self.buffered = buffered\n\n self.bIm = torch.eye(m, device=device).unsqueeze(0)\n self.X0 = torch.zeros((1, 2 * self.m), device=self.device)\n\n # If P, H are constant, we can pre-compute the transformation from z to x\n if self.bP is not None and self.bH is not None:\n self.get_sol = self.get_sol_transform(self.bP, self.bH)\n elif self.bPinv is not None and self.bH is not None:\n self.get_sol = self.get_sol_transform(self.bH, bPinv=self.bPinv)\n else:\n self.get_sol = None\n\n # If possible, cache intermediate results in the computation of the affine transform used for each PDHG iteration\n if (P is not None or Pinv is not None) and H is not None and preconditioner is None:\n self.cache_keys = [\"D\", \"tD\", \"tDD\", \"A\"]\n else:\n self.cache_keys = []\n self.cache = {}\n\n def get_sol_transform(self, H, bP=None, bPinv=None):\n \"\"\"\n Computes the transformation from dual variable z to primal variable x.\n\n H: Constraint matrix\n bP, bPinv: Either the matrix P or 
its inverse. Exactly one must be specified. Specifying Pinv can reduce number of linear solves.\n\n Returns: Function that performs the transformation\n \"\"\"\n bH = self.bH if self.bH is not None else H\n if self.m >= self.n:\n return lambda z, q, b: bmv(pinv(bH), z - b)\n else:\n bP_param = bP if bP is not None else bPinv\n op = solve if bP is not None else bma\n def get_sol(z, q, b):\n t = lambda bM: bM.transpose(-1, -2)\n bPinvHt = op(bP_param, t(bH))\n Mt = solve(t(bH @ bPinvHt), t(bPinvHt))\n M = t(Mt)\n bPinvq = op(bP_param, q)\n return bmv(M @ bH, bPinvq) - bPinvq + bmv(M, z - b)\n return get_sol\n\n def get_AB(self, q, b, H=None, P=None, Pinv=None):\n \"\"\"\n Computes matrices A and B used in the PDHG iterations.\n\n q, b: Coefficients in the objective and constraint\n H, P, Pinv: Matrix H, and (either the matrix P or its inverse). Must be specified if not initialized. Specifying Pinv can reduce number of linear solves.\n\n Returns: Matrices A and B\n \"\"\"\n\n def _lookup_or_compute(keys, compute_fn):\n \"\"\"Lookup variable(s) from cache or compute them if not available.\n\n keys: either a variable name (str), or a list of variable names\n compute_fn: function that computes the variable(s) if not available in cache; returns a single value if keys is a string, or a tuple of values if keys is a list\n \"\"\"\n is_single = (type(keys) == str)\n if is_single:\n keys = [keys]\n if not all([key in self.cache for key in keys]):\n values = compute_fn()\n if is_single:\n values = (values,)\n for key, value in zip(keys, values):\n if key in self.cache_keys:\n self.cache[key] = value\n else:\n values = tuple([self.cache[key] for key in keys])\n return values if not is_single else values[0]\n\n # q: (bs, n), b: (bs, m)\n if self.bP is not None or self.bPinv is not None:\n if self.bP is not None:\n bP_param = self.bP\n P_is_inv = False\n else:\n bP_param = self.bPinv\n P_is_inv = True\n else:\n if P is not None:\n bP_param = P\n P_is_inv = False\n else:\n bP_param = Pinv\n P_is_inv = True\n op = bsolve if not P_is_inv else bma\n\n bH = self.bH if self.bH is not None else H\n D, tD = _lookup_or_compute([\"D\", \"tD\"], lambda: self.preconditioner(q, b, bP_param, H, input_P_is_inversed=P_is_inv, output_tD_is_inversed=False)) # (bs, m, m) or (1, m, m)\n mu = bmv(tD, bmv(bH, op(bP_param, q)) - b) # (bs, m)\n tDD = _lookup_or_compute(\"tDD\", lambda: tD @ D)\n\n A = _lookup_or_compute(\"A\", lambda:\n torch.cat([\n torch.cat([tDD, tD], 2),\n torch.cat([-2 * self.alpha * tDD + self.bIm, self.bIm - 2 * self.alpha * tD], 2),\n ], 1) # (bs, 2m, 2m)\n )\n B = torch.cat([\n mu,\n -2 * self.alpha * mu\n ], 1) # (bs, 2m)\n return A, B\n\n def compute_residuals(self, x, z, u, q, b, P=None, H=None, Pinv=None):\n \"\"\"\n Computes the primal and dual residuals.\n\n x, z: Primal variables\n u: Dual variable\n q, b: Coefficients in the objective and constraint\n P, H, Pinv: Optional matrices defining the QP. 
Must be provided if not initialized.\n\n Returns: Primal and dual residuals\n \"\"\"\n # Determine effective P and H matrices\n if self.bP is not None or self.bPinv is not None:\n if self.bP is not None:\n eff_P = self.bP\n P_is_inv = False\n else:\n eff_P = self.bPinv\n P_is_inv = True\n else:\n if P is not None:\n eff_P = P\n P_is_inv = False\n else:\n eff_P = Pinv\n P_is_inv = True\n\n if self.bH is not None:\n eff_H = self.bH\n else:\n eff_H = H\n\n # Compute primal residual: Hx + b - z\n primal_residual = bmv(eff_H, x) + b - z\n\n # Determine the operation for multiplying with P or its inverse\n op = bsolve if P_is_inv else bmv\n\n # Compute dual residual: Px + q + H'u\n dual_residual = op(eff_P, x) + q + bmv(eff_H.transpose(-1, -2), u)\n\n return primal_residual, dual_residual\n\n\n def forward(\n self, q, b,\n P=None, H=None, Pinv=None,\n iters=1000,\n only_last_primal=True,\n return_residuals=False\n ):\n \"\"\"\n Solves the QP problem using PDHG.\n\n q, b: Coefficients in the objective and constraint\n P, H, Pinv: Optional matrices defining the QP, i.e., matrix H, and (either the matrix P or its inverse). Must be provided if not initialized. Using Pinv is more efficient in learned setting.\n iters: Number of PDHG iterations\n only_last_primal: Flag for returning only the last primal solution (when True, primal_sols is (bs, 1, n); otherwise (bs, iters + 1, n))\n return_residuals: Flag for returning residuals\n\n Returns: History of primal-dual variables, primal solutions, and optionally residuals of the last iteration\n \"\"\"\n # q: (bs, n), b: (bs, m)\n bs = q.shape[0]\n if self.keep_X:\n Xs = torch.zeros((bs, iters + 1, 2 * self.m), device=self.device)\n else:\n Xs = None\n primal_sols = torch.zeros((bs, (iters if not only_last_primal else 0) + 1, self.n), device=self.device)\n if self.warm_starter is not None:\n with torch.set_grad_enabled(self.is_warm_starter_trainable):\n qd, bd, Pd, Hd, Pinvd = map(lambda t: t.detach() if t is not None else None, [q, b, P, H, Pinv])\n P_param_to_ws = Pd if Pd is not None else Pinvd\n self.X0 = self.warm_starter(qd, bd, P_param_to_ws, Hd)\n get_sol = self.get_sol if self.get_sol is not None else self.get_sol_transform(H, P, Pinv)\n if self.keep_X:\n Xs[:, 0, :] = self.X0.clone()\n if not only_last_primal:\n primal_sols[:, 0, :] = get_sol(self.X0[:, self.m:], q, b)\n X = self.X0\n A, B = self.get_AB(q, b, H, P, Pinv)\n for k in range(1, iters + 1):\n # PDHG update\n X = bmv(A, X) + B # (bs, 2m)\n if not self.symmetric_constraint:\n # Project to [0, +\\infty)\n F.relu(X[:, self.m:], inplace=True)\n else:\n if not self.buffered:\n # Project to [-1, 1]\n projected = torch.clamp(X[:, self.m:], -1, 1)\n X = torch.cat((X[:, :self.m], projected), dim=1)\n else:\n # Hybrid projection: epsilon to [0, +\\infty), the rest decision variables to [-1 - eps, 1 + eps]\n # Project epsilon\n F.relu(X[:, -1:], inplace=True)\n # Project the rest variables\n projected = torch.clamp(X[:, self.m:-1], -1 - X[:, -1:], 1 + X[:, -1:])\n # Concatenate\n X = torch.cat((X[:, :self.m], projected, X[:, -1:]), dim=1)\n if self.keep_X:\n Xs[:, k, :] = X.clone()\n if not only_last_primal:\n primal_sols[:, k, :] = get_sol(X[:, self.m:], q, b)\n\n if only_last_primal:\n primal_sols[:, 0, :] = get_sol(X[:, self.m:], q, b)\n\n # Compute residuals for the last step if the flag is set\n if return_residuals:\n x_last = primal_sols[:, -1, :]\n z_last = Xs[:, -1, self.m:]\n u_last = Xs[:, -1, :self.m]\n primal_residual, dual_residual = self.compute_residuals(x_last, z_last, u_last, q, 
b, P, H, Pinv)\n return Xs, primal_sols, (primal_residual, dual_residual)\n else:\n return Xs, primal_sols" }, { "identifier": "WarmStarter", "path": "src/modules/warm_starter.py", "snippet": "class WarmStarter(nn.Module):\n def __init__(self, device, n, m, fixed_P=True, fixed_H=True):\n super().__init__()\n self.device = device\n self.n = n\n self.m = m\n self.fixed_P = fixed_P\n self.fixed_H = fixed_H\n num_in = n + m\n if not fixed_P:\n num_in += n * (n + 1) // 2\n if not fixed_H:\n num_in += n * m\n num_out = 2 * m\n num_hidden = max(num_in, num_out)\n self.net = nn.Sequential(\n nn.Linear(num_in, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_out),\n ).to(device=device)\n\n def forward(self, q, b, P=None, H=None):\n \"\"\"The P argument can be either P or inv(P) in the original PDHG formulation, as long as consistent.\"\"\"\n net_input = [q, b]\n if not self.fixed_P:\n net_input.append(vectorize_upper_triangular(P))\n if not self.fixed_H:\n net_input.append(H.flatten(start_dim=-2))\n net_input_t = torch.cat(net_input, 1)\n X = self.net(net_input_t)\n return X" }, { "identifier": "make_psd", "path": "src/utils/torch_utils.py", "snippet": "def make_psd(x, min_eig=0.1):\n \"\"\"Assume x is (bs, N*(N+1)/2), create (bs, N, N) batch of PSD matrices using Cholesky.\"\"\"\n bs, n_elem = x.shape\n N = (int(np.sqrt(1 + 8 * n_elem)) - 1) // 2\n cholesky_diag_index = torch.arange(N, dtype=torch.long) + 1\n cholesky_diag_index = (cholesky_diag_index * (cholesky_diag_index + 1)) // 2 - 1 # computes the indices of the future diagonal elements of the matrix\n elem = x.clone()\n elem[:, cholesky_diag_index] = np.sqrt(min_eig) + F.softplus(elem[:, cholesky_diag_index])\n tril_indices = torch.tril_indices(row=N, col=N, offset=0) # Collection that contains the indices of the non-zero elements of a lower triangular matrix\n cholesky = torch.zeros(size=(bs, N, N), dtype=torch.float, device=elem.device) #initialize a square matrix to zeros\n cholesky[:, tril_indices[0], tril_indices[1]] = elem # Assigns the elements of the vector to their correct position in the lower triangular matrix\n return cholesky @ cholesky.transpose(1, 2)" }, { "identifier": "interpolate_state_dicts", "path": "src/utils/torch_utils.py", "snippet": "def interpolate_state_dicts(state_dict_1, state_dict_2, weight):\n return {\n key: (1 - weight) * state_dict_1[key] + weight * state_dict_2[key] for key in state_dict_1.keys()\n }" }, { "identifier": "mpc2qp", "path": "src/utils/mpc_utils.py", "snippet": "def mpc2qp(n_mpc, m_mpc, N, A, B, Q, R, x_min, x_max, u_min, u_max, x0, x_ref, normalize=False, Qf=None):\n \"\"\"\n Converts Model Predictive Control (MPC) problem parameters into Quadratic Programming (QP) form.\n\n Parameters:\n - n_mpc (int): Dimension of the state space.\n - m_mpc (int): Dimension of the input space.\n - N (int): Prediction horizon.\n - A (torch.Tensor): State transition matrix, shape (n_mpc, n_mpc).\n - B (torch.Tensor): Control input matrix, shape (n_mpc, m_mpc).\n - Q (torch.Tensor): State cost matrix, shape (n_mpc, n_mpc).\n - R (torch.Tensor): Control cost matrix, shape (m_mpc, m_mpc).\n - x_min (float): Lower state bounds.\n - x_max (float): Upper state bounds.\n - u_min (float): Lower control bounds.\n - u_max (float): Upper control bounds.\n - x0 (torch.Tensor): Initial state, shape (batch_size, n_mpc).\n - x_ref (torch.Tensor): Reference state, shape (batch_size, n_mpc).\n - normalize (bool): Whether to normalize the control actions. 
If set to True, the solution of the QP problem will be rescaled actions within range [-1, 1].\n - Qf (torch.Tensor, optional): Terminal state cost matrix, shape (n_mpc, n_mpc).\n\n Returns:\n - n (int): Number of decision variables.\n - m (int): Number of constraints.\n - P (torch.Tensor): QP cost matrix, shape (n, n).\n - q (torch.Tensor): QP cost vector, shape (batch_size, n).\n - H (torch.Tensor): Constraint matrix, shape (m, n).\n - b (torch.Tensor): Constraint bounds, shape (batch_size, m).\n\n The converted QP problem is in form:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n\n Notes:\n - The function assumes that A, B, Q, R are single matrices, and x0 and x_ref are in batch.\n - All tensors are expected to be on the same device.\n \"\"\"\n bs = x0.shape[0]\n device = x0.device\n\n Ax0 = torch.cat([bmv((torch.linalg.matrix_power(A, k + 1)).unsqueeze(0), x0) for k in range(N)], 1) # (bs, N * n_mpc)\n m = 2 * (n_mpc + m_mpc) * N # number of constraints\n n = m_mpc * N # number of decision variables\n\n b = torch.cat([\n Ax0 - x_min,\n x_max - Ax0,\n -u_min * torch.ones((bs, n), device=device),\n u_max * torch.ones((bs, n), device=device),\n ], 1)\n\n XU = torch.zeros((N, n_mpc, N, m_mpc), device=device)\n for k in range(N):\n for j in range(k + 1):\n XU[k, :, j, :] = (torch.linalg.matrix_power(A, k - j) @ B)\n XU = XU.flatten(0, 1).flatten(1, 2) # (N * n_MPC, N * m_MPC)\n\n Q_kron = torch.kron(torch.eye(N, device=A.device), Q)\n if Qf is not None:\n # Adjust the last block of Q_kron to include Qf\n Q_kron[-n_mpc:, -n_mpc:] += Qf\n\n q = -2 * XU.t().unsqueeze(0) @ Q_kron.unsqueeze(0) @ (kron(torch.ones((bs, N, 1), device=device), x_ref.unsqueeze(-1)) - Ax0.unsqueeze(-1)) # (bs, N * m_MPC, 1)\n q = q.squeeze(-1) # (bs, N * m_MPC) = (bs, n)\n P = 2 * XU.t() @ Q_kron @ XU + 2 * kron(torch.eye(N, device=device), R) # (n, n)\n H = torch.cat([XU, -XU, torch.eye(n, device=device), -torch.eye(n, device=device)], 0) # (m, n)\n\n if normalize:\n # u = alpha * u_normalized + beta\n alpha = (u_max - u_min) / 2 * torch.ones((m_mpc,), device=device) # (m_MPC,)\n beta = (u_max + u_min) / 2 * torch.ones((m_mpc,), device=device) # (m_MPC,)\n Alpha = torch.diag_embed(alpha.repeat(N)) # (n, n)\n Beta = beta.repeat(N) # (n,)\n P_nom = Alpha @ P @ Alpha # (n,)\n q_nom = bmv(Alpha.unsqueeze(0), q + bmv(P, Beta).unsqueeze(0)) # (bs, n)\n H_nom = H @ Alpha # (m, n)\n b_nom = (H @ Beta).unsqueeze(0) + b # (bs, m)\n P, q, H, b = P_nom, q_nom, H_nom, b_nom\n\n return n, m, P, q, H, b" }, { "identifier": "scenario_robust_mpc", "path": "src/utils/mpc_utils.py", "snippet": "def scenario_robust_mpc(mpc_baseline_parameters, r):\n \"\"\"\n Scenario-based robust MPC with process noise handling and constraints.\n\n Inputs:\n - mpc_baseline_parameters: Dict containing A, B, Q, R, Qf, disturbance magnitude, state bounds, input bounds, etc.\n\n Output: Function mapping from x0 to u0.\n \"\"\"\n\n # Extract parameters\n A = mpc_baseline_parameters['A']\n B = mpc_baseline_parameters['B']\n Q = mpc_baseline_parameters['Q']\n R = mpc_baseline_parameters['R']\n n = mpc_baseline_parameters['n_mpc']\n m = mpc_baseline_parameters['m_mpc']\n Qf = mpc_baseline_parameters.get(\"terminal_coef\", 0.) 
* np.eye(n)\n A_scenarios = mpc_baseline_parameters.get(\"A_scenarios\", [A])\n B_scenarios = mpc_baseline_parameters.get(\"B_scenarios\", [B])\n w_scenarios = mpc_baseline_parameters.get(\"w_scenarios\", [np.zeros((n, 1))])\n x_min = mpc_baseline_parameters['x_min']\n x_max = mpc_baseline_parameters['x_max']\n u_min = mpc_baseline_parameters['u_min']\n u_max = mpc_baseline_parameters['u_max']\n\n # Define the model\n model = do_mpc.model.Model('discrete')\n\n # States, inputs, and noise variables\n x = model.set_variable('_x', 'x', shape=(n, 1))\n u = model.set_variable('_u', 'u', shape=(m, 1))\n w = model.set_variable('_p', 'w', shape=(n, 1)) # Process noise\n\n # Uncertain parameters\n Theta_A = model.set_variable('_p', 'Theta_A', shape=A.shape)\n Theta_B = model.set_variable('_p', 'Theta_B', shape=B.shape)\n\n # System dynamics including process noise\n model.set_rhs('x', Theta_A @ x + Theta_B @ u + w)\n\n # Setup model\n model.setup()\n\n # MPC controller\n mpc = do_mpc.controller.MPC(model)\n\n # MPC parameters\n setup_mpc = {\n 'n_horizon': mpc_baseline_parameters['N'],\n 'n_robust': 1, # Exponential growth, so only 1 is reasonable\n 't_step': 0.1,\n 'store_full_solution': True,\n }\n mpc.set_param(**setup_mpc)\n\n # Uncertain parameter scenarios\n mpc.set_uncertainty_values(\n Theta_A=np.array(A_scenarios),\n Theta_B=np.array(B_scenarios),\n w=np.array(w_scenarios),\n )\n\n # Constraints on states and inputs\n eps = 1e-3\n mpc.bounds['lower','_x', 'x'] = x_min + eps\n mpc.bounds['upper','_x', 'x'] = x_max - eps\n mpc.bounds['lower','_u', 'u'] = u_min\n mpc.bounds['upper','_u', 'u'] = u_max\n\n # Objective function\n mterm = (x - r).T @ Qf @ (x - r)\n lterm = (x - r).T @ Q @ (x - r) + u.T @ R @ u\n mpc.set_objective(mterm=mterm, lterm=lterm)\n\n # Setup MPC\n mpc.setup()\n\n # Control function\n def mpc_control(x0, is_active=True):\n if is_active:\n t = time.time()\n mpc.x0 = x0\n\n # Solve the MPC problem\n u0 = mpc.make_step(x0)\n\n return u0.squeeze(-1), time.time() - t\n else:\n return np.zeros((m,)), 0.\n\n return mpc_control" }, { "identifier": "tube_robust_mpc", "path": "src/utils/mpc_utils.py", "snippet": "def tube_robust_mpc(mpc_baseline_parameters, r):\n \"\"\"\n Tube-based robust MPC with process noise handling and constraints.\n\n Inputs:\n - mpc_baseline_parameters: Dict containing A, B, Q, R, Qf, disturbance magnitude, state bounds, input bounds, etc.\n\n Output: Function mapping from x0 to u0.\n\n Reference: https://github.com/martindoff/DC-TMPC/; we only consider the case of LTI system (so that there is no successive linearization and no A2, B2).\n \"\"\"\n # Extract parameters\n A = mpc_baseline_parameters['A']\n B = mpc_baseline_parameters['B']\n Q = mpc_baseline_parameters['Q']\n R = mpc_baseline_parameters['R']\n n = mpc_baseline_parameters['n_mpc']\n m = mpc_baseline_parameters['m_mpc']\n Qf = mpc_baseline_parameters.get(\"terminal_coef\", 0.) 
* np.eye(n)\n N = mpc_baseline_parameters['N']\n x_min = mpc_baseline_parameters['x_min']\n x_max = mpc_baseline_parameters['x_max']\n u_min = mpc_baseline_parameters['u_min']\n u_max = mpc_baseline_parameters['u_max']\n max_disturbance_per_dim = mpc_baseline_parameters.get('max_disturbance_per_dim', 0)\n\n # Define optimization problem\n N_ver = 2 ** n # number of vertices\n\n # Optimization variables\n theta = cp.Variable(N + 1) # cost\n u = cp.Variable((m, N)) # input\n x_low = cp.Variable((n, N + 1)) # state (lower bound)\n x_up = cp.Variable((n, N + 1)) # state (upper bound)\n x_ = {} # create dictionary for 3D variable\n ws = {} # Each item is a noise vector corresponding to a vertex\n for l in range(N_ver):\n x_[l] = cp.Expression\n ws[l] = np.zeros((n,))\n\n # Parameters (value set at run time)\n x0 = cp.Parameter(n)\n\n # Define blockdiag matrices for page-wise matrix multiplication\n A_ = block_diag(*([A] * N))\n B_ = block_diag(*([B] * N))\n\n # Objective\n objective = cp.Minimize(cp.sum(theta))\n\n # Constraints\n constr = []\n\n # Assemble vertices\n for l in range(N_ver):\n # Convert l to binary string\n l_bin = bin(l)[2:].zfill(n)\n # Map binary string to lows and ups\n mapping_str_to_xs = lambda c: x_low if c == '0' else x_up\n mapping_str_to_w = lambda c: -max_disturbance_per_dim if c == '0' else max_disturbance_per_dim\n xs = map(mapping_str_to_xs, l_bin)\n w = np.array(list(map(mapping_str_to_w, l_bin))) # (n,) array\n x_[l] = cp.vstack([x[i, :] for (i, x) in enumerate(xs)])\n ws[l] = w\n\n for l in range(N_ver):\n # Define some useful variables\n x_r = cp.reshape(x_[l][:, :-1], (n * N, 1))\n u_r = cp.reshape(u, (m * N, 1))\n A_x = cp.reshape(A_ @ x_r, ((n, N)))\n B_u = cp.reshape(B_ @ u_r, (n, N))\n\n # SOC objective constraints\n for i in range(N):\n constr += [\n theta[i] >= cp.quad_form(x_[l][:, i] - r, Q) + cp.quad_form(u[:, i], R)\n ]\n\n constr += [\n theta[-1] >= cp.quad_form(x_[l][:, -1] - r, Qf)\n ]\n\n # Input constraints\n constr += [u >= u_min,\n u <= u_max]\n\n # Tube\n constr += [\n x_low[:, 1:] <= A_x + B_u + np.expand_dims(ws[l], -1)\n ]\n\n constr += [\n x_up[:, 1:] >= A_x + B_u + np.expand_dims(ws[l], -1)\n ]\n\n # State constraints\n constr += [\n x_low[:, :-1] >= x_min,\n x_up[:, :-1] >= x_min,\n x_up[:, :-1] <= x_max,\n x_low[:, :-1] <= x_max,\n x_low[:, 0] == x0,\n x_up[:, 0] == x0,\n ]\n\n # Define problem\n problem = cp.Problem(objective, constr)\n\n # Control function\n def mpc_control(x0_current, is_active=True):\n if is_active:\n t = time.time()\n x0.value = x0_current\n try:\n problem.solve(solver=cp.MOSEK, verbose=True, mosek_params={'MSK_IPAR_NUM_THREADS': 1})\n if u.value is not None:\n u0 = u.value[:, 0]\n else:\n # No solution, use default value\n warnings.warn(\"Tube MPC infeasible\")\n u0 = np.zeros((m,))\n except cp.error.SolverError:\n # solver failed, use default value\n warnings.warn(\"MOSEK failure\")\n u0 = np.zeros((m,))\n return u0, time.time() - t\n else:\n return np.zeros((m,)), 0.\n\n return mpc_control" }, { "identifier": "osqp_oracle", "path": "src/utils/osqp_utils.py", "snippet": "def osqp_oracle(q, b, P, H, return_iter_count=False, max_iter=1000):\n sol, iter_count = osqp_solve_qp_guarantee_return(\n P=P, q=q, G=-H, h=b,\n A=None, b=None, lb=None, ub=None,\n max_iter=max_iter, eps_abs=1e-10, eps_rel=1e-10,eps_prim_inf=1e-10, eps_dual_inf=1e-10, verbose=False,\n )\n if not return_iter_count:\n return sol\n else:\n return sol, iter_count" }, { "identifier": "np_batch_op", "path": "src/utils/np_batch_op.py", "snippet": "def 
np_batch_op(f, *arrays, max_workers=int(os.environ.get(\"MAX_CPU_WORKERS\", 8))):\n \"\"\"\n Applies a function in a batch operation on multiple arrays, possibly in parallel, handling multiple return values.\n If the function 'f' returns a single value, the function returns a single concatenated value instead of a tuple.\n\n Parameters:\n f (callable): The function to apply. Can return multiple values.\n arrays (list of np.ndarray or scipy.sparse.csc_matrix): Arrays on which the function is to be applied.\n\n Returns:\n np.ndarray or tuple: A concatenated array if 'f' returns a single value, otherwise a tuple of concatenated arrays.\n \"\"\"\n get_bs = lambda arr: 1 if type(arr) == scipy.sparse.csc_matrix else arr.shape[0]\n bs = max([get_bs(arr) for arr in arrays])\n _worker.f = f\n _worker.arrays = arrays\n\n with ProcessPoolExecutor(max_workers=max_workers) as executor:\n all_results = list(executor.map(_worker, range(bs)))\n\n processed_results = []\n for i in range(len(all_results[0])):\n results = [result[i] for result in all_results]\n if isinstance(results[0], np.ndarray):\n processed_result = np.concatenate([np.expand_dims(arr, 0) for arr in results], 0)\n else:\n processed_result = np.array(results)\n processed_results.append(processed_result)\n\n # Return a single value if there's only one result, otherwise return a tuple\n return processed_results[0] if len(processed_results) == 1 else tuple(processed_results)" } ]
import torch import numpy as np import scipy import functools import os from torch import nn from ..modules.qp_solver import QPSolver from ..modules.warm_starter import WarmStarter from ..utils.torch_utils import make_psd, interpolate_state_dicts from ..utils.mpc_utils import mpc2qp, scenario_robust_mpc, tube_robust_mpc from ..utils.osqp_utils import osqp_oracle from ..utils.np_batch_op import np_batch_op from concurrent.futures import ThreadPoolExecutor
11,241
if not self.strict_affine_layer: self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric) else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (indepedent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controlller in parallel """ # Helper function for parallel execution def task_creator(index): return 
controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else:
class StrictAffineLayer(nn.Module): """ Layer mapping from obs to (q, b) in the strict affine form. """ def __init__(self, input_size, n, m, obs_has_half_ref): super().__init__() self.obs_has_half_ref = obs_has_half_ref self.input_size = input_size self.q_layer = nn.Linear(self.input_size, n, bias=False) if not self.obs_has_half_ref: self.b_layer = nn.Linear(self.input_size // 2, m, bias=True) else: self.b_layer = nn.Linear(self.input_size, m, bias=True) def forward(self, x): if not self.obs_has_half_ref: x0 = x[:, :self.input_size // 2] else: x0 = x q = self.q_layer(x) b = self.b_layer(x0) return torch.cat([q, b], dim=1) class QPUnrolledNetwork(nn.Module): """ Learn a QP problem from the input using a MLP, then solve the QP using fixed number of unrolled PDHG iterations. Form of QP: minimize (1/2)x'Px + q'x subject to Hx + b >= 0, where x in R^n, b in R^m. """ def __init__( self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder, shared_PH=False, affine_qb=False, strict_affine_layer=False, obs_has_half_ref=False, symmetric=False, no_b=False, use_warm_starter=False, train_warm_starter=False, ws_loss_coef=1., ws_update_rate=0.01, ws_loss_shaper=lambda x: x ** (1 / 2), mpc_baseline=None, use_osqp_for_mpc=False, imitate_mpc=False, use_residual_loss=False, force_feasible=False, feasible_lambda=10, is_test=False, ): """mlp_builder is a function mapping (input_size, output_size) to a nn.Sequential object. If shared_PH == True, P and H are parameters indepedent of input, and q and b are functions of input; Otherwise, (P, H, q, b) are all functions of input. If affine_qb == True, then q and b are restricted to be affine functions of input. If strict_affine_layer == True (only effective when affine_qb=True), then: 1. q is linear w.r.t. (x0, xref) (no bias) 2. b is affine w.r.t. x0 (no dependence on xref) If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation. If symmetric == True (only effective when affine_qb=True), then: 1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x. 2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0. If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1. If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC. If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning. If force_feasible == True, solve the following problem instead of the original QP problem: minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2 s.t. Hx + b + y * 1 >= 0, y >= 0, where x in R^n, y in R. In this case, the solution returned will be of dimension (n + 1). 
""" super().__init__() self.shared_PH = shared_PH self.affine_qb = affine_qb self.strict_affine_layer = strict_affine_layer self.obs_has_half_ref = obs_has_half_ref self.device = device self.input_size = input_size # QP dimensions: there are the number of variables and constraints WITHOUT considering the slack variable self.n_qp = n_qp self.m_qp = m_qp self.qp_iter = qp_iter self.symmetric = symmetric self.no_b = no_b self.n_P_param = n_qp * (n_qp + 1) // 2 self.n_q_param = n_qp self.n_H_param = m_qp * n_qp self.n_b_param = m_qp if not self.no_b else 0 self.n_mlp_output = 0 if not self.shared_PH: self.n_mlp_output += (self.n_P_param + self.n_H_param) self.P_params = None self.H_params = None else: self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device)) self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device)) if not self.affine_qb: self.n_mlp_output += (self.n_q_param + self.n_b_param) self.qb_affine_layer = None else: if not self.strict_affine_layer: self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric) else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (indepedent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, 
warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controlller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else:
osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True)
7
2023-11-28 05:56:22+00:00
16k
Fraunhofer-SCAI/llamol
sample.py
[ { "identifier": "Transformer", "path": "model.py", "snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs, context_params: ContextArgs):\n super().__init__()\n self.params = params\n self.context_params = context_params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)\n\n self.frag_embeddings = nn.Embedding(params.vocab_size, params.dim)\n self.frag_type_embedding = nn.Embedding(1, params.dim)\n\n self.context_lookup = {k: i for i, k in enumerate(context_params.context_keys)}\n self.conditions_type_embeddings = nn.Embedding(\n len(context_params.context_keys), params.dim\n )\n self.conditions_embeddings_lookup = nn.ModuleDict(\n {\n k: nn.Sequential(\n nn.Linear(dim, params.dim, bias=True),\n )\n for k, dim in zip(\n context_params.context_keys, context_params.context_dims\n )\n }\n )\n\n self.dropout = nn.Dropout(params.dropout)\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = nn.Linear(params.dim, params.vocab_size, bias=False)\n\n # share the unembedding parameters with the embedding parameters\n self.tok_embeddings.weight = (\n self.output.weight\n ) # https://paperswithcode.com/method/weight-tying\n\n # some useful precompute for the RoPE relative positional embeddings\n freqs_cos, freqs_sin = precompute_freqs_cis(\n self.params.dim // self.params.n_heads, self.params.max_seq_len\n )\n self.register_buffer(\"freqs_cos\", freqs_cos, persistent=False)\n self.register_buffer(\"freqs_sin\", freqs_sin, persistent=False)\n\n # init all weights\n self.apply(self._init_weights)\n # apply special scaled init to the residual projections, per GPT-2 paper\n for pn, p in self.named_parameters():\n if pn.endswith(\"w3.weight\") or pn.endswith(\"wo.weight\"):\n torch.nn.init.normal_(\n p, mean=0.0, std=0.02 / math.sqrt(2 * params.n_layers)\n )\n\n # Initialize attribute for the loss of the last forward call. 
This will be set if the forward is called with a targets tensor.\n self.last_loss = None\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n if module.bias is not None:\n torch.nn.init.zeros_(module.bias)\n elif isinstance(module, nn.Embedding):\n torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n\n def forward(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n\n freqs_cos = self.freqs_cos[:seqlen]\n freqs_sin = self.freqs_sin[:seqlen]\n\n for layer in self.layers:\n h = layer(h, freqs_cos, freqs_sin)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n # https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def forward_with_kvcache(\n self,\n tokens: torch.Tensor,\n targets: Optional[torch.Tensor] = None,\n context: Optional[Dict[str, torch.Tensor]] = None,\n fragment: Optional[torch.Tensor] = None,\n cache_id: int = 1,\n pos_seq_len: Optional[int] = None,\n ) -> torch.Tensor:\n bsz, seqlen = tokens.shape\n device = tokens.device\n\n h = self._add_context_to_seq(tokens, context, fragment, bsz, device)\n\n context_seq_len = h.shape[1] - seqlen\n\n bsz, seqlen, _ = h.shape\n if pos_seq_len is None:\n pos_seq_len = seqlen\n else:\n pos_seq_len = max(seqlen, pos_seq_len + context_seq_len)\n\n freqs_cos = self.freqs_cos[:pos_seq_len]\n freqs_sin = self.freqs_sin[:pos_seq_len]\n\n for layer in self.layers:\n h = layer.forward_with_kvcache(h, freqs_cos, freqs_sin, cache_id=cache_id)\n h = self.norm(h)\n\n h = h[:, context_seq_len:]\n if targets is not None:\n # if we are given some desired targets also calculate the loss\n logits = self.output(h)\n tmp_last_loss = F.cross_entropy(\n logits.reshape(-1, logits.size(-1)),\n targets.reshape(-1),\n ignore_index=0, # Ignore Pad Tokens\n )\n\n # NOTE: This essentially does nothing for the computation,\n # because we are multiplying the weights by zero.\n # This *needs* to be done, so that we can train with DDP\n # As due to the random training process some of the weights are not used in the forward pass\n # That is unacceptable for the for the c10 backend and the training errors out.\n # Maybe there is a better fix in the future, see:\n 
# https://github.com/pytorch/pytorch/issues/43259\n ddp_fix = sum(p.sum() for p in self.parameters())\n zero_sum = ddp_fix * 0.0\n\n self.last_loss = tmp_last_loss + zero_sum\n else:\n # inference-time mini-optimization: only forward the output on the very last position\n logits = self.output(\n h[:, [-1], :]\n ) # note: using list [-1] to preserve the time dim\n self.last_loss = None\n\n return logits\n\n def _add_context_to_seq(self, tokens, context, fragment, bsz, device):\n h = self.tok_embeddings(tokens)\n h = self.dropout(h)\n\n if fragment is not None:\n fragment_type_enc = torch.zeros_like(\n fragment, dtype=torch.long, device=device\n )\n\n h = torch.concat(\n (\n self.tok_embeddings(fragment)\n + self.frag_embeddings(fragment)\n + self.frag_type_embedding(fragment_type_enc),\n h,\n ),\n dim=1,\n )\n\n if context is not None and len(context) != 0:\n # context is a dictionary with key : context_tensor of shape (batch_size, context_dim)\n type_ids = []\n context_vals = []\n\n for emb_key, context_val in context.items():\n emb_context_val = self.conditions_embeddings_lookup[emb_key](\n context_val.unsqueeze(1).to(device)\n ).unsqueeze(1)\n\n context_vals.append(emb_context_val)\n type_ids_tensor = torch.tensor(\n [self.context_lookup[emb_key]], device=device, dtype=torch.long\n )\n type_ids.append(type_ids_tensor)\n\n context_types = (\n torch.concat(type_ids, dim=0).reshape(-1, 1).expand(-1, bsz).T\n )\n # shape(len(context),batch_size, emb_size)\n context_types = self.conditions_type_embeddings(context_types)\n\n context_vals = torch.concat(context_vals, dim=1).to(device)\n\n # SHAPE\n h = torch.concat([context_vals + context_types, h], dim=1)\n return h\n\n def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):\n # start with all of the candidate parameters\n param_dict = {pn: p for pn, p in self.named_parameters()}\n # filter out those that do not require grad\n param_dict = {pn: p for pn, p in param_dict.items() if p.requires_grad}\n # create optim groups. Any parameters that is 2D will be weight decayed, otherwise no.\n # i.e. 
all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.\n decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]\n nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]\n optim_groups = [\n {\"params\": decay_params, \"weight_decay\": weight_decay},\n {\"params\": nodecay_params, \"weight_decay\": 0.0},\n ]\n num_decay_params = sum(p.numel() for p in decay_params)\n num_nodecay_params = sum(p.numel() for p in nodecay_params)\n print(\n f\"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters\"\n )\n print(\n f\"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters\"\n )\n # Create AdamW optimizer and use the fused version if it is available\n fused_available = \"fused\" in inspect.signature(torch.optim.AdamW).parameters\n use_fused = fused_available and device_type == \"cuda\"\n extra_args = dict(fused=True) if use_fused else dict()\n optimizer = torch.optim.AdamW(\n optim_groups, lr=learning_rate, betas=betas, **extra_args\n )\n print(f\"using fused AdamW: {use_fused}\")\n\n return optimizer\n\n def estimate_mfu(self, fwdbwd_per_iter, dt):\n \"\"\"estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS\"\"\"\n # first estimate the number of flops we do per iteration.\n # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311\n N = sum(p.numel() for p in self.parameters())\n cfg = self.params\n L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim // cfg.n_heads, cfg.max_seq_len\n flops_per_token = 6 * N + 12 * L * H * Q * T\n flops_per_fwdbwd = flops_per_token * T\n flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter\n # express our flops throughput as ratio of A100 bfloat16 peak flops\n flops_achieved = flops_per_iter * (1.0 / dt) # per second\n flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS\n mfu = flops_achieved / flops_promised\n return mfu\n\n @torch.inference_mode()\n def generate(\n self,\n tokenizer: SmilesTokenizer,\n context: Union[torch.Tensor, None] = None,\n fragments: Union[torch.Tensor, None] = None,\n max_length: int = 50,\n num_gen: int = 200,\n start_smiles: Union[str, None] = None,\n temperature: float = 1.0,\n top_k: Union[int, None] = None,\n device: torch.device = torch.device(\"cpu\"),\n cache_kv: bool = False,\n ) -> List[str]:\n batch_size = num_gen\n if start_smiles is not None:\n tokenized_start_selfie = tokenizer.encode(start_smiles)[\n :-1\n ] # remove <eos> token\n tokenized_start_selfie = torch.tensor(\n tokenized_start_selfie, device=device, dtype=torch.long\n ).view(-1, 1)\n tokenized_start_selfie = tokenized_start_selfie.repeat(1, batch_size)\n\n outputs = tokenized_start_selfie.T\n else:\n outputs = (\n torch.LongTensor([[tokenizer.cls_token_id] * batch_size]).to(device)\n ).T # batch_size\n self.eval()\n\n start_len = outputs.shape[1]\n has_end_idx = np.array([0] * batch_size)\n cache_id = np.random.randint(0, int(1e10), 1).item()\n with torch.no_grad():\n with tqdm(total=max_length, desc=\"Generation\") as pbar:\n for i in range(start_len, max_length):\n # trg_tensor = #torch.LongTensor(outputs).to(model.device)\n if not cache_kv:\n logits = self(outputs, context=context, fragment=fragments)\n else:\n # logits_ = self(outputs, context=context, fragment=fragments)\n if i == start_len:\n # When starting pass the whole input, so that \"start_smiles\" works, then only the newly generated token, because of the cache\n func_input = outputs\n else:\n func_input = outputs[:, 
-1].unsqueeze(-1)\n logits = self.forward_with_kvcache(\n func_input,\n context=context,\n fragment=fragments,\n cache_id=cache_id,\n pos_seq_len=outputs.size(-1),\n )\n\n # raise NotImplementedError(\"Currently not working / right implemented\")\n # logits = self.forward_with_kvcache(outputs, context=context, fragment=fragments,cache_id = cache_id)\n\n logits = logits[:, -1, :] # crop to just the final time step\n if temperature == 0.0:\n # \"sample\" the single most likely index\n _, logits = torch.topk(logits, k=1, dim=-1)\n else:\n # pluck the logits at the final step and scale by desired temperature\n logits = logits / temperature\n # optionally crop the logits to only the top k options\n if top_k is not None:\n v, _ = torch.topk(logits, min(top_k, logits.size(-1)))\n logits[logits < v[:, [-1]]] = -float(\"Inf\")\n\n probs = F.softmax(logits, dim=-1)\n idx_next = torch.multinomial(probs, num_samples=1)\n\n ended_sentences = idx_next == tokenizer.sep_token_id\n if torch.count_nonzero(ended_sentences) != 0:\n indicies = torch.nonzero(ended_sentences)\n indicies = indicies.cpu().numpy()\n for end_idx in indicies[:, 0]:\n if has_end_idx[end_idx] == 0:\n has_end_idx[end_idx] = i\n\n # print(has_end_idx)\n\n if all([idx != 0 for idx in has_end_idx]):\n break\n\n # outputs.append(best_guesses)\n # outputs = torch.row_stack((outputs, idx_next))\n outputs = torch.cat((outputs, idx_next), dim=1)\n pbar.update(1)\n\n out_selfies = []\n for output, end_idx in zip(outputs.cpu().numpy(), has_end_idx):\n # Incase of limiting the max_len\n if end_idx == 0:\n selfie = [tokenizer._convert_id_to_token(idx) for idx in output[:]]\n else:\n selfie = [\n tokenizer._convert_id_to_token(idx) for idx in output[:end_idx]\n ]\n selfie = \"\".join(selfie[1:])\n out_selfies.append(selfie)\n\n # for indicies in outputs:\n # translated_sentence = [tokenizer.idx_to_tokens[idx] for idx in outputs]\n # remove start token\n return out_selfies\n\n @staticmethod\n def load(path, device: torch.device = torch.device(\"cpu\")) -> Transformer:\n data = torch.load(path, map_location=device)\n\n newinstace = Transformer(data[\"model_params\"], data[\"context_params\"])\n newinstace.load_state_dict(data[\"state_dict\"])\n return newinstace.to(device)\n\n def save(self, filepath):\n torch.save(\n {\n \"state_dict\": self.state_dict(),\n **dict(model_params=self.params, context_params=self.context_params),\n },\n filepath,\n )\n\n def getNumberTrainableParams(self) -> int:\n return sum(p.numel() for p in self.parameters() if p.requires_grad)\n\n def getNumberParams(self) -> int:\n return sum(p.numel() for p in self.parameters())" }, { "identifier": "check_metrics", "path": "plot_utils.py", "snippet": "def check_metrics(generated_smiles: List[str], dataset_smiles: List[str]):\n len_before = len(generated_smiles)\n generated_smiles = [g for g in generated_smiles if g is not None]\n len_after = len(generated_smiles)\n\n novel = novelty(generated_smiles, dataset_smiles)\n unique_at_1k = unique_at(generated_smiles, k=1000)\n unique_at_10k = unique_at(generated_smiles, k=10000)\n return dict(\n novelty=novel,\n unique_at_1k=unique_at_1k,\n unique_at_10k=unique_at_10k,\n validity=len_after / float(len_before),\n )" }, { "identifier": "plot_1D_condition", "path": "plot_utils.py", "snippet": "def plot_1D_condition(\n context_col,\n save_path,\n new_context,\n generated_smiles,\n temperature,\n context_dict,\n context_scaler=None,\n):\n for con_col in context_col:\n save_path = os.path.join(\n save_path, 
f\"{con_col}_{'-'.join(context_col)}_temp{temperature}\"\n )\n os.makedirs(save_path, exist_ok=True)\n\n current_context = new_context[con_col].cpu().detach().numpy()\n if con_col == \"mol_weight\":\n predicted_context = calcContextMolWeight(generated_smiles)\n elif con_col == \"logp\":\n predicted_context = calcContextLogP(generated_smiles)\n elif con_col == \"sascore\":\n predicted_context = calcContextSAScore(generated_smiles)\n elif con_col == \"energy\":\n # TODO: Change to something better\n predicted_context = calcContextEnergy(generated_smiles)\n\n if context_scaler is not None:\n raise NotImplementedError(\"Not implemented yet\")\n # context_list = context_scaler.inverse_transform(context_list)\n\n mean_vals_pred = []\n labels = np.unique(current_context)\n mse_value = []\n mad_value = []\n for label in labels:\n mask = (current_context == label).reshape(-1)\n mean_val = np.mean(predicted_context[mask])\n mean_vals_pred.append(mean_val)\n mse_value.extend((predicted_context[mask] - label) ** 2)\n mad_value.extend(abs(predicted_context[mask] - label))\n\n mse = np.mean(mse_value)\n mad = np.mean(mad_value)\n logger.info(f\"MSE {mse}\")\n logger.info(f\"MAD {mad}\")\n logger.info(f\"SD: {np.std(mad_value)}\")\n\n current_context = current_context.reshape(-1)\n\n # Create a figure and axes\n fig, ax1 = plt.subplots()\n\n # Scatter plot\n ax1.scatter(\n current_context,\n predicted_context,\n label=\"Ground Truth vs Prediction\",\n c=\"blue\",\n alpha=0.5,\n )\n ax1.plot(\n np.arange(np.min(current_context), np.max(current_context) + 1),\n np.arange(np.min(current_context), np.max(current_context) + 1),\n label=\"y=x\",\n c=\"black\",\n )\n ax1.scatter(labels, mean_vals_pred, label=\"Mean predicted values\", c=\"red\")\n ax1.set_xlabel(\"Ground Truth\")\n ax1.set_ylabel(\"Prediction\")\n\n # Histogram\n ax2 = ax1.twinx() # Create a twin Axes sharing the x-axis\n sns.histplot(\n context_dict[con_col],\n # bins=200,\n label=\"Dataset distribution\",\n alpha=0.5,\n # kde=True,\n # element=\"poly\",\n ax=ax2,\n )\n # ax2.hist(\n # context_dict[con_col],\n # bins=200,\n # label=\"Dataset distribution\",\n # alpha=0.5,\n # )\n ax2.set_ylabel(\"Frequency\")\n\n # Combine legends\n handles1, labels1 = ax1.get_legend_handles_labels()\n handles2, labels2 = ax2.get_legend_handles_labels()\n\n ax1.legend(handles1 + handles2, labels1 + labels2)\n\n plt.xlim((np.min(current_context), np.max(current_context) + 1))\n # Set title\n display_name = COL_TO_DISPLAY_NAME[con_col]\n plt.title(f\"{display_name} - temperature: {temperature} - mse: {round(mse, 4)}\")\n\n out_df = pd.DataFrame(\n {\n \"smiles\": generated_smiles,\n f\"{con_col}\": predicted_context.tolist(),\n f\"target_{con_col}\": current_context.tolist(),\n }\n )\n out_df.to_csv(os.path.join(save_path, \"predictions.csv\"), index=False)\n out_path = os.path.join(save_path, \"graph.png\")\n print(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()" }, { "identifier": "plot_2D_condition", "path": "plot_utils.py", "snippet": "def plot_2D_condition(\n context_col,\n save_path,\n new_context,\n generated_smiles,\n temperature,\n label: Union[str, None] = None,\n):\n save_path = os.path.join(\n save_path, f\"multicond2_{'-'.join(context_col)}_temp={temperature}\"\n )\n if label is not None:\n save_path = os.path.join(save_path, label)\n\n os.makedirs(save_path, exist_ok=True)\n delta_dict = {c: [] for c in context_col}\n predicted_context_dict = {}\n for con_col in context_col:\n current_context = new_context[con_col].cpu().numpy()\n if 
con_col == \"mol_weight\":\n predicted_context = calcContextMolWeight(generated_smiles)\n elif con_col == \"logp\":\n predicted_context = calcContextLogP(generated_smiles)\n elif con_col == \"sascore\":\n predicted_context = calcContextSAScore(generated_smiles)\n elif con_col == \"energy\":\n # TODO: Change to something better\n predicted_context = calcContextEnergy(generated_smiles)\n\n predicted_context_dict[con_col] = np.array(predicted_context)\n delta_dict[con_col] = np.abs(current_context - np.array(predicted_context))\n\n # Create a DataFrame from delta_dict\n df = pd.DataFrame(delta_dict)\n real_values_prop1 = new_context[context_col[0]].cpu().numpy()\n real_values_prop2 = new_context[context_col[1]].cpu().numpy()\n # cmap = plt.get_cmap('Blues') # Choose a green color palette from Matplotlib\n mse_vals_x = []\n mad_vals_x = []\n mse_vals_y = []\n mad_vals_y = []\n fig = plt.figure()\n ax = plt.subplot(111)\n for v1 in np.unique(real_values_prop1):\n for v2 in np.unique(real_values_prop2):\n mask = (real_values_prop1 == v1) & (real_values_prop2 == v2)\n indices = np.nonzero(mask)[0]\n # print(\"Indices\", len(indices))\n # Get the color from the color palette based on the v1 value\n # color = cmap((v1 - np.min(real_values_prop1)) / (np.max(real_values_prop1) - np.min(real_values_prop1)))\n color = np.random.rand(\n 3,\n )\n # # Plot scatter plot with the specified color and label\n\n x_pred = predicted_context_dict[context_col[0]][indices].ravel()\n y_pred = predicted_context_dict[context_col[1]][indices].ravel()\n mse_vals_x.extend((x_pred - v1) ** 2)\n mad_vals_x.extend(np.abs(x_pred - v1))\n\n mse_vals_y.extend((y_pred - v2) ** 2)\n mad_vals_y.extend(np.abs(y_pred - v2))\n\n ax.scatter(x_pred, y_pred, color=color, alpha=0.5)\n\n # Plot KDE plot with the specified color\n # sns.kdeplot(\n # data=pd.DataFrame(\n # {\n # f\"x\": x_pred,\n # f\"y\": y_pred,\n # }\n # ),\n # x=f\"x\",\n # y=f\"y\",\n # color=color,\n # fill=False,\n # bw_adjust=2.25,\n # # label=f\"({v1}, {v2})\"\n # )\n\n ax.scatter(v1, v2, color=color, label=f\"({v1}, {v2})\", marker=\"^\", s=20.0)\n\n mse_x = np.mean(mse_vals_x)\n mad_x = np.mean(mad_vals_x)\n mse_y = np.mean(mse_vals_y)\n mad_y = np.mean(mad_vals_y)\n\n logger.info(f\"MSE {context_col[0]}: {mse_x}\")\n logger.info(f\"MAD {context_col[0]}: {mad_x}\")\n logger.info(f\"MSE {context_col[1]}: {mse_y}\")\n logger.info(f\"MAD {context_col[1]}: {mad_y}\")\n\n file_path = os.path.join(save_path, \"metrics.txt\")\n\n with open(file_path, \"w\") as f:\n f.write(f\"MSE {context_col[0]}: {mse_x} \\n\")\n f.write(f\"MAD {context_col[0]}: {mad_x} \\n\")\n f.write(f\"MSE {context_col[1]}: {mse_y} \\n\")\n f.write(f\"MAD {context_col[1]}: {mad_y} \\n\")\n\n ax.set_xlabel(COL_TO_DISPLAY_NAME[context_col[0]])\n ax.set_ylabel(COL_TO_DISPLAY_NAME[context_col[1]])\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n # Put a legend to the right of the current axis\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n ax.set_title(\"Multi Property Distribution of Generated Molecules\")\n out_path = os.path.join(save_path, \"graph.png\")\n logger.info(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()\n return save_path" }, { "identifier": "plot_3D_condition", "path": "plot_utils.py", "snippet": "def plot_3D_condition(\n context_col, save_path, new_context, generated_smiles, temperature\n):\n save_path = os.path.join(\n save_path, f\"multicond3_{'-'.join(context_col)}_temp={temperature}\"\n )\n 
os.makedirs(save_path, exist_ok=True)\n predicted_context_dict = {}\n for con_col in context_col:\n predicted_context = calc_context_from_smiles(generated_smiles, con_col)\n\n predicted_context_dict[con_col] = np.array(predicted_context)\n\n real_values_prop1 = new_context[context_col[0]].cpu().numpy()\n real_values_prop2 = new_context[context_col[1]].cpu().numpy()\n real_values_prop3 = new_context[context_col[2]].cpu().numpy()\n # cmap = plt.get_cmap('Blues') # Choose a green color palette from Matplotlib\n\n mse_vals_x = []\n mad_vals_x = []\n mse_vals_y = []\n mad_vals_y = []\n mse_vals_z = []\n mad_vals_z = []\n\n fig = plt.figure()\n ax = fig.add_subplot(projection=\"3d\")\n for v1 in np.unique(real_values_prop1):\n for v2 in np.unique(real_values_prop2):\n for v3 in np.unique(real_values_prop3):\n mask = (\n (real_values_prop1 == v1)\n & (real_values_prop2 == v2)\n & (real_values_prop3 == v3)\n )\n indices = np.nonzero(mask)[0]\n # print(\"Indices\", len(indices))\n # Get the color from the color palette based on the v1 value\n # color = cmap((v1 - np.min(real_values_prop1)) / (np.max(real_values_prop1) - np.min(real_values_prop1)))\n color = np.random.rand(\n 3,\n )\n\n x_pred = predicted_context_dict[context_col[0]][indices].ravel()\n y_pred = predicted_context_dict[context_col[1]][indices].ravel()\n z_pred = predicted_context_dict[context_col[2]][indices].ravel()\n\n mse_vals_x.extend((x_pred - v1) ** 2)\n mad_vals_x.extend(np.abs(x_pred - v1))\n\n mse_vals_y.extend((y_pred - v2) ** 2)\n mad_vals_y.extend(np.abs(y_pred - v2))\n\n mse_vals_z.extend((z_pred - v3) ** 2)\n mad_vals_z.extend(np.abs(z_pred - v3))\n\n # # Plot scatter plot with the specified color and label\n ax.scatter(v1, v2, v3, color=color, label=f\"({v1}, {v2}, {v3})\", s=20.0)\n ax.scatter(\n x_pred,\n y_pred,\n z_pred,\n color=color,\n )\n\n mse_x = np.mean(mse_vals_x)\n mad_x = np.mean(mad_vals_x)\n mse_y = np.mean(mse_vals_y)\n mad_y = np.mean(mad_vals_y)\n mse_z = np.mean(mse_vals_z)\n mad_z = np.mean(mad_vals_z)\n\n logger.info(f\"MSE {context_col[0]}: {mse_x}\")\n logger.info(f\"MAD {context_col[0]}: {mad_x}\")\n logger.info(f\"MSE {context_col[1]}: {mse_y}\")\n logger.info(f\"MAD {context_col[1]}: {mad_y}\")\n logger.info(f\"MSE {context_col[2]}: {mse_z}\")\n logger.info(f\"MAD {context_col[2]}: {mad_z}\")\n\n file_path = os.path.join(save_path, \"metrics.txt\")\n\n with open(file_path, \"w\") as f:\n f.write(f\"MSE {context_col[0]}: {mse_x} \\n\")\n f.write(f\"MAD {context_col[0]}: {mad_x} \\n\")\n\n f.write(f\"MSE {context_col[1]}: {mse_y} \\n\")\n f.write(f\"MAD {context_col[1]}: {mad_y} \\n\")\n\n f.write(f\"MSE {context_col[2]}: {mse_z} \\n\")\n f.write(f\"MAD {context_col[2]}: {mad_z} \\n\")\n\n ax.set_xlabel(COL_TO_DISPLAY_NAME[context_col[0]])\n ax.set_ylabel(COL_TO_DISPLAY_NAME[context_col[1]])\n ax.set_zlabel(COL_TO_DISPLAY_NAME[context_col[2]])\n # plt.legend(\n # bbox_to_anchor=(1.0, 0.5),\n # loc=\"center right\",\n # bbox_transform=plt.gcf().transFigure,\n # )\n # plt.subplots_adjust(left=0.05, bottom=0.1, right=0.8)\n plt.legend(\n bbox_to_anchor=(1.035, 0.5),\n loc=\"center right\",\n bbox_transform=plt.gcf().transFigure,\n )\n plt.subplots_adjust(left=0.05, bottom=0.1, right=0.775)\n\n plt.title(\"Multi Property Distribution of Generated Molecules\")\n out_path = os.path.join(save_path, \"graph.png\")\n print(f\"Saved to {out_path}\")\n plt.savefig(out_path)\n plt.clf()\n\n return save_path" }, { "identifier": "plot_unconditional", "path": "plot_utils.py", "snippet": "def 
plot_unconditional(\n out_path: str = os.getcwd(),\n smiles: List[str] = [],\n temperature: float = 0.8,\n cmp_context_dict: Union[Dict[str, np.array], None] = None,\n context_cols: List[str] = [\"logp\", \"sascore\", \"mol_weight\"],\n):\n out_path = os.path.join(out_path, \"unconditional\")\n os.makedirs(out_path, exist_ok=True)\n\n for c in context_cols:\n plt.clf()\n\n context_cal = calc_context_from_smiles(smiles, c)\n\n if cmp_context_dict is not None:\n sns.histplot(\n cmp_context_dict[c],\n stat=\"density\",\n label=\"Dataset Distribution\",\n alpha=0.75,\n color=\"blue\",\n )\n sns.histplot(\n context_cal,\n stat=\"density\",\n label=\"Generated Molecules Distribution\",\n alpha=0.5,\n color=\"orange\",\n )\n\n if c == \"logp\":\n plt.xlim((-6, 8))\n else:\n plt.xlim((0, 10))\n\n plt.xlabel(COL_TO_DISPLAY_NAME[c])\n plt.title(\n f\"Unconditional Distribution {COL_TO_DISPLAY_NAME[c]} \\nwith Temperature {temperature}\"\n )\n plt.legend()\n\n out_file = os.path.join(out_path, f\"unc_{c}_temp={temperature}.png\")\n plt.savefig(out_file)\n logger.info(f\"Saved Unconditional to {out_file}\")" }, { "identifier": "SmilesTokenizer", "path": "tokenizer.py", "snippet": "class SmilesTokenizer(BertTokenizer):\n \"\"\"\n Creates the SmilesTokenizer class. The tokenizer heavily inherits from the BertTokenizer\n implementation found in Huggingface's transformers library. It runs a WordPiece tokenization\n algorithm over SMILES strings using the tokenisation SMILES regex developed by Schwaller et. al.\n\n Please see https://github.com/huggingface/transformers\n and https://github.com/rxn4chemistry/rxnfp for more details.\n\n Examples\n --------\n >>> from deepchem.feat.smiles_tokenizer import SmilesTokenizer\n >>> current_dir = os.path.dirname(os.path.realpath(__file__))\n >>> vocab_path = os.path.join(current_dir, 'tests/data', 'vocab.txt')\n >>> tokenizer = SmilesTokenizer(vocab_path)\n >>> print(tokenizer.encode(\"CC(=O)OC1=CC=CC=C1C(=O)O\"))\n [12, 16, 16, 17, 22, 19, 18, 19, 16, 20, 22, 16, 16, 22, 16, 16, 22, 16, 20, 16, 17, 22, 19, 18, 19, 13]\n\n\n References\n ----------\n .. [1] Schwaller, Philippe; Probst, Daniel; Vaucher, Alain C.; Nair, Vishnu H; Kreutter, David;\n Laino, Teodoro; et al. (2019): Mapping the Space of Chemical Reactions using Attention-Based Neural\n Networks. ChemRxiv. Preprint. 
https://doi.org/10.26434/chemrxiv.9897365.v3\n\n Notes\n ----\n This class requires huggingface's transformers and tokenizers libraries to be installed.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n\n def __init__(\n self,\n # unk_token=\"[UNK]\",\n # sep_token=\"[SEP]\",\n # pad_token=\"[PAD]\",\n # cls_token=\"[CLS]\",\n # mask_token=\"[MASK]\",\n **kwargs\n ):\n \"\"\"Constructs a SmilesTokenizer.\n\n Parameters\n ----------\n vocab_file: str\n Path to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n \"\"\"\n\n vocab_file = os.path.join(os.path.dirname(__file__), \"data\", \"vocab.txt\")\n\n super().__init__(vocab_file, **kwargs)\n\n self.sos = \"[SOS]\"\n self.eos = \"[EOS]\"\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\"Can't find a vocab file at path '{}'.\".format(vocab_file))\n self.vocab = load_vocab(vocab_file)\n self.highest_unused_index = max(\n [i for i, v in enumerate(self.vocab.keys()) if v.startswith(\"[unused\")]\n )\n self.ids_to_tokens = collections.OrderedDict(\n [(ids, tok) for tok, ids in self.vocab.items()]\n )\n self.basic_tokenizer = BasicSmilesTokenizer()\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n @property\n def vocab_list(self):\n return list(self.vocab.keys())\n\n def _tokenize(self, text: str):\n \"\"\"\n Tokenize a string into a list of tokens.\n\n Parameters\n ----------\n text: str\n Input string sequence to be tokenized.\n \"\"\"\n\n split_tokens = [token for token in self.basic_tokenizer.tokenize(text)]\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"\n Converts a token (str/unicode) in an id using the vocab.\n\n Parameters\n ----------\n token: str\n String token from a larger sequence to be converted to a numerical id.\n \"\"\"\n\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"\n Converts an index (integer) in a token (string/unicode) using the vocab.\n\n Parameters\n ----------\n index: int\n Integer index to be converted back to a string-based token as part of a larger sequence.\n \"\"\"\n\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]):\n \"\"\"Converts a sequence of tokens (string) in a single string.\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n Returns\n -------\n out_string: str\n Single string from combined tokens.\n \"\"\"\n\n out_string: str = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def add_special_tokens_ids_single_sequence(self, token_ids: List[int]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n\n token_ids: list[int]\n list of tokenized input ids. 
Can be obtained using the encode or encode_plus methods.\n \"\"\"\n\n return [self.cls_token_id] + token_ids + [self.sep_token_id]\n\n def add_special_tokens_single_sequence(self, tokens: List[str]):\n \"\"\"\n Adds special tokens to the a sequence for sequence classification tasks.\n A BERT sequence has the following format: [CLS] X [SEP]\n\n Parameters\n ----------\n tokens: List[str]\n List of tokens for a given string sequence.\n\n \"\"\"\n return [self.cls_token] + tokens + [self.sep_token]\n\n def add_special_tokens_ids_sequence_pair(\n self, token_ids_0: List[int], token_ids_1: List[int]\n ) -> List[int]:\n \"\"\"\n Adds special tokens to a sequence pair for sequence classification tasks.\n A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]\n\n Parameters\n ----------\n token_ids_0: List[int]\n List of ids for the first string sequence in the sequence pair (A).\n\n token_ids_1: List[int]\n List of tokens for the second string sequence in the sequence pair (B).\n \"\"\"\n\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def add_padding_tokens(\n self, token_ids: List[int], length: int, right: bool = True\n ) -> List[int]:\n \"\"\"\n Adds padding tokens to return a sequence of length max_length.\n By default padding tokens are added to the right of the sequence.\n\n Parameters\n ----------\n token_ids: list[int]\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n length: int\n\n right: bool (True by default)\n\n Returns\n ----------\n token_ids :\n list of tokenized input ids. Can be obtained using the encode or encode_plus methods.\n\n padding: int\n Integer to be added as padding token\n\n \"\"\"\n padding = [self.pad_token_id] * (length - len(token_ids))\n\n if right:\n return token_ids + padding\n else:\n return padding + token_ids\n\n def save_vocabulary(\n self, vocab_path: str\n ): # -> tuple[str]: doctest issue raised with this return type annotation\n \"\"\"\n Save the tokenizer vocabulary to a file.\n\n Parameters\n ----------\n vocab_path: obj: str\n The directory in which to save the SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n Returns\n ----------\n vocab_file: :obj:`Tuple(str)`:\n Paths to the files saved.\n typle with string to a SMILES character per line vocabulary file.\n Default vocab file is found in deepchem/feat/tests/data/vocab.txt\n\n \"\"\"\n index = 0\n if os.path.isdir(vocab_path):\n vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES[\"vocab_file\"])\n else:\n vocab_file = vocab_path\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(\n vocab_file\n )\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" } ]
import os import sys import time import pandas as pd import torch import numpy as np import re import logging import argparse import rdkit.rdBase as rkrb import rdkit.RDLogger as rkl from contextlib import nullcontext from tqdm.auto import tqdm from model import Transformer from plot_utils import ( check_metrics, plot_1D_condition, plot_2D_condition, plot_3D_condition, plot_unconditional, ) from tokenizer import SmilesTokenizer from typing import Dict, List, Tuple, Union from rdkit import Chem from rdkit import DataStructs from rdkit.Chem.Fingerprints import FingerprintMols
13,254
i * gens_per_step : (i + 1) * gens_per_step ] for c in context_cols.keys() } context_dict = {"context": cd, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[ 1:-1 ] context_tensor = torch.tensor( [incorporate_selfie] * gens_per_step, dtype=torch.long, device=self.device, ) context_dict["fragment"] = context_tensor context_cols = list(context_cols.keys()) else: context_dict = self.get_context( context_cols, context_smi, num_examples=gens_per_step ) # for k in range(num_samples): y = self.model.generate( self.tokenizer, context=context_dict["context"], fragments=context_dict["fragment"], start_smiles=start_smiles, num_gen=gens_per_step, temperature=temperature, top_k=top_k, max_length=max_new_tokens, device=self.device, cache_kv=use_kv_cache, ) new_context = {k: [] for k in context_dict["context"]} for i, sample in enumerate(y): # print(sample) mol = Chem.MolFromSmiles(sample) if mol is not None: out_smiles.append(sample) for k in new_context: new_context[k].append( context_dict["context"][k][i].unsqueeze(-1) ) for k in new_context: new_context[k] = torch.concat(new_context[k], dim=0) if context is None: context = new_context else: for k in context: context[k] = torch.concat( [context[k], new_context[k]], dim=0 ) pbar.update(1) logger.info( f"Number valid generated: {len(out_smiles) / num_samples * 100} %" ) logger.info("---------------") if return_context: return (out_smiles, context) else: return out_smiles @torch.no_grad() def generate_with_evaluation( self, context_cols: Union[List[str], None] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, cmp_context_dict: Union[Dict[str, torch.Tensor], None] = None, total_gen_steps: int = 1, use_kv_cache: bool = False, ): out_smiles, new_context = self.generate( context_cols=context_cols, context_smi=context_smi, start_smiles=start_smiles, num_samples=num_samples, max_new_tokens=max_new_tokens, temperature=temperature, top_k=top_k, return_context=True, total_gen_steps=total_gen_steps, use_kv_cache=use_kv_cache, ) out_dir = os.path.dirname(self.load_path) if context_cols is not None: if len(context_cols) == 1: plot_1D_condition( context_cols, os.path.join(out_dir, "plots"), new_context, out_smiles, temperature, cmp_context_dict, context_scaler=None, ) elif len(context_cols) == 2:
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer self.tokenizer = SmilesTokenizer() def get_context( self, context_col: List[str], context_smi: str, num_examples: int = 50, ): """ Returns a dictionary in the form of { "fragment": torch.tensor, "context": { "logp": torch.tensor, "sascore": torch.tensor, "mol_weight": torch.tensor } } When context_smi is set to a string, then the "fragment" field is populated. All of the properties listed in the context_col list is set to the keys and the values are set to a resonable range for each property. num_examples indicates how many values are sampled for each property. 
""" output_dict = {"context": {}, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[1:-1] context = torch.tensor( [incorporate_selfie] * num_examples, dtype=torch.long, device=self.device, ) output_dict["fragment"] = context if context_col is None: return output_dict if "logp" in context_col: # context = 0.5 * torch.randint( # -8, 14, (num_examples,), device=self.device, dtype=torch.float # ) # context = 0.5 * torch.randint( # -6, 6, (num_examples, 1), device=device, dtype=torch.float # ) context = torch.tensor( np.random.choice([-2, 0, 2], (num_examples,)), device=self.device, dtype=self.ptdtype, ) # context = 2.0 * torch.ones( # (num_examples,1), device=device, dtype=torch.float # ) # context = -2.0*torch.ones((num_examples,2),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["logp"] = context if "energy" in context_col: context = 0.1 * torch.randint( -15, 15, (num_examples,), device=self.device, dtype=torch.float ) # context = -2.0*torch.ones((num_examples,2),device=device,dtype=torch.float) context, _ = torch.sort(context, 0) output_dict["context"]["energy"] = context if "sascore" in context_col: # context = 0.5 * torch.randint( # 2, 20, (num_examples, ), device=self.device, dtype=torch.float # ) context = torch.tensor( np.random.choice([2, 3, 4], (num_examples,)), device=self.device, dtype=torch.float, ) # context = 0.5 * torch.randint( # 4, 8, (num_examples, 1), device=device, dtype=torch.float # ) # context = 2.0*torch.ones((num_examples,1),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["sascore"] = context if "mol_weight" in context_col: # context = 0.5 * torch.randint( # 2, 20, (num_examples,), device=self.device, dtype=torch.float # ) context = torch.tensor( np.random.choice([2.0, 3.0, 4.0], (num_examples,)), device=self.device, dtype=torch.float, ) # context = 0.5 * torch.randint( # 2, 20, (num_examples, 1), device=device, dtype=torch.float # ) # context = 2.5*torch.ones((num_examples,1),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["mol_weight"] = context return output_dict def _autocast(self): if "cuda" in self.device: if self.dtype == "bfloat16" and torch.cuda.is_bf16_supported(): return torch.cuda.amp.autocast(dtype=torch.bfloat16) elif self.dtype == "float16": return torch.cuda.amp.autocast(dtype=torch.float16) else: return torch.cuda.amp.autocast(dtype=torch.float32) else: # cpu return nullcontext() @torch.no_grad() def generate( self, context_cols: Union[List[str], None, Dict[str, torch.Tensor]] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, return_context: bool = False, total_gen_steps: int = 1, use_kv_cache: bool = False, ) -> Union[List[str], Tuple[List[str], List[float]]]: """ Generates a list of SMILES. With the default options it would generate them unconditionally. Params: - context_cols : When a list the context is randomly sampled from the get_context method, when given a dictionary the context values are taken from the dictionary instead. - context_smi : Further conditioning by the usage of a molecular fragment . 
start_smiles : Can be used to start the SMILES with a specific string, the model then generates the next tokens including that start sequence. - num_samples : Controlls how many SMILES in total will be generated be the model. - max_new_tokens : Controlls the maximum length of each SMILES (in tokens) that is generated. - temperature: Controlls the randomness of the model. A temperature = 1.0 means it is the trained distribution. A temperature < 1 is more deterministic and temperature > 1 is more random - top_k : Clamps the probability distribution to the top k tokens. From these the next token is then sampled from. - return_context : Whether the context that was given to the model should be returned. - total_gen_steps : In how many sub steps the generation should be split up to. Useful when generation 10k + SMILES and wanting to chunk these into for example 10 * 1k generations with total_gen_steps = 10. - use_kv_cache: Runs the generation using kv-caching. It is faster, but takes more memory. """ with self.ctx: gens_per_step = num_samples // total_gen_steps logger.debug(f"Gens per Step: {gens_per_step}") context = None # {"context": None, "fragment" : None} out_smiles = [] with tqdm(total=total_gen_steps, desc="Batch") as pbar: for i in range(total_gen_steps): if isinstance(context_cols, dict): # TODO: Test if same length cd = { c: context_cols[c][ i * gens_per_step : (i + 1) * gens_per_step ] for c in context_cols.keys() } context_dict = {"context": cd, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[ 1:-1 ] context_tensor = torch.tensor( [incorporate_selfie] * gens_per_step, dtype=torch.long, device=self.device, ) context_dict["fragment"] = context_tensor context_cols = list(context_cols.keys()) else: context_dict = self.get_context( context_cols, context_smi, num_examples=gens_per_step ) # for k in range(num_samples): y = self.model.generate( self.tokenizer, context=context_dict["context"], fragments=context_dict["fragment"], start_smiles=start_smiles, num_gen=gens_per_step, temperature=temperature, top_k=top_k, max_length=max_new_tokens, device=self.device, cache_kv=use_kv_cache, ) new_context = {k: [] for k in context_dict["context"]} for i, sample in enumerate(y): # print(sample) mol = Chem.MolFromSmiles(sample) if mol is not None: out_smiles.append(sample) for k in new_context: new_context[k].append( context_dict["context"][k][i].unsqueeze(-1) ) for k in new_context: new_context[k] = torch.concat(new_context[k], dim=0) if context is None: context = new_context else: for k in context: context[k] = torch.concat( [context[k], new_context[k]], dim=0 ) pbar.update(1) logger.info( f"Number valid generated: {len(out_smiles) / num_samples * 100} %" ) logger.info("---------------") if return_context: return (out_smiles, context) else: return out_smiles @torch.no_grad() def generate_with_evaluation( self, context_cols: Union[List[str], None] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, cmp_context_dict: Union[Dict[str, torch.Tensor], None] = None, total_gen_steps: int = 1, use_kv_cache: bool = False, ): out_smiles, new_context = self.generate( context_cols=context_cols, context_smi=context_smi, start_smiles=start_smiles, num_samples=num_samples, max_new_tokens=max_new_tokens, 
temperature=temperature, top_k=top_k, return_context=True, total_gen_steps=total_gen_steps, use_kv_cache=use_kv_cache, ) out_dir = os.path.dirname(self.load_path) if context_cols is not None: if len(context_cols) == 1: plot_1D_condition( context_cols, os.path.join(out_dir, "plots"), new_context, out_smiles, temperature, cmp_context_dict, context_scaler=None, ) elif len(context_cols) == 2:
plot_2D_condition(
3
2023-11-28 09:50:31+00:00
16k
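Note on reading the row above: the stored file prefix breaks off at "elif len(context_cols) == 2:" and the gold continuation is the single line "plot_2D_condition(", whose source snippet also appears in the row's context list. A minimal sketch of how such a row can be consumed for next-line prediction is given below; it assumes a Hugging Face causal LM, and the model id and the row key names used here are illustrative placeholders rather than anything prescribed by this dataset.

from transformers import AutoModelForCausalLM, AutoTokenizer


def predict_next_line(row: dict, model_name: str = "bigcode/starcoderbase-1b") -> bool:
    """Return True when the model's first generated line exactly matches the gold next line."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    # Keep the *end* of an over-long prefix, since the tokens immediately
    # before the target line matter most for this prediction.
    tokenizer.truncation_side = "left"

    # Prompt = import block + truncated file body, as stored in the row.
    # The key names below mirror this dump's column layout and are assumptions.
    prompt = row["import_statement"] + "\n" + row["cropped_code"]
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096)

    outputs = model.generate(
        **inputs,
        max_new_tokens=32,   # a single source line is short
        do_sample=False,     # greedy decoding keeps the comparison deterministic
        pad_token_id=tokenizer.eos_token_id,
    )

    # Strip the prompt tokens, then keep only the first generated line.
    completion = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    first_line = completion.strip().splitlines()[0].strip() if completion.strip() else ""
    return first_line == row["next_line"].strip()

Exact string match on one greedily decoded line is only one possible scoring rule; softer metrics such as edit similarity over the same prediction are a common alternative.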
lampmerchant/tashrouter
tashrouter/router/router.py
[ { "identifier": "RoutingTable", "path": "tashrouter/router/routing_table.py", "snippet": "class RoutingTable:\n '''A Router's routing table.'''\n \n STATE_GOOD = 1\n STATE_SUS = 2\n STATE_BAD = 3\n STATE_WORST = 4\n \n def __init__(self, router):\n self._router = router\n self._entry_by_network = {}\n self._state_by_entry = {}\n self._lock = Lock()\n \n def __contains__(self, entry):\n with self._lock:\n return True if entry in self._state_by_entry else False\n \n def __iter__(self):\n with self._lock:\n retval = deque(self._state_by_entry.keys())\n yield from retval\n \n def get_by_network(self, network):\n '''Look up and return an entry in this RoutingTable by network number. Returns (entry, is_bad).'''\n with self._lock:\n entry = self._entry_by_network.get(network)\n if entry is None: return None, None\n return entry, True if self._state_by_entry[entry] in (self.STATE_BAD, self.STATE_WORST) else False\n \n def mark_bad(self, network_min, network_max):\n '''If this RoutingTable has an entry with the given network range, mark it bad. Return True if it existed, else False.'''\n with self._lock:\n cur_entries = set(self._entry_by_network.get(network) for network in range(network_min, network_max + 1))\n if len(cur_entries) != 1: return False\n cur_entry = cur_entries.pop() # this is either None or an entry with a coincident range to the new one\n if not cur_entry: return False\n if self._state_by_entry[cur_entry] != self.STATE_WORST: self._state_by_entry[cur_entry] = self.STATE_BAD\n return True\n \n def consider(self, entry):\n '''Consider a new entry for addition to the table. Return True if added, False if not.'''\n \n with self._lock:\n if entry in self._state_by_entry:\n self._state_by_entry[entry] = self.STATE_GOOD\n return True\n cur_entries = set(self._entry_by_network.get(network) for network in range(entry.network_min, entry.network_max + 1))\n if len(cur_entries) != 1: return False # this network range overlaps one that's already defined, can't do anything with it\n cur_entry = cur_entries.pop()\n \n # range currently undefined, add new entry to the table\n if cur_entry is None:\n pass\n # range fully defined by an entry that is either bad or further away, add new entry to the table\n elif cur_entry.distance >= entry.distance or self._state_by_entry[cur_entry] in (self.STATE_BAD, self.STATE_WORST):\n pass\n # range fully defined by an entry representing a route that is now further than we thought, add new entry to the table\n elif (cur_entry.next_network, cur_entry.next_node, cur_entry.port) == (entry.next_network, entry.next_node, entry.port):\n pass\n # range fully defined by a good entry that is closer than the new one, ignore new entry\n else:\n return False\n \n if cur_entry: self._state_by_entry.pop(cur_entry)\n self._state_by_entry[entry] = self.STATE_GOOD\n for network in range(entry.network_min, entry.network_max + 1): self._entry_by_network[network] = entry\n logging.debug('%s adding: %s', str(self._router), str(entry))\n return True\n \n def age(self):\n '''Age the RoutingTableEntries in this RoutingTable.'''\n entries_to_delete = set()\n networks_to_delete = deque()\n with self._lock:\n for entry in set(self._entry_by_network.values()):\n if self._state_by_entry[entry] == self.STATE_WORST:\n logging.debug('%s aging out: %s', str(self._router), str(entry))\n entries_to_delete.add(entry)\n self._state_by_entry.pop(entry)\n try:\n self._router.zone_information_table.remove_networks(entry.network_min, entry.network_max)\n except ValueError as e:\n logging.warning(\"%s 
couldn't remove networks from zone information table: %s\", str(self._router), e.args[0])\n elif self._state_by_entry[entry] == self.STATE_BAD:\n self._state_by_entry[entry] = self.STATE_WORST\n elif self._state_by_entry[entry] == self.STATE_SUS:\n self._state_by_entry[entry] = self.STATE_BAD\n elif self._state_by_entry[entry] == self.STATE_GOOD and entry.distance != 0:\n self._state_by_entry[entry] = self.STATE_SUS\n for network, entry in self._entry_by_network.items():\n if entry in entries_to_delete: networks_to_delete.append(network)\n for network in networks_to_delete: self._entry_by_network.pop(network)\n \n def entries(self):\n '''Yield entries from this RoutingTable along with their badness state.'''\n with self._lock: retval = deque(self._state_by_entry.items())\n for entry, state in retval: yield entry, True if state in (self.STATE_BAD, self.STATE_WORST) else False\n \n def set_port_range(self, port, network_min, network_max):\n '''Set the network range for a given port, unsetting any previous entries in the table that defined it.'''\n entries_to_delete = set()\n networks_to_delete = deque()\n with self._lock:\n for network, entry in self._entry_by_network.items():\n if entry.port is port and entry.distance == 0:\n entries_to_delete.add(entry)\n networks_to_delete.append(network)\n for entry in entries_to_delete:\n logging.debug('%s deleting: %s', str(self._router), str(entry))\n self._state_by_entry.pop(entry)\n try:\n self._router.zone_information_table.remove_networks(entry.network_min, entry.network_max)\n except ValueError as e:\n logging.warning(\"%s couldn't remove networks from zone information table: %s\", str(self._router), e.args[0])\n for network in networks_to_delete: self._entry_by_network.pop(network)\n entry = RoutingTableEntry(extended_network=port.extended_network,\n network_min=network_min,\n network_max=network_max,\n distance=0,\n port=port,\n next_network=0,\n next_node=0)\n logging.debug('%s adding: %s', str(self._router), str(entry))\n for network in range(network_min, network_max + 1): self._entry_by_network[network] = entry\n self._state_by_entry[entry] = self.STATE_GOOD" }, { "identifier": "ZoneInformationTable", "path": "tashrouter/router/zone_information_table.py", "snippet": "class ZoneInformationTable:\n '''Zone Information Table (ZIT).'''\n \n def __init__(self, router):\n self._router = router\n self._network_min_to_network_max = {}\n self._network_min_to_zone_name_set = {}\n self._network_min_to_default_zone_name = {}\n self._zone_name_to_network_min_set = {}\n self._ucased_zone_name_to_zone_name = {}\n self._lock = Lock()\n \n def _check_range(self, network_min, network_max=None):\n looked_up_network_max = self._network_min_to_network_max.get(network_min)\n if network_max is None:\n if looked_up_network_max is None:\n raise ValueError('network range %d-? 
does not exist' % network_min)\n else:\n return looked_up_network_max\n elif looked_up_network_max == network_max: # if network range exists as given\n return network_max\n elif looked_up_network_max is not None:\n raise ValueError('network range %d-%d overlaps %d-%d' % (network_min, network_max, network_min, looked_up_network_max))\n else: # check for overlap\n for existing_min, existing_max in self._network_min_to_network_max.items():\n if existing_min > network_max or existing_max < network_min: continue\n raise ValueError('network range %d-%d overlaps %d-%d' % (network_min, network_max, existing_min, existing_max))\n return None\n \n def add_networks_to_zone(self, zone_name, network_min, network_max=None):\n '''Add a range of networks to a zone, adding the zone if it isn't in the table.'''\n \n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n ucased_zone_name = ucase(zone_name)\n \n with self._lock:\n \n if ucased_zone_name in self._ucased_zone_name_to_zone_name:\n zone_name = self._ucased_zone_name_to_zone_name[ucased_zone_name]\n else:\n self._ucased_zone_name_to_zone_name[ucased_zone_name] = zone_name\n self._zone_name_to_network_min_set[zone_name] = set()\n \n check_range = self._check_range(network_min, network_max)\n if check_range:\n network_max = check_range\n self._network_min_to_zone_name_set[network_min].add(zone_name)\n now_default = False\n else:\n self._network_min_to_network_max[network_min] = network_max\n self._network_min_to_zone_name_set[network_min] = set((zone_name,))\n self._network_min_to_default_zone_name[network_min] = zone_name\n now_default = True\n \n logging.debug('%s adding network range %d-%d to zone %s%s', str(self._router), network_min, network_max,\n zone_name.decode('mac_roman', 'replace'), ' (now default zone for this range)' if now_default else '')\n self._zone_name_to_network_min_set[zone_name].add(network_min)\n \n def remove_networks(self, network_min, network_max=None):\n '''Remove a range of networks from all zones, removing associated zones if now empty of networks.'''\n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n with self._lock:\n network_max = self._check_range(network_min, network_max)\n if not network_max: return\n logging.debug('%s removing network range %d-%d from all zones', str(self._router), network_min, network_max)\n for zone_name in self._network_min_to_zone_name_set[network_min]:\n s = self._zone_name_to_network_min_set[zone_name]\n s.remove(network_min)\n if not s:\n logging.debug('%s removing zone %s because it no longer contains any networks', str(self._router),\n zone_name.decode('mac_roman', 'replace'))\n self._zone_name_to_network_min_set.pop(zone_name)\n self._ucased_zone_name_to_zone_name.pop(ucase(zone_name))\n self._network_min_to_default_zone_name.pop(network_min)\n self._network_min_to_zone_name_set.pop(network_min)\n self._network_min_to_network_max.pop(network_min)\n \n def zones(self):\n '''Return the zones in this ZIT.'''\n with self._lock:\n return list(self._zone_name_to_network_min_set.keys())\n \n def zones_in_network_range(self, network_min, network_max=None):\n '''Return a deque containing the names of all zones in the given range of networks, default zone name first.'''\n if network_max and network_max < network_min: raise ValueError('range %d-%d is backwards' % (network_min, network_max))\n with self._lock:\n if not self._check_range(network_min, 
network_max): return deque()\n default_zone_name = self._network_min_to_default_zone_name[network_min]\n retval = deque(zone_name for zone_name in self._network_min_to_zone_name_set[network_min] if zone_name != default_zone_name)\n retval.appendleft(default_zone_name)\n return retval\n \n def networks_in_zone(self, zone_name):\n '''Return a deque containing the network numbers of all networks in the given zone.'''\n with self._lock:\n zone_name = self._ucased_zone_name_to_zone_name.get(ucase(zone_name))\n if zone_name is None: return deque()\n retval = deque()\n for network_min in self._zone_name_to_network_min_set[zone_name]:\n retval.extend(range(network_min, self._network_min_to_network_max[network_min] + 1))\n return retval" }, { "identifier": "Datagram", "path": "tashrouter/datagram.py", "snippet": "class Datagram:\n '''DDP datagram.'''\n \n MAX_DATA_LENGTH = 586\n \n hop_count: int\n destination_network: int\n source_network: int\n destination_node: int\n source_node: int\n destination_socket: int\n source_socket: int\n ddp_type: int\n data: bytes\n \n @classmethod\n def from_long_header_bytes(cls, data):\n '''Construct a Datagram object from bytes in the long-header format and raise ValueErrors if there are issues.'''\n if len(data) < 13: raise ValueError('data too short, must be at least 13 bytes for long-header DDP datagram')\n (first, second, checksum, destination_network, source_network, destination_node, source_node, destination_socket, source_socket,\n ddp_type) = struct.unpack('>BBHHHBBBBB', data[:13])\n if first & 0xC0: raise ValueError('invalid long DDP header, top two bits of first byte must be zeroes')\n hop_count = (first & 0x3C) >> 2\n length = (first & 0x3) << 8 | second\n if length > 13 + cls.MAX_DATA_LENGTH:\n raise ValueError('invalid long DDP header, length %d is greater than %d' % (length, cls.MAX_DATA_LENGTH))\n if length != len(data):\n raise ValueError('invalid long DDP header, length field says %d but actual length is %d' % (length, len(data)))\n if checksum != 0:\n calc_checksum = ddp_checksum(data[4:])\n if calc_checksum != checksum:\n raise ValueError('invalid long DDP header, checksum is 0x%04X but should be 0x%04X' % (checksum, calc_checksum))\n return cls(hop_count=hop_count,\n destination_network=destination_network,\n source_network=source_network,\n destination_node=destination_node,\n source_node=source_node,\n destination_socket=destination_socket,\n source_socket=source_socket,\n ddp_type=ddp_type,\n data=data[13:])\n \n @classmethod\n def from_short_header_bytes(cls, destination_node, source_node, data):\n '''Construct a Datagram object from bytes in the short-header format and raise ValueErrors if there are issues.'''\n if len(data) < 5: raise ValueError('data too short, must be at least 5 bytes for short-header DDP datagram')\n first, second, destination_socket, source_socket, ddp_type = struct.unpack('>BBBBB', data[0:5])\n if first & 0xFC: raise ValueError('invalid short DDP header, top six bits of first byte must be zeroes')\n length = (first & 0x3) << 8 | second\n if length > 5 + cls.MAX_DATA_LENGTH:\n raise ValueError('invalid short DDP header, length %d is greater than %d' % (length, cls.MAX_DATA_LENGTH))\n if length != len(data):\n raise ValueError('invalid short DDP header, length field says %d but actual length is %d' % (length, len(data)))\n return cls(hop_count=0,\n destination_network=0,\n source_network=0,\n destination_node=destination_node,\n source_node=source_node,\n destination_socket=destination_socket,\n 
source_socket=source_socket,\n ddp_type=ddp_type,\n data=data[5:])\n \n def _check_ranges(self):\n '''Check that the Datagram's parameters are in range, raise ValueError if not.'''\n for name, min_value, max_value in (('hop count', 0, 15),\n ('destination network', 0, 65534),\n ('source network', 0, 65534),\n ('destination node', 0, 255),\n ('source node', 1, 254),\n ('destination socket', 0, 255),\n ('source socket', 0, 255),\n ('DDP type', 0, 255)):\n value = getattr(self, name.lower().replace(' ', '_'))\n if not min_value <= value <= max_value:\n raise ValueError('invalid %s %d, must be in range %d-%d' % (name, value, min_value, max_value))\n \n def as_long_header_bytes(self):\n '''Return this Datagram in long-header format as bytes and raise ValueErrors if there are issues.'''\n self._check_ranges()\n if len(self.data) > self.MAX_DATA_LENGTH:\n raise ValueError('data length %d is greater than max length %d' % (len(self.data), self.MAX_DATA_LENGTH))\n header = struct.pack('>HHBBBBB',\n self.destination_network,\n self.source_network,\n self.destination_node,\n self.source_node,\n self.destination_socket,\n self.source_socket,\n self.ddp_type)\n data = header + self.data\n length = 4 + len(data)\n checksum = 0\n for byte in data:\n checksum += byte\n checksum = (checksum & 0x7FFF) << 1 | (1 if checksum & 0x8000 else 0)\n checksum = checksum or 0xFFFF # because a zero value in the checksum field means one was not calculated\n header = struct.pack('>BBH',\n (self.hop_count & 0xF) << 2 | (length & 0x300) >> 8,\n length & 0xFF,\n checksum)\n return header + data\n \n def as_short_header_bytes(self):\n '''Return this Datagram in short-header format as bytes and raise ValueErrors if there are issues.'''\n if self.hop_count > 0:\n raise ValueError('invalid hop count %d, short-header datagrams may not have non-zero hop count' % self.hop_count)\n self._check_ranges()\n if len(self.data) > self.MAX_DATA_LENGTH:\n raise ValueError('data length %d is greater than max length %d' % (len(self.data), self.MAX_DATA_LENGTH))\n length = 5 + len(self.data)\n header = struct.pack('>BBBBB',\n (length & 0x300) >> 8,\n length & 0xFF,\n self.destination_socket,\n self.source_socket,\n self.ddp_type)\n return header + self.data\n \n def copy(self, **kwargs):\n '''Return a copy of this Datagram, replacing params specified by kwargs, if any.'''\n return dataclasses.replace(self, **kwargs)\n \n def hop(self):\n '''Return a copy of this Datagram with the hop count incremented by one.'''\n return self.copy(hop_count=self.hop_count + 1)" }, { "identifier": "EchoService", "path": "tashrouter/service/echo.py", "snippet": "class EchoService(Service):\n '''A Service which implements AppleTalk Echo Protocol (AEP).'''\n \n ECHO_SAS = 4\n ECHO_DDP_TYPE = 4\n \n ECHO_FUNC_REQUEST_BYTE = b'\\x01'\n ECHO_FUNC_REPLY_BYTE = b'\\x02'\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n if datagram.ddp_type != self.ECHO_DDP_TYPE: continue\n if not datagram.data: continue\n if datagram.data[0:1] != self.ECHO_FUNC_REQUEST_BYTE: continue\n 
router.reply(datagram, rx_port, self.ECHO_DDP_TYPE, self.ECHO_FUNC_REPLY_BYTE + datagram.data[1:])\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "NameInformationService", "path": "tashrouter/service/name_information.py", "snippet": "class NameInformationService(Service):\n '''A Service that implements Name Binding Protocol (NBP).'''\n \n NBP_SAS = 2\n NBP_DDP_TYPE = 2\n \n NBP_CTRL_BRRQ = 1\n NBP_CTRL_LKUP = 2\n NBP_CTRL_LKUP_REPLY = 3\n NBP_CTRL_FWDREQ = 4\n \n MAX_FIELD_LEN = 32\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _run(self, router):\n \n self.started_event.set()\n \n while True:\n \n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n \n if datagram.ddp_type != self.NBP_DDP_TYPE: continue\n if len(datagram.data) < 12: continue\n func_tuple_count, nbp_id, req_network, req_node, req_socket, _, object_field = struct.unpack('>BBHBBBB', datagram.data[:8])\n func = func_tuple_count >> 4\n tuple_count = func_tuple_count & 0xF\n if tuple_count != 1 or func not in (self.NBP_CTRL_BRRQ, self.NBP_CTRL_FWDREQ): continue\n if object_field < 1 or object_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 8 + object_field: continue\n type_field = datagram.data[8 + object_field]\n if type_field < 1 or type_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 9 + object_field + type_field: continue\n zone_field = datagram.data[9 + object_field + type_field]\n if zone_field > self.MAX_FIELD_LEN: continue\n if len(datagram.data) < 10 + object_field + type_field + zone_field: continue\n zone_field = datagram.data[10 + object_field + type_field:10 + object_field + type_field + zone_field] or b'*'\n type_field = datagram.data[9 + object_field:9 + object_field + type_field]\n object_field = datagram.data[8:8 + object_field]\n \n common_data = b''.join((struct.pack('>BHBBBB', nbp_id, req_network, req_node, req_socket, 0, len(object_field)),\n object_field,\n struct.pack('>B', len(type_field)),\n type_field,\n struct.pack('>B', len(zone_field)),\n zone_field))\n lkup_data = struct.pack('>B', (self.NBP_CTRL_LKUP << 4) | 1) + common_data\n fwdreq_data = struct.pack('>B', (self.NBP_CTRL_FWDREQ << 4) | 1) + common_data\n \n if func == self.NBP_CTRL_BRRQ:\n \n # if zone is *, try to sub in the zone name associated with the nonextended network whence the BrRq comes\n if zone_field == b'*':\n if rx_port.extended_network: continue # BrRqs from extended networks must provide zone name\n if rx_port.network:\n entry, _ = router.routing_table.get_by_network(rx_port.network)\n if entry:\n try:\n zones = router.zone_information_table.zones_in_network_range(entry.network_min)\n except ValueError:\n pass\n else:\n if len(zones) == 1: zone_field = zones[0] # there should not be more than one zone\n \n # if zone is still *, just broadcast a LkUp on the requesting network and call it done\n if zone_field == b'*':\n rx_port.send(0x0000, 0xFF, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=rx_port.network,\n destination_node=0xFF,\n source_node=rx_port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n 
ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n # we know the zone, so multicast LkUps to directly-connected networks and send FwdReqs to non-directly-connected ones\n else:\n entries = set(router.routing_table.get_by_network(network)\n for network in router.zone_information_table.networks_in_zone(zone_field))\n entries.discard((None, None))\n for entry, _ in entries:\n if entry.distance == 0:\n entry.port.multicast(zone_field, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=entry.port.network,\n destination_node=0xFF,\n source_node=entry.port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n else:\n router.route(Datagram(hop_count=0,\n destination_network=entry.network_min,\n source_network=0,\n destination_node=0x00,\n source_node=0,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=fwdreq_data))\n \n elif func == self.NBP_CTRL_FWDREQ:\n \n entry, _ = router.routing_table.get_by_network(datagram.destination_network)\n if entry is None or entry.distance != 0: continue # FwdReq thinks we're directly connected to this network but we're not\n entry.port.multicast(zone_field, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=entry.port.network,\n destination_node=0xFF,\n source_node=entry.port.node,\n destination_socket=self.NBP_SAS,\n source_socket=self.NBP_SAS,\n ddp_type=self.NBP_DDP_TYPE,\n data=lkup_data))\n \n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "RoutingTableAgingService", "path": "tashrouter/service/routing_table_aging.py", "snippet": "class RoutingTableAgingService(Service):\n '''A Service which ages the Router's RoutingTable on a regular basis.'''\n \n DEFAULT_TIMEOUT = 20 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.stop_requested_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.stop_requested_event.set()\n self.stopped_event.wait()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n if self.stop_requested_event.wait(timeout=self.timeout): break\n router.routing_table.age()\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n pass" }, { "identifier": "RtmpRespondingService", "path": "tashrouter/service/rtmp/responding.py", "snippet": "class RtmpRespondingService(Service, RtmpService):\n '''A Service which responds to inbound RTMP Datagrams and maintains the Router's RoutingTable.'''\n \n def __init__(self):\n self.thread = None\n self.started_event = Event()\n self.queue = Queue()\n self.stop_flag = object()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.queue.join()\n \n def _run(self, router):\n \n while True:\n \n if self.started_event.is_set():\n self.queue.task_done()\n else:\n self.started_event.set()\n \n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n \n if datagram.ddp_type == self.RTMP_DDP_TYPE_DATA:\n \n # process header\n if len(datagram.data) < 4: continue # invalid, datagram too short\n sender_network, id_length, sender_node = 
struct.unpack('>HBB', datagram.data[0:4])\n if id_length != 8: continue # invalid, AppleTalk node numbers are only 8 bits in length\n data = datagram.data[4:]\n if rx_port.extended_network:\n if len(data) < 6: continue # invalid, datagram too short to contain at least one extended network tuple\n sender_network_min, range_distance, sender_network_max, rtmp_version = struct.unpack('>HBHB', data[0:6])\n if range_distance != 0x80: continue # invalid, first tuple must be the sender's extended network tuple\n else:\n if len(data) < 3: continue\n sender_network_min = sender_network_max = sender_network\n zero, rtmp_version = struct.unpack('>HB', data[0:3])\n if zero != 0: continue # invalid, this word must be zero on a nonextended network\n data = data[3:]\n if rtmp_version != self.RTMP_VERSION: continue # invalid, don't recognize this RTMP format\n \n # interpret tuples\n tuples = deque()\n data_idx = 0\n while True:\n packed = data[data_idx:data_idx + 3]\n if len(packed) != 3: break\n network_min, range_distance = struct.unpack('>HB', packed)\n if range_distance & 0x80:\n extended_network = True\n packed = data[data_idx + 3:data_idx + 6]\n if len(packed) != 3: break\n network_max, _ = struct.unpack('>HB', packed)\n data_idx += 6\n else:\n extended_network = False\n network_max = network_min\n data_idx += 3\n tuples.append((extended_network, network_min, network_max, range_distance & 0x1F))\n if data_idx != len(data): continue # invalid, tuples did not end where expected\n \n # if this Port doesn't know its network range yet, accept that this is from the network's seed router\n if rx_port.network_min == rx_port.network_max == 0: rx_port.set_network_range(sender_network_min, sender_network_max)\n \n # resolve the given tuples with the current RoutingTable\n for extended_network, network_min, network_max, distance in tuples:\n # if the entry is too many hops away or is a notify-neighbor entry, mark any entry we have as bad\n if distance >= 15:\n router.routing_table.mark_bad(network_min, network_max)\n # otherwise have the table consider a new entry based on this tuple\n else:\n router.routing_table.consider(RoutingTableEntry(extended_network=extended_network,\n network_min=network_min,\n network_max=network_max,\n distance=distance + 1,\n port=rx_port,\n next_network=sender_network,\n next_node=sender_node))\n \n elif datagram.ddp_type != self.RTMP_DDP_TYPE_REQUEST or not datagram.data:\n \n continue\n \n elif datagram.data[0] == self.RTMP_FUNC_REQUEST:\n \n if 0 in (rx_port.network_min, rx_port.network_max): continue\n if datagram.hop_count != 0: continue # we have to send responses out of the same port they came in, no routing\n response_data = struct.pack('>HBB', rx_port.network, 8, rx_port.node)\n if rx_port.extended_network:\n response_data += struct.pack('>HBHB', rx_port.network_min, 0x80, rx_port.network_max, self.RTMP_VERSION)\n router.reply(datagram, rx_port, self.RTMP_DDP_TYPE_DATA, response_data)\n \n elif datagram.data[0] in (self.RTMP_FUNC_RDR_SPLIT_HORIZON, self.RTMP_FUNC_RDR_NO_SPLIT_HORIZON):\n \n split_horizon = True if datagram.data[0] == self.RTMP_FUNC_RDR_SPLIT_HORIZON else False\n for datagram_data in self.make_routing_table_datagram_data(router, rx_port, split_horizon):\n router.reply(datagram, rx_port, self.RTMP_DDP_TYPE_DATA, datagram_data)\n \n self.queue.task_done()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "RtmpSendingService", "path": "tashrouter/service/rtmp/sending.py", "snippet": "class 
RtmpSendingService(Service, RtmpService):\n '''A Service which sends RTMP Datagrams containing the Router's RoutingTable to its Ports on a regular basis.'''\n \n DEFAULT_TIMEOUT = 10 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.queue = Queue()\n self.stop_flag = object()\n self.force_send_flag = object()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.queue.join()\n \n def _run(self, router):\n self.started_event.set()\n while True:\n try:\n item = self.queue.get(timeout=self.timeout)\n except Empty:\n item = None\n if item is self.stop_flag: break\n for port in router.ports:\n if 0 in (port.node, port.network): continue\n for datagram_data in self.make_routing_table_datagram_data(router, port):\n port.send(0x0000, 0xFF, Datagram(hop_count=0,\n destination_network=0x0000,\n source_network=port.network,\n destination_node=0xFF,\n source_node=port.node,\n destination_socket=self.RTMP_SAS,\n source_socket=self.RTMP_SAS,\n ddp_type=self.RTMP_DDP_TYPE_DATA,\n data=datagram_data))\n if item is not None: self.queue.task_done()\n self.queue.task_done()\n \n def inbound(self, datagram, rx_port):\n pass\n \n def force_send(self):\n '''Force this service to immediately send an RTMP Datagram for testing purposes.'''\n self.queue.put(self.force_send_flag)\n self.queue.join()" }, { "identifier": "ZipRespondingService", "path": "tashrouter/service/zip/responding.py", "snippet": "class ZipRespondingService(Service, ZipService):\n '''A Service that implements Zone Information Protocol (ZIP).'''\n \n def __init__(self):\n self.thread = None\n self.queue = Queue()\n self.stop_flag = object()\n self.started_event = Event()\n self.stopped_event = Event()\n self._pending_network_zone_name_set = {}\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.queue.put(self.stop_flag)\n self.stopped_event.wait()\n \n def _reply(self, router, datagram):\n \n if len(datagram.data) < 2: return\n func, count = struct.unpack('>BB', datagram.data[:2])\n data = datagram.data[2:]\n \n networks_and_zone_names = deque()\n while len(data) >= 3:\n network_min, zone_name_length = struct.unpack('>HB', data[:3])\n zone_name = data[3:3 + zone_name_length]\n if len(zone_name) != zone_name_length: break\n data = data[3 + zone_name_length:]\n if zone_name_length == 0: continue\n networks_and_zone_names.append((network_min, zone_name))\n if not networks_and_zone_names: return\n \n network_min_to_network_max = {}\n for entry in router.routing_table:\n network_min_to_network_max[entry.network_min] = entry.network_max\n \n if func == self.ZIP_FUNC_REPLY:\n for network_min, zone_name in networks_and_zone_names:\n try:\n network_max = network_min_to_network_max[network_min]\n except KeyError:\n logging.warning('%s ZIP reply refers to a network range (starting with %d) with which we are not familiar', str(router), \n network_min)\n else:\n try:\n router.zone_information_table.add_networks_to_zone(zone_name, network_min, network_max)\n except ValueError as e:\n logging.warning(\"%s ZIP reply couldn't be added to zone information table: %s\", str(router), e.args[0])\n elif func == self.ZIP_FUNC_EXT_REPLY:\n #TODO this code is fragile and I do not like it\n network_min = None\n for network_min, 
zone_name in networks_and_zone_names:\n if network_min not in self._pending_network_zone_name_set: self._pending_network_zone_name_set[network_min] = set()\n self._pending_network_zone_name_set[network_min].add(zone_name)\n if network_min is not None and len(self._pending_network_zone_name_set.get(network_min, ())) >= count and count >= 1:\n for zone_name in self._pending_network_zone_name_set.pop(network_min):\n try:\n network_max = network_min_to_network_max[network_min]\n except KeyError:\n logging.warning('%s ZIP reply refers to a network range (starting with %d) with which we are not familiar', str(router),\n network_min)\n else:\n try:\n router.zone_information_table.add_networks_to_zone(zone_name, network_min, network_max)\n except ValueError as e:\n logging.warning(\"%s ZIP reply couldn't be added to zone information table: %s\", str(router), e.args[0])\n \n @classmethod\n def _query(cls, router, datagram, rx_port):\n if len(datagram.data) < 4: return\n network_count = datagram.data[1]\n if len(datagram.data) != (network_count * 2) + 2: return\n # in imitation of AppleTalk Internet Router, we only respond with extended replies even if a regular reply would fit\n # we also give one list per requested network even if the requested networks are in the same range and the lists are the same;\n # that is, if the sender requests zones for networks 3 and 4 and there is a zones list for networks 3-5, we will reply with the\n # zone list for network 3 twice... seems silly, but this is how ATIR does it so *shrug*\n for network_idx in range(network_count):\n requested_network = struct.unpack('>H', datagram.data[(network_idx * 2) + 2:(network_idx * 2) + 4])[0]\n entry, _ = router.routing_table.get_by_network(requested_network)\n if entry is None: continue\n try:\n zone_names = router.zone_information_table.zones_in_network_range(entry.network_min)\n except ValueError:\n continue\n datagram_data = deque()\n datagram_data_length = 0\n for zone_name in chain(zone_names, (None,)):\n list_item = None if zone_name is None else struct.pack('>HB', entry.network_min, len(zone_name)) + zone_name\n if list_item is None or datagram_data_length + len(list_item) > Datagram.MAX_DATA_LENGTH - 2:\n router.reply(datagram, rx_port, cls.ZIP_DDP_TYPE, struct.pack('>BB', cls.ZIP_FUNC_EXT_REPLY,\n len(zone_names)) + b''.join(datagram_data))\n datagram_data = deque()\n datagram_data_length = 0\n if list_item is not None:\n datagram_data.append(list_item)\n datagram_data_length += len(list_item)\n \n @classmethod\n def _get_net_info(cls, router, datagram, rx_port):\n if 0 in (rx_port.network, rx_port.network_min, rx_port.network_max): return\n if len(datagram.data) < 7: return\n if datagram.data[1:6] != b'\\0\\0\\0\\0\\0': return\n given_zone_name = datagram.data[7:7 + datagram.data[6]]\n given_zone_name_ucase = ucase(given_zone_name)\n flags = cls.ZIP_GETNETINFO_ZONE_INVALID | cls.ZIP_GETNETINFO_ONLY_ONE_ZONE\n default_zone_name = None\n number_of_zones = 0\n multicast_address = b''\n try:\n zone_names = router.zone_information_table.zones_in_network_range(rx_port.network_min, rx_port.network_max)\n except ValueError as e:\n logging.warning(\"%s couldn't get zone names in port network range for GetNetInfo: %s\", router, e.args[0])\n return\n for zone_name in zone_names:\n number_of_zones += 1\n if default_zone_name is None:\n # zones_in_network_range returns the default zone first\n default_zone_name = zone_name\n multicast_address = rx_port.multicast_address(zone_name)\n if ucase(zone_name) == 
given_zone_name_ucase:\n flags &= ~cls.ZIP_GETNETINFO_ZONE_INVALID\n multicast_address = rx_port.multicast_address(zone_name)\n if number_of_zones > 1:\n flags &= ~cls.ZIP_GETNETINFO_ONLY_ONE_ZONE\n if not flags & cls.ZIP_GETNETINFO_ZONE_INVALID: break\n if number_of_zones == 0: return\n if not multicast_address: flags |= cls.ZIP_GETNETINFO_USE_BROADCAST\n reply_data = b''.join((\n struct.pack('>BBHHB', cls.ZIP_FUNC_GETNETINFO_REPLY, flags, rx_port.network_min, rx_port.network_max, len(given_zone_name)),\n given_zone_name,\n struct.pack('>B', len(multicast_address)),\n multicast_address,\n struct.pack('>B', len(default_zone_name)) if flags & cls.ZIP_GETNETINFO_ZONE_INVALID else b'',\n default_zone_name if flags & cls.ZIP_GETNETINFO_ZONE_INVALID else b''))\n router.reply(datagram, rx_port, cls.ZIP_DDP_TYPE, reply_data)\n \n @classmethod\n def _get_my_zone(cls, router, datagram, rx_port):\n _, _, tid, _, _, start_index = struct.unpack('>BBHBBH', datagram.data)\n if start_index != 0: return\n entry, _ = router.routing_table.get_by_network(datagram.source_network)\n if entry is None: return\n try:\n zone_name = next(iter(router.zone_information_table.zones_in_network_range(entry.network_min)), None)\n except ValueError:\n return\n if not zone_name: return\n router.reply(datagram, rx_port, cls.ATP_DDP_TYPE, struct.pack('>BBHBBHB',\n cls.ATP_FUNC_TRESP | cls.ATP_EOM,\n 0,\n tid,\n 0,\n 0,\n 1,\n len(zone_name)) + zone_name)\n \n @classmethod\n def _get_zone_list(cls, router, datagram, rx_port, local=False):\n _, _, tid, _, _, start_index = struct.unpack('>BBHBBH', datagram.data)\n if local:\n try:\n zone_iter = iter(router.zone_information_table.zones_in_network_range(rx_port.network_min, rx_port.network_max))\n except ValueError as e:\n logging.warning(\"%s couldn't get zone names in port network range for GetLocalZones: %s\", router, e.args[0])\n return\n else:\n zone_iter = iter(router.zone_information_table.zones())\n for _ in range(start_index - 1): next(zone_iter, None) # skip over start_index-1 entries (index is 1-relative)\n last_flag = 0\n zone_list = deque()\n num_zones = 0\n data_length = 8\n while zone_name := next(zone_iter, None):\n if data_length + 1 + len(zone_name) > Datagram.MAX_DATA_LENGTH: break\n zone_list.append(struct.pack('>B', len(zone_name)))\n zone_list.append(zone_name)\n num_zones += 1\n data_length += 1 + len(zone_name)\n else:\n last_flag = 1\n router.reply(datagram, rx_port, cls.ATP_DDP_TYPE, struct.pack('>BBHBBH',\n cls.ATP_FUNC_TRESP | cls.ATP_EOM,\n 0,\n tid,\n last_flag,\n 0,\n num_zones) + b''.join(zone_list))\n \n def _run(self, router):\n self.started_event.set()\n while True:\n item = self.queue.get()\n if item is self.stop_flag: break\n datagram, rx_port = item\n if datagram.ddp_type == self.ZIP_DDP_TYPE:\n if not datagram.data: continue\n if datagram.data[0] in (self.ZIP_FUNC_REPLY, self.ZIP_FUNC_EXT_REPLY):\n self._reply(router, datagram)\n elif datagram.data[0] == self.ZIP_FUNC_QUERY:\n self._query(router, datagram, rx_port)\n elif datagram.data[0] == self.ZIP_FUNC_GETNETINFO_REQUEST:\n self._get_net_info(router, datagram, rx_port)\n elif datagram.ddp_type == self.ATP_DDP_TYPE:\n if len(datagram.data) != 8: continue\n control, bitmap, _, func, zero, _ = struct.unpack('>BBHBBH', datagram.data)\n if control != self.ATP_FUNC_TREQ or bitmap != 1 or zero != 0: continue\n if func == self.ZIP_ATP_FUNC_GETMYZONE:\n self._get_my_zone(router, datagram, rx_port)\n elif func == self.ZIP_ATP_FUNC_GETZONELIST:\n self._get_zone_list(router, datagram, rx_port, 
local=False)\n elif func == self.ZIP_ATP_FUNC_GETLOCALZONES:\n self._get_zone_list(router, datagram, rx_port, local=True)\n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n self.queue.put((datagram, rx_port))" }, { "identifier": "ZipSendingService", "path": "tashrouter/service/zip/sending.py", "snippet": "class ZipSendingService(Service, ZipService):\n '''A Service which sends ZIP queries to fill out its router's Zone Information Table.'''\n \n DEFAULT_TIMEOUT = 10 # seconds\n \n def __init__(self, timeout=DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.thread = None\n self.started_event = Event()\n self.stop_requested_event = Event()\n self.stopped_event = Event()\n \n def start(self, router):\n self.thread = Thread(target=self._run, args=(router,))\n self.thread.start()\n self.started_event.wait()\n \n def stop(self):\n self.stop_requested_event.set()\n self.stopped_event.wait()\n \n def _run(self, router):\n \n self.started_event.set()\n \n while True:\n \n if self.stop_requested_event.wait(timeout=self.timeout): break\n \n queries = {} # (port, network, node) -> network_mins\n for entry in router.routing_table:\n try:\n if next(iter(router.zone_information_table.zones_in_network_range(entry.network_min, entry.network_max)), None): continue\n except ValueError as e:\n logging.warning('%s apparent disjoin between routing table and zone information table: %s', router, e.args[0])\n continue\n if entry.distance == 0:\n key = (entry.port, 0x0000, 0xFF)\n else:\n key = (entry.port, entry.next_network, entry.next_node)\n if key not in queries: queries[key] = deque()\n queries[key].append(entry.network_min)\n \n for port_network_node, network_mins in queries.items():\n port, network, node = port_network_node\n if 0 in (port.node, port.network): continue\n datagram_data = deque()\n for network_min in chain(network_mins, (None,)):\n if network_min is None or len(datagram_data) * 2 + 4 > Datagram.MAX_DATA_LENGTH:\n datagram_data.appendleft(struct.pack('>BB', self.ZIP_FUNC_QUERY, len(datagram_data)))\n port.send(network, node, Datagram(hop_count=0,\n destination_network=network,\n source_network=port.network,\n destination_node=node,\n source_node=port.node,\n destination_socket=self.ZIP_SAS,\n source_socket=self.ZIP_SAS,\n ddp_type=self.ZIP_DDP_TYPE,\n data=b''.join(datagram_data)))\n if network_min is not None: datagram_data = deque((struct.pack('>H', network_min),))\n else:\n datagram_data.append(struct.pack('>H', network_min))\n \n self.stopped_event.set()\n \n def inbound(self, datagram, rx_port):\n pass" } ]
import logging
from .routing_table import RoutingTable
from .zone_information_table import ZoneInformationTable
from ..datagram import Datagram
from ..service.echo import EchoService
from ..service.name_information import NameInformationService
from ..service.routing_table_aging import RoutingTableAgingService
from ..service.rtmp.responding import RtmpRespondingService
from ..service.rtmp.sending import RtmpSendingService
from ..service.zip.responding import ZipRespondingService
from ..service.zip.sending import ZipSendingService
11,972
'''The heart of this whole affair.'''

class Router:
  '''A router, a device which sends Datagrams to Ports and runs Services.'''

  def __init__(self, short_str, ports):
    self._short_str = short_str
    self.ports = ports
    self._services = (
      (EchoService.ECHO_SAS, EchoService()),
'''The heart of this whole affair.'''

class Router:
  '''A router, a device which sends Datagrams to Ports and runs Services.'''

  def __init__(self, short_str, ports):
    self._short_str = short_str
    self.ports = ports
    self._services = (
      (EchoService.ECHO_SAS, EchoService()),
(NameInformationService.NBP_SAS, NameInformationService()),
4
2023-12-02 15:17:07+00:00
16k
jags111/ComfyUI_Jags_Audiotools
libs/dance_diffusion/dd/inference.py
[ { "identifier": "SchedulerType", "path": "libs/diffusion_library/scheduler.py", "snippet": "class SchedulerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_SPLICED_DDPM_COSINE = 'V_SPLICED_DDPM_COSINE'\n V_LOG = 'V_LOG'\n V_CRASH = 'V_CRASH'\n \n K_KARRAS = 'K_KARRAS'\n K_EXPONENTIAL = 'K_EXPONENTIAL'\n K_POLYEXPONENTIAL = 'K_POLYEXPONENTIAL'\n K_VP = 'K_VP'\n \n @classmethod\n def is_v_scheduler(cls, value):\n return value[0] == 'V'\n \n def get_step_list(self, n: int, device: str, **schedule_args):\n #if SchedulerType.is_v_scheduler(self):\n # n -= 1\n\n if self == SchedulerType.V_DDPM:\n return torch.nn.functional.pad(vscheduling.get_ddpm_schedule(torch.linspace(1, 0, n)), [0,1], value=0.0).to(device)\n elif self == SchedulerType.V_SPLICED_DDPM_COSINE:\n return vscheduling.get_spliced_ddpm_cosine_schedule(torch.linspace(1, 0, n + 1)).to(device)\n elif self == SchedulerType.V_LOG:\n return torch.nn.functional.pad(\n vscheduling.get_log_schedule(\n torch.linspace(1, 0, n),\n schedule_args.get('min_log_snr', -10.0),\n schedule_args.get('max_log_snr', 10.0)\n ),\n [0,1],\n value=0.0\n ).to(device)\n elif self == SchedulerType.V_CRASH:\n sigma = torch.sin(torch.linspace(1, 0, n + 1) * math.pi / 2) ** 2\n alpha = (1 - sigma ** 2) ** 0.5\n return vscheduling.alpha_sigma_to_t(alpha, sigma).to(device)\n elif self == SchedulerType.K_KARRAS:\n return kscheduling.get_sigmas_karras(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 7.0),\n device = device\n )\n elif self == SchedulerType.K_EXPONENTIAL:\n return kscheduling.get_sigmas_exponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n device = device\n )\n elif self == SchedulerType.K_POLYEXPONENTIAL:\n return kscheduling.get_sigmas_polyexponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 1.0),\n device = device\n )\n elif self == SchedulerType.K_VP:\n return kscheduling.get_sigmas_vp(\n n,\n schedule_args.get('beta_d', 1.205),\n schedule_args.get('beta_min', 0.09),\n schedule_args.get('eps_s', 0.001),\n device = device\n )\n else:\n raise Exception(f\"No get_step_list implementation for scheduler_type '{self}'\")" }, { "identifier": "SamplerType", "path": "libs/diffusion_library/sampler.py", "snippet": "class SamplerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_DDIM = 'V_DDIM'\n V_PRK = 'V_PRK'\n V_PIE = 'V_PIE'\n V_PLMS = 'V_PLMS'\n V_PLMS2 = 'V_PLMS2'\n V_IPLMS = 'V_IPLMS'\n \n K_EULER = 'K_EULER'\n K_EULERA = 'K_EULERA'\n K_HEUN = 'K_HEUN'\n K_DPM2 = 'K_DPM2'\n K_DPM2A = 'K_DPM2A'\n K_LMS = 'K_LMS'\n K_DPMF = 'K_DPMF'\n K_DPMA = 'K_DPMA'\n K_DPMPP2SA = 'K_DPMPP2SA'\n K_DPMPP2M = 'K_DPMPP2M'\n K_DPMPPSDE = 'K_DPMPPSDE'\n\n @classmethod\n def is_v_sampler(cls, value):\n return value[0] == 'V'\n\n def sample(self, model_fn, x_t, steps, callback, **sampler_args) -> torch.Tensor:\n if self == SamplerType.V_DDPM:\n if sampler_args.get('is_reverse'):\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_DDIM:\n if sampler_args.get('is_reverse'): # HACK: Technically incorrect since DDIM implies eta > 0.0\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return 
vsampling.sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('eta', 0.1),\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_PRK:\n return vsampling.prk_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PIE:\n return vsampling.pie_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS:\n return vsampling.plms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS2:\n return vsampling.plms2_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_IPLMS:\n return vsampling.iplms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.K_EULER:\n return ksampling.sample_euler(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_EULERA:\n return ksampling.sample_euler_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_HEUN:\n return ksampling.sample_heun(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2:\n return ksampling.sample_dpm_2(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2A:\n return ksampling.sample_dpm_2_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_LMS:\n return ksampling.sample_lms(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 4)\n )\n elif self == SamplerType.K_DPMF:# sample_dpm_fast\n return ksampling.sample_dpm_fast(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('n', 3),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMA:\n return ksampling.sample_dpm_adaptive(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 3),\n sampler_args.get('rtol', 0.05),\n sampler_args.get('atol', 0.0078),\n 
sampler_args.get('h_init', 0.05),\n sampler_args.get('pcoeff', 0.0),\n sampler_args.get('icoeff', 1.0),\n sampler_args.get('dcoeff', 0.0),\n sampler_args.get('accept_safety', 0.81),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('return_info', False)\n )\n elif self == SamplerType.K_DPMPP2SA:\n return ksampling.sample_dpmpp_2s_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMPP2M:\n return ksampling.sample_dpmpp_2m(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False)\n )\n elif self == SamplerType.K_DPMPPSDE:\n return ksampling.sample_dpmpp_sde(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('r', 1/2)\n )\n else:\n raise Exception(f\"No sample implementation for sampler_type '{self}'\")" }, { "identifier": "ModelWrapperBase", "path": "libs/dance_diffusion/base/model.py", "snippet": "class ModelWrapperBase():\n \n def __init__(self):\n #self.uuid: str = None\n #self.name: str = None\n self.path: str = None\n \n self.device_accelerator: torch.device = None\n \n self.chunk_size: int = None\n self.sample_rate: int = None\n \n \n def load(\n self,\n path: str,\n device_accelerator: torch.device,\n optimize_memory_use:bool=False,\n chunk_size: int=131072,\n sample_rate: int=48000\n ):\n raise NotImplementedError" }, { "identifier": "InferenceBase", "path": "libs/dance_diffusion/base/inference.py", "snippet": "class InferenceBase():\n def __init__(\n self,\n device_accelerator: torch.device,\n device_offload: torch.device,\n optimize_memory_use: bool,\n use_autocast: bool,\n model: ModelWrapperBase\n ):\n self.device_accelerator = device_accelerator\n self.device_offload = device_offload if(optimize_memory_use==True) else None\n self.optimize_memory_use = optimize_memory_use\n self.use_autocast = use_autocast\n self.model = model\n self.generator = torch.Generator(device_accelerator)# if (device_accelerator.type != 'mps') else torch.device('cpu'))\n self.rng_state = None\n \n def set_device_accelerator(\n self,\n device: torch.device = None\n ):\n self.device_accelerator = device\n \n def get_device_accelerator(\n self\n ) -> torch.device:\n return self.device_accelerator\n \n def set_model(\n self,\n model: ModelWrapperBase = None\n ):\n self.model = model\n \n def get_model(\n self\n ) -> ModelWrapperBase:\n return self.model\n\n def expand(\n self,\n tensor: torch.Tensor,\n expansion_map: list[int]\n ) -> torch.Tensor:\n out = torch.empty([0], device=self.device_accelerator)\n \n for i in range(tensor.shape[0]):\n out = torch.cat([out, tensor[i,:,:].expand(expansion_map[i], -1, -1)], 0)\n \n return out\n \n \n # def cc_randn(self, shape:tuple, seed:int, device:torch.device, dtype = None, rng_state_in:torch.Tensor = None):\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([shape[0], shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty(shape,device=device, dtype=dtype, device=device)\n \n # for sample in range(shape[0]):\n # for channel in range(shape[1]):\n # 
self.generator.manual_seed(seed + sample * shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([shape[2]], generator=self.generator, dtype=dtype, device=device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n # def cc_randn_like(self, input:torch.Tensor, seed:int, rng_state_in:torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([input.shape[0], input.shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty_like(input)\n \n # for sample in range(input.shape[0]):\n # for channel in range(input.shape[1]):\n # self.generator.manual_seed(seed + sample * input.shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([input.shape[2]], generator=self.generator, dtype=input.dtype, device=input.device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n \n def autocast_context(self):\n if self.device_accelerator.type == 'cuda':\n return torch.cuda.amp.autocast()\n elif self.device_accelerator.type == 'cpu':\n return torch.cpu.amp.autocast()\n elif self.device_accelerator.type == 'mps':\n return nullcontext()\n else:\n return torch.autocast(self.device_accelerator.type, dtype=torch.float32)\n\n @contextlib.contextmanager\n def offload_context(self, model):\n \"\"\"\n Used by inference implementations, this context manager moves the\n passed model to the inference's `device_accelerator` device on enter,\n and then returns it to the `device_offload` device on exit.\n\n It also wraps the `inference.autocast_context()` context.\n \"\"\"\n\n autocast = self.autocast_context() if self.use_autocast else nullcontext()\n \n with autocast:\n if self.optimize_memory_use:\n model.to(self.device_accelerator)\n\n yield None\n\n if self.optimize_memory_use:\n model.to(self.device_offload)" }, { "identifier": "tensor_slerp_2D", "path": "libs/util/util.py", "snippet": "def tensor_slerp_2D(a: torch.Tensor, b: torch.Tensor, t: float):\n slerped = torch.empty_like(a)\n \n for channel in range(a.size(0)):\n slerped[channel] = tensor_slerp(a[channel], b[channel], t)\n \n return slerped" }, { "identifier": "PosteriorSampling", "path": "libs/util/util.py", "snippet": "class PosteriorSampling(torch.nn.Module):\n def __init__(self, model, x_T, measurement, mask, scale):\n super().__init__()\n self.model = model\n self.x_prev = x_T\n self.measurement = measurement\n self.mask = mask\n self.scale = scale\n \n @torch.enable_grad()\n def forward(self, input, sigma, **kwargs):\n x_t = input.detach().requires_grad_()\n out = self.model(x_t, sigma, **kwargs)\n difference = (self.measurement - out) * self.mask\n norm = torch.linalg.norm(difference)\n norm_grad = torch.autograd.grad(outputs=norm, inputs=x_t)[0].detach()\n \n return out.detach() - self.scale * norm_grad\n \n # x_t = input.detach().requires_grad_()\n # x_0_hat = self.model(input, sigma, **kwargs).detach().requires_grad_()\n \n # difference = (self.measurement - x_0_hat) * self.mask\n # norm = torch.linalg.norm(difference)\n # norm_grad = torch.autograd.grad(outputs=norm, inputs=self.x_prev)[0].detach()\n \n # self.x_prev = 
x_t.detach().requires_grad_()\n \n # return x_t.detach() - norm_grad * self.scale" }, { "identifier": "SchedulerType", "path": "libs/diffusion_library/scheduler.py", "snippet": "class SchedulerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_SPLICED_DDPM_COSINE = 'V_SPLICED_DDPM_COSINE'\n V_LOG = 'V_LOG'\n V_CRASH = 'V_CRASH'\n \n K_KARRAS = 'K_KARRAS'\n K_EXPONENTIAL = 'K_EXPONENTIAL'\n K_POLYEXPONENTIAL = 'K_POLYEXPONENTIAL'\n K_VP = 'K_VP'\n \n @classmethod\n def is_v_scheduler(cls, value):\n return value[0] == 'V'\n \n def get_step_list(self, n: int, device: str, **schedule_args):\n #if SchedulerType.is_v_scheduler(self):\n # n -= 1\n\n if self == SchedulerType.V_DDPM:\n return torch.nn.functional.pad(vscheduling.get_ddpm_schedule(torch.linspace(1, 0, n)), [0,1], value=0.0).to(device)\n elif self == SchedulerType.V_SPLICED_DDPM_COSINE:\n return vscheduling.get_spliced_ddpm_cosine_schedule(torch.linspace(1, 0, n + 1)).to(device)\n elif self == SchedulerType.V_LOG:\n return torch.nn.functional.pad(\n vscheduling.get_log_schedule(\n torch.linspace(1, 0, n),\n schedule_args.get('min_log_snr', -10.0),\n schedule_args.get('max_log_snr', 10.0)\n ),\n [0,1],\n value=0.0\n ).to(device)\n elif self == SchedulerType.V_CRASH:\n sigma = torch.sin(torch.linspace(1, 0, n + 1) * math.pi / 2) ** 2\n alpha = (1 - sigma ** 2) ** 0.5\n return vscheduling.alpha_sigma_to_t(alpha, sigma).to(device)\n elif self == SchedulerType.K_KARRAS:\n return kscheduling.get_sigmas_karras(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 7.0),\n device = device\n )\n elif self == SchedulerType.K_EXPONENTIAL:\n return kscheduling.get_sigmas_exponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n device = device\n )\n elif self == SchedulerType.K_POLYEXPONENTIAL:\n return kscheduling.get_sigmas_polyexponential(\n n,\n schedule_args.get('sigma_min', 0.001),\n schedule_args.get('sigma_max', 1.0),\n schedule_args.get('rho', 1.0),\n device = device\n )\n elif self == SchedulerType.K_VP:\n return kscheduling.get_sigmas_vp(\n n,\n schedule_args.get('beta_d', 1.205),\n schedule_args.get('beta_min', 0.09),\n schedule_args.get('eps_s', 0.001),\n device = device\n )\n else:\n raise Exception(f\"No get_step_list implementation for scheduler_type '{self}'\")" }, { "identifier": "SamplerType", "path": "libs/diffusion_library/sampler.py", "snippet": "class SamplerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_DDIM = 'V_DDIM'\n V_PRK = 'V_PRK'\n V_PIE = 'V_PIE'\n V_PLMS = 'V_PLMS'\n V_PLMS2 = 'V_PLMS2'\n V_IPLMS = 'V_IPLMS'\n \n K_EULER = 'K_EULER'\n K_EULERA = 'K_EULERA'\n K_HEUN = 'K_HEUN'\n K_DPM2 = 'K_DPM2'\n K_DPM2A = 'K_DPM2A'\n K_LMS = 'K_LMS'\n K_DPMF = 'K_DPMF'\n K_DPMA = 'K_DPMA'\n K_DPMPP2SA = 'K_DPMPP2SA'\n K_DPMPP2M = 'K_DPMPP2M'\n K_DPMPPSDE = 'K_DPMPPSDE'\n\n @classmethod\n def is_v_sampler(cls, value):\n return value[0] == 'V'\n\n def sample(self, model_fn, x_t, steps, callback, **sampler_args) -> torch.Tensor:\n if self == SamplerType.V_DDPM:\n if sampler_args.get('is_reverse'):\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_DDIM:\n if sampler_args.get('is_reverse'): # HACK: Technically incorrect since DDIM implies eta > 0.0\n return vsampling.reverse_sample(\n model_fn,\n x_t,\n steps,\n 0.0,\n 
sampler_args.get('extra_args', {}),\n callback\n )\n else:\n return vsampling.sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('eta', 0.1),\n sampler_args.get('extra_args', {}),\n callback\n )\n elif self == SamplerType.V_PRK:\n return vsampling.prk_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PIE:\n return vsampling.pie_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS:\n return vsampling.plms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_PLMS2:\n return vsampling.plms2_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.V_IPLMS:\n return vsampling.iplms_sample(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n True,\n callback\n )\n elif self == SamplerType.K_EULER:\n return ksampling.sample_euler(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_EULERA:\n return ksampling.sample_euler_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_HEUN:\n return ksampling.sample_heun(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2:\n return ksampling.sample_dpm_2(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('s_churn', 0.0),\n sampler_args.get('s_tmin', 0.0),\n sampler_args.get('s_tmax',float('inf')),\n sampler_args.get('s_noise', 1.0)\n )\n elif self == SamplerType.K_DPM2A:\n return ksampling.sample_dpm_2_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_LMS:\n return ksampling.sample_lms(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 4)\n )\n elif self == SamplerType.K_DPMF:# sample_dpm_fast\n return ksampling.sample_dpm_fast(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('n', 3),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMA:\n return ksampling.sample_dpm_adaptive(\n model_fn,\n x_t,\n sampler_args.get('sigma_min', 0.001),\n sampler_args.get('sigma_max', 1.0),\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('order', 3),\n 
sampler_args.get('rtol', 0.05),\n sampler_args.get('atol', 0.0078),\n sampler_args.get('h_init', 0.05),\n sampler_args.get('pcoeff', 0.0),\n sampler_args.get('icoeff', 1.0),\n sampler_args.get('dcoeff', 0.0),\n sampler_args.get('accept_safety', 0.81),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('return_info', False)\n )\n elif self == SamplerType.K_DPMPP2SA:\n return ksampling.sample_dpmpp_2s_ancestral(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None)\n )\n elif self == SamplerType.K_DPMPP2M:\n return ksampling.sample_dpmpp_2m(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False)\n )\n elif self == SamplerType.K_DPMPPSDE:\n return ksampling.sample_dpmpp_sde(\n model_fn,\n x_t,\n steps,\n sampler_args.get('extra_args', {}),\n callback,\n sampler_args.get('disable', False),\n sampler_args.get('eta', 0.1),\n sampler_args.get('s_noise', 1.0),\n sampler_args.get('noise_sampler', None),\n sampler_args.get('r', 1/2)\n )\n else:\n raise Exception(f\"No sample implementation for sampler_type '{self}'\")" }, { "identifier": "ModelWrapperBase", "path": "libs/dance_diffusion/base/model.py", "snippet": "class ModelWrapperBase():\n \n def __init__(self):\n #self.uuid: str = None\n #self.name: str = None\n self.path: str = None\n \n self.device_accelerator: torch.device = None\n \n self.chunk_size: int = None\n self.sample_rate: int = None\n \n \n def load(\n self,\n path: str,\n device_accelerator: torch.device,\n optimize_memory_use:bool=False,\n chunk_size: int=131072,\n sample_rate: int=48000\n ):\n raise NotImplementedError" }, { "identifier": "InferenceBase", "path": "libs/dance_diffusion/base/inference.py", "snippet": "class InferenceBase():\n def __init__(\n self,\n device_accelerator: torch.device,\n device_offload: torch.device,\n optimize_memory_use: bool,\n use_autocast: bool,\n model: ModelWrapperBase\n ):\n self.device_accelerator = device_accelerator\n self.device_offload = device_offload if(optimize_memory_use==True) else None\n self.optimize_memory_use = optimize_memory_use\n self.use_autocast = use_autocast\n self.model = model\n self.generator = torch.Generator(device_accelerator)# if (device_accelerator.type != 'mps') else torch.device('cpu'))\n self.rng_state = None\n \n def set_device_accelerator(\n self,\n device: torch.device = None\n ):\n self.device_accelerator = device\n \n def get_device_accelerator(\n self\n ) -> torch.device:\n return self.device_accelerator\n \n def set_model(\n self,\n model: ModelWrapperBase = None\n ):\n self.model = model\n \n def get_model(\n self\n ) -> ModelWrapperBase:\n return self.model\n\n def expand(\n self,\n tensor: torch.Tensor,\n expansion_map: list[int]\n ) -> torch.Tensor:\n out = torch.empty([0], device=self.device_accelerator)\n \n for i in range(tensor.shape[0]):\n out = torch.cat([out, tensor[i,:,:].expand(expansion_map[i], -1, -1)], 0)\n \n return out\n \n \n # def cc_randn(self, shape:tuple, seed:int, device:torch.device, dtype = None, rng_state_in:torch.Tensor = None):\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([shape[0], shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty(shape,device=device, dtype=dtype, device=device)\n \n # for 
sample in range(shape[0]):\n # for channel in range(shape[1]):\n # self.generator.manual_seed(seed + sample * shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([shape[2]], generator=self.generator, dtype=dtype, device=device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n # def cc_randn_like(self, input:torch.Tensor, seed:int, rng_state_in:torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:\n \n # initial_rng_state = self.generator.get_state()\n # rng_state_out = torch.empty([input.shape[0], input.shape[1]], dtype=torch.ByteTensor,device=self.generator.device)\n \n # rn = torch.empty_like(input)\n \n # for sample in range(input.shape[0]):\n # for channel in range(input.shape[1]):\n # self.generator.manual_seed(seed + sample * input.shape[1] + channel) if(rng_state_in == None) else self.generator.set_state(rng_state_in[sample, channel])\n # rn[sample, channel] = torch.randn([input.shape[2]], generator=self.generator, dtype=input.dtype, device=input.device)\n # rng_state_out[sample, channel] = self.generator.get_state()\n \n # self.rng_state = rng_state_out\n # self.generator.set_state(initial_rng_state)\n # return rn\n \n \n def autocast_context(self):\n if self.device_accelerator.type == 'cuda':\n return torch.cuda.amp.autocast()\n elif self.device_accelerator.type == 'cpu':\n return torch.cpu.amp.autocast()\n elif self.device_accelerator.type == 'mps':\n return nullcontext()\n else:\n return torch.autocast(self.device_accelerator.type, dtype=torch.float32)\n\n @contextlib.contextmanager\n def offload_context(self, model):\n \"\"\"\n Used by inference implementations, this context manager moves the\n passed model to the inference's `device_accelerator` device on enter,\n and then returns it to the `device_offload` device on exit.\n\n It also wraps the `inference.autocast_context()` context.\n \"\"\"\n\n autocast = self.autocast_context() if self.use_autocast else nullcontext()\n \n with autocast:\n if self.optimize_memory_use:\n model.to(self.device_accelerator)\n\n yield None\n\n if self.optimize_memory_use:\n model.to(self.device_offload)" }, { "identifier": "tensor_slerp_2D", "path": "libs/util/util.py", "snippet": "def tensor_slerp_2D(a: torch.Tensor, b: torch.Tensor, t: float):\n slerped = torch.empty_like(a)\n \n for channel in range(a.size(0)):\n slerped[channel] = tensor_slerp(a[channel], b[channel], t)\n \n return slerped" }, { "identifier": "PosteriorSampling", "path": "libs/util/util.py", "snippet": "class PosteriorSampling(torch.nn.Module):\n def __init__(self, model, x_T, measurement, mask, scale):\n super().__init__()\n self.model = model\n self.x_prev = x_T\n self.measurement = measurement\n self.mask = mask\n self.scale = scale\n \n @torch.enable_grad()\n def forward(self, input, sigma, **kwargs):\n x_t = input.detach().requires_grad_()\n out = self.model(x_t, sigma, **kwargs)\n difference = (self.measurement - out) * self.mask\n norm = torch.linalg.norm(difference)\n norm_grad = torch.autograd.grad(outputs=norm, inputs=x_t)[0].detach()\n \n return out.detach() - self.scale * norm_grad\n \n # x_t = input.detach().requires_grad_()\n # x_0_hat = self.model(input, sigma, **kwargs).detach().requires_grad_()\n \n # difference = (self.measurement - x_0_hat) * self.mask\n # norm = torch.linalg.norm(difference)\n # norm_grad = torch.autograd.grad(outputs=norm, 
inputs=self.x_prev)[0].detach()\n \n # self.x_prev = x_t.detach().requires_grad_()\n \n # return x_t.detach() - norm_grad * self.scale" } ]
import torch
from tqdm.auto import trange
from diffusion.utils import t_to_alpha_sigma
from k_diffusion.external import VDenoiser
from typing import Tuple, Callable
from libs.diffusion_library.scheduler import SchedulerType
from libs.diffusion_library.sampler import SamplerType
from libs.dance_diffusion.base.model import ModelWrapperBase
from libs.dance_diffusion.base.inference import InferenceBase
from libs.util.util import tensor_slerp_2D, PosteriorSampling
from typing import Tuple, Callable
from libs.diffusion_library.scheduler import SchedulerType
from libs.diffusion_library.sampler import SamplerType
from libs.dance_diffusion.base.model import ModelWrapperBase
from libs.dance_diffusion.base.inference import InferenceBase
from libs.util.util import tensor_slerp_2D, PosteriorSampling
11,160
callback, **sampler_args ).float() def generate_interpolation( self, callback: Callable = None, batch_size: int = None, # seed: int = None, interpolation_positions: list[float] = None, audio_source: torch.Tensor = None, audio_target: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[:-1] #HACK avoid division by 0 in reverse sampling model = VDenoiser(self.model.model) if self.optimize_memory_use and batch_size < 2: x_0_source = audio_source x_0_target = audio_target with self.offload_context(self.model.model): x_T_source = sampler.sample( model, x_0_source, step_list.flip(0), callback, **sampler_args ) with self.offload_context(self.model.model): x_T_target = sampler.sample( model, x_0_target, step_list.flip(0), callback, **sampler_args ) x_T = torch.cat([x_T_source, x_T_target], dim=0) else: x_0 = torch.cat([audio_source, audio_target], dim=0) with self.offload_context(self.model.model): x_T = sampler.sample( model, x_0, step_list.flip(0), callback, **sampler_args ) if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack step_list[-1] = 0.0 else: step_list = torch.cat([step_list, step_list.new_zeros([1])]) x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator) for pos in range(len(interpolation_positions)): x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos]) with self.offload_context(self.model.model): return sampler.sample( model, x_Int, step_list, callback, **sampler_args ).float() def generate_inpainting( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, mask: torch.Tensor = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, inpainting_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) method = inpainting_args.get('method') if(method == 'repaint'): raise Exception("Repaint currently not supported due to changed requirements") elif(method == 'posterior_guidance'): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) if SamplerType.is_v_sampler(sampler): raise Exception('V-Sampler currently not supported for posterior guidance. Please choose a K-Sampler.') else: x_T = audio_source + step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
class DDInference(InferenceBase): def __init__( self, device_accelerator: torch.device = None, device_offload: torch.device = None, optimize_memory_use: bool = False, use_autocast: bool = True, model: ModelWrapperBase = None ): super().__init__(device_accelerator, device_offload, optimize_memory_use, use_autocast, model) def generate( self, callback: Callable = None, batch_size: int = None, seed: int = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args: dict = None, sampler: SamplerType = None, sampler_args: dict = None, **kwargs ): self.generator.manual_seed(seed) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)#step_list = step_list[:-1] if sampler in [SamplerType.V_PRK, SamplerType.V_PLMS, SamplerType.V_PIE, SamplerType.V_PLMS2, SamplerType.V_IPLMS] else step_list if SamplerType.is_v_sampler(sampler): x_T = torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator) model = self.model.model else: x_T = step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_variation( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) audio_source = self.expand(audio_source, expansion_map) if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] alpha_T, sigma_T = t_to_alpha_sigma(step_list[0]) x_T = alpha_T * audio_source + sigma_T * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) x_T = audio_source + step_list[0] * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_interpolation( self, callback: Callable = None, batch_size: int = None, # seed: int = None, interpolation_positions: list[float] = None, audio_source: torch.Tensor = None, audio_target: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[:-1] #HACK avoid division by 0 in reverse 
sampling model = VDenoiser(self.model.model) if self.optimize_memory_use and batch_size < 2: x_0_source = audio_source x_0_target = audio_target with self.offload_context(self.model.model): x_T_source = sampler.sample( model, x_0_source, step_list.flip(0), callback, **sampler_args ) with self.offload_context(self.model.model): x_T_target = sampler.sample( model, x_0_target, step_list.flip(0), callback, **sampler_args ) x_T = torch.cat([x_T_source, x_T_target], dim=0) else: x_0 = torch.cat([audio_source, audio_target], dim=0) with self.offload_context(self.model.model): x_T = sampler.sample( model, x_0, step_list.flip(0), callback, **sampler_args ) if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack step_list[-1] = 0.0 else: step_list = torch.cat([step_list, step_list.new_zeros([1])]) x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator) for pos in range(len(interpolation_positions)): x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos]) with self.offload_context(self.model.model): return sampler.sample( model, x_Int, step_list, callback, **sampler_args ).float() def generate_inpainting( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, mask: torch.Tensor = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, inpainting_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) method = inpainting_args.get('method') if(method == 'repaint'): raise Exception("Repaint currently not supported due to changed requirements") elif(method == 'posterior_guidance'): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) if SamplerType.is_v_sampler(sampler): raise Exception('V-Sampler currently not supported for posterior guidance. Please choose a K-Sampler.') else: x_T = audio_source + step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
model = PosteriorSampling(
11
2023-11-28 09:09:59+00:00
16k
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_fully_recourse_problem.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Optimal Load Flow with Steady State Security\"},\n IEEE Transactions on Power Apparatus and Systems, Vol. PAS 93, No. 3,\n 1974, pp. 745-751.\n\n ... with branch parameters rounded to nearest 0.01, shunt values divided\n by 100 and shunt on bus 10 moved to bus 5, load at bus 5 zeroed out.\n Generator locations, costs and limits and bus areas were taken from ...\n\n Ferrero, R.W., Shahidehpour, S.M., Ramesh, V.C., I{\"Transaction analysis\n in deregulated power systems using game theory\"}, IEEE Transactions on\n Power Systems, Vol. 12, No. 3, Aug 1997, pp. 1340-1347.\n\n Generator Q limits were derived from Alsac & Stott, using their Pmax\n capacities. V limits and line |S| limits taken from Alsac & Stott.\n\n @return: Power flow data for 30 bus, 6 generator case.\n @see: U{http://www.pserc.cornell.edu/matpower/}\n \"\"\"\n ppc = {\"version\": '2'}\n\n ##----- Power Flow Data -----##\n ## system MVA base\n ppc[\"baseMVA\"] = 100.0\n\n ## bus data\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\n ppc[\"bus\"] = array([\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [2, 1, 0.1, 0.06, 0, 0, 1, 1, 0, 12.66, 1, 1.1, 0.95],\n [3, 1, 0.09, 0.04, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [4, 1, 0.12, 0.08, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [5, 1, 0.06, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [6, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [7, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [8, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [9, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [10, 1, 0.06, 0.02, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [11, 1, 0.045, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [12, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [13, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [14, 1, 0.12, 0.08, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [15, 1, 0.06, 0.01, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [16, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [17, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [18, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [19, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [20, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [21, 1, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [22, 2, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [23, 2, 0.09, 0.05, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [24, 1, 0.42, 0.20, 0, 0.04, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [25, 1, 0.42, 0.2, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [26, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [27, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [28, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [29, 1, 0.12, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [30, 1, 0.2, 0.6, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [31, 1, 0.15, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [32, 1, 0.21, 0.1, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [33, 1, 0.06, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n ])\n\n ## generator data\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf, start-up time, shut-down time and initial condition!\n ppc[\"gen\"] = array([\n [1, 
23.54, 0, 150, -20, 1, 100, 1, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1],\n ])\n\n ## branch data\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\n ppc[\"branch\"] = array([\n [1, 2, 0.057525912, 0.029324489, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [2, 3, 0.307595167, 0.15666764, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [3, 4, 0.228356656, 0.116299674, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [4, 5, 0.237777928, 0.121103899, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [5, 6, 0.510994811, 0.441115179, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [6, 7, 0.116798814, 0.386084969, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [7, 8, 0.44386045, 0.146684835, 0, 90, 90, 90, 0, 0, 1, -360, 360],\n [8, 9, 0.642643047, 0.461704714, 0, 70, 70, 70, 0, 0, 1, -360, 360],\n [9, 10, 0.651378001, 0.461704714, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [10, 11, 0.122663712, 0.040555144, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [11, 12, 0.233597628, 0.077241951, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [12, 13, 0.915922324, 0.720633708, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [13, 14, 0.337917936, 0.444796338, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [14, 15, 0.368739846, 0.328184702, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [15, 16, 0.465635443, 0.340039282, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [16, 17, 0.804239697, 1.073775422, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [17, 18, 0.456713311, 0.358133116, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [2, 19, 0.102323747, 0.097644308, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [19, 20, 0.938508419, 0.845668336, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [20, 21, 0.255497406, 0.298485858, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [21, 22, 0.442300637, 0.584805173, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [3, 23, 0.28151509, 0.192356167, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [23, 24, 0.560284909, 0.442425422, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [24, 25, 0.559037059, 0.43743402, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [6, 26, 0.126656834, 0.064513875, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [26, 27, 0.177319567, 0.090281989, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [27, 28, 0.660736881, 0.582559042, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [28, 29, 0.501760717, 0.437122057, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [29, 30, 0.316642084, 0.161284687, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [30, 31, 0.607952801, 0.600840053, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [31, 32, 0.193728802, 0.225798562, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [32, 33, 0.212758523, 0.330805188, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [7, 20, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [8, 14, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [11, 21, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [17, 32, 0.3120, 0.3120, 0, 65, 65, 65, 0, 0, 0, -360, 360],\n [24, 28, 0.3120, 0.3120, 0, 16, 16, 16, 0, 0, 0, -360, 360]\n ])\n\n ##----- OPF Data -----##\n ## area data\n # area refbus\n ppc[\"areas\"] = array([\n [1, 8],\n [2, 23],\n [3, 26],\n ])\n\n ## generator cost data\n # 1 startup shutdown n x1 y1 ... xn yn\n # 2 startup shutdown n c(n-1) ... 
c0\n ppc[\"gencost\"] = array([\n [0, 0, 0, 3, 0.0, 20, 0]\n ])\n\n return ppc" }, { "identifier": "micro_grid", "path": "TestCasesMicrogrids/test_cases/cases_unit_commitment.py", "snippet": "AC_PD = array([323.0284, 308.2374, 318.1886, 307.9809, 331.2170, 368.6539, 702.0040, 577.7045, 1180.4547, 1227.6240,\n 1282.9344, 1311.9738, 1268.9502, 1321.7436, 1323.9218, 1327.1464, 1386.9117, 1321.6387, 1132.0476,\n 1109.2701, 882.5698, 832.4520, 349.3568, 299.9920])\nDC_PD = array([287.7698, 287.7698, 287.7698, 287.7698, 299.9920, 349.3582, 774.4047, 664.0625, 1132.6996, 1107.7366,\n 1069.6837, 1068.9819, 1027.3295, 1096.3820, 1109.4778, 1110.7039, 1160.1270, 1078.7839, 852.2514,\n 791.5814, 575.4085, 551.1441, 349.3568, 299.992])\nDG = {\"PMIN\": 0,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST_A\": 0.01,\n \"COST_B\": 0.5}\nUG = {\"PMIN\": -5,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST\": Price_UG, } # The cost should be a profile\nESS = {\"PDC_MAX\": 5,\n \"PCH_MAX\": 5,\n \"EFF_DC\": 0.95,\n \"EFF_CH\": 0.95,\n \"E0\": 10,\n \"EMIN\": 5,\n \"EMAX\": 20, }\nBIC = {\"PMAX\": 5,\n \"QMAX\": 5,\n \"SMAX\": 5,\n \"EFF_AC2DC\": 0.9,\n \"EFF_DC2AC\": 0.9, }\nMG = {\"PMAX\": 5,\n \"PMIN\": -5,\n \"QMAX\": 5,\n \"QMIN\": -5\n }\nPD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5,\n \"DC\": DC_PD / max(DC_PD),\n \"DC_MAX\": 5}\nQD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5, }\nPV = {\"PMAX\": 0,\n \"COST\": 0}" }, { "identifier": "PBIC_AC2DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_AC2DC = 4" }, { "identifier": "PG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PG = 0" }, { "identifier": "PESS_DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_DC = 8" }, { "identifier": "PBIC_DC2AC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_DC2AC = 5" }, { "identifier": "PUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PUG = 2" }, { "identifier": "PESS_CH", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_CH = 7" }, { "identifier": "PMESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PMESS = 11 # Reactive power unit commitment of" }, { "identifier": "EESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "EESS = 9" }, { "identifier": "NX_MG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "NX_MG = 12" }, { "identifier": "QBIC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QBIC = 6" }, { "identifier": "QUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QUG = 3" }, { "identifier": "QG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QG = 1" }, { "identifier": "PPV", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PPV = 10" }, { "identifier": "DataBaseManagement", "path": "TestCaseDistributionSystems/database_management_pv.py", "snippet": "class DataBaseManagement():\n\n def __init__(self, host=\"localhost\", user=\"ems\", password=\"12345678\", db=\"mess_pv\"):\n \"\"\"\n Initialized the database connection string\n :param host: host ip\n :param user: user name\n :param password: password\n :param db: database name\n :return\n \"\"\"\n self.db = pymysql.connect(host=host, user=user, password=password, db=db)\n\n def create_table(self, table_name, nl=32, nb=33, ng=6, nmg=3):\n \"\"\"\n 
Creat table name\n :param table_name:\n :param nb:\n :param nb:\n :param ng:\n :return: no return value\n \"\"\"\n cursor = self.db.cursor()\n sql = \"DROP TABLE IF EXISTS \"\n cursor.execute(sql + table_name)\n if table_name == \"distribution_networks\":\n sql_start = \"\"\"CREATE TABLE distribution_networks (\"\"\"\n sql = 'SCENARIO INT,\\n TIME INT NOT NULL,\\n '\n for i in range(nl):\n sql += \"PIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"QIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"IIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nb):\n sql += \"V{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng - 1):\n sql += \"QG{0} DECIMAL(8,6),\\n \".format(i)\n sql += \"QG{0} DECIMAL(8,6)\\n \".format(ng - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"micro_grids\":\n sql_start = \"\"\"CREATE TABLE micro_grids (\"\"\"\n sql = 'SCENARIO INT,\\n MG INT,\\n TIME INT,\\n '\n sql += 'PG DECIMAL(8,4),\\n QG DECIMAL(8,4),\\n PUG DECIMAL(8,4),\\n QUG DECIMAL(8,4),\\n '\n sql += 'PBIC_AC2DC DECIMAL(8,4),\\n PBIC_DC2AC DECIMAL(8,4),\\n QBIC DECIMAL(8,4),\\n PESS_CH DECIMAL(7,4),\\n '\n sql += 'PESS_DC DECIMAL(8,4),\\n EESS DECIMAL(8,4),\\n PPV DECIMAL(8,4),\\n PMESS DECIMAL(8,4)'\n sql_end = \"\"\")\"\"\"\n elif table_name == \"mobile_energy_storage_systems\":\n sql_start = \"\"\"CREATE TABLE mobile_energy_storage_systems (\"\"\"\n sql = 'SCENARIO INT,\\n MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"EESS DECIMAL(8,4)\\n \"\n sql_end = \"\"\")\"\"\"\n elif table_name == \"first_stage_solutions\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE first_stage_solutions (\"\"\"\n sql = 'TIME INT,\\n'\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"IESS{0} INT,\\n \".format(i)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"ESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"IESS{0} INT,\\n \".format(nmg - 1)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"ESS{0} DECIMAL(8,4)\\n \".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"fisrt_stage_mess\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE fisrt_stage_mess (\"\"\"\n sql = 'MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"IDC_MG{0} INT,\\n \".format(i)\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"RMESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"MESS_F_STOP INT,\\n \"\n sql += \"MESS_T_STOP INT\\n \"\n sql_end = \"\"\")\"\"\"\n else:\n sql_start = \"\"\"CREATE TABLE scenarios (\"\"\"\n sql = 'SCENARIO INT,\\n WEIGHT DECIMAL(8,4),\\n TIME INT,\\n'\n for i in range(nb):\n sql += \"PD{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += 
\"PD_AC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PD_DC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PPV{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PPV{0} DECIMAL(8,4)\\n\".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n\n cursor.execute(sql_start + sql + sql_end)\n cursor.close()\n\n def insert_data_ds(self, table_name, nl=32, nb=33, ng=6, scenario=0, time=0, pij=0, qij=0, lij=0, vi=0, pg=0, qg=0):\n \"\"\"\n Insert data into table_name\n :param table_name:\n :param nl:\n :param nb:\n :param ng:\n :param pij:\n :param qij:\n :param lij:\n :param vi:\n :param pg:\n :param qg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,TIME,\"\n value = \"{0},{1},\".format(scenario, time)\n for i in range(nl):\n sql += \"PIJ{0},\".format(i)\n value += \"{0},\".format(pij[i])\n for i in range(nl):\n sql += \"QIJ{0},\".format(i)\n value += \"{0},\".format(qij[i])\n for i in range(nl):\n sql += \"IIJ{0},\".format(i)\n value += \"{0},\".format(lij[i])\n for i in range(nb):\n sql += \"V{0},\".format(i)\n value += \"{0},\".format(vi[i])\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n for i in range(ng - 1):\n sql += \"QG{0},\".format(i)\n value += \"{0},\".format(qg[i])\n sql += \"QG{0}\".format(ng - 1)\n value += \"{0}\".format(qg[ng - 1])\n\n sql += \") VALUES (\" + value + \")\"\n\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mg(self, table_name, scenario=0, time=0, mg=0, pg=0, qg=0, pug=0, qug=0, pbic_ac2dc=0, pbic_dc2ac=0,\n qbic=0, pess_ch=0, pess_dc=0, eess=0, pmess=0, ppv=0):\n \"\"\"\n insert microgrid data\n :param table_name:\n :param scenario:\n :param time:\n :param mg:\n :param pg:\n :param qg:\n :param pug:\n :param qug:\n :param pbic_ac2dc:\n :param pbic_dc2ac:\n :param qbic:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param pmess:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MG,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mg, time)\n sql += \"PG,QG,PUG,QUG,PBIC_AC2DC,PBIC_DC2AC,QBIC,PESS_CH,PESS_DC,EESS,PPV,PMESS\"\n value += \"{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11}\".format(pg, qg, pug, qug, pbic_ac2dc, pbic_dc2ac,\n qbic, pess_ch, pess_dc, eess, ppv, pmess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage_mess(self, table_name, time=0, mess=0, imess=[0, 0, 0], pmess_ch=[0, 0, 0],\n pmess_dc=[0, 0, 0], rmess=[0, 0, 0], mess_f_stop=0, mess_t_stop=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data in the first-stage\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"MESS,TIME,\"\n value = \"{0},{1},\".format(mess, time)\n for i in range(nmg):\n sql += \"IDC_MG{0},\".format(i)\n value += \"{0},\".format(imess[i])\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n for i in range(nmg):\n sql += \"RMESS{0},\".format(i)\n value += \"{0},\".format(rmess[i])\n sql += \"MESS_F_STOP,MESS_T_STOP\"\n value += \"{0},{1}\".format(mess_f_stop, mess_t_stop)\n sql += 
\") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mess(self, table_name, scenario=0, time=0, mess=0, pmess_ch=[0, 0, 0], pmess_dc=[0, 0, 0],\n emess=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MESS,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mess, time)\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n sql += \"EESS\"\n value += \"{0}\".format(emess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage(self, table_name, time=0, ng=2, nmg=2, pg=[0, 0], rg=[0, 0], pg_mg=[0, 0],\n rg_mg=[0, 0], iess=[0, 0], pess_dc=[0, 0], pess_ch=[0, 0], ress=[0, 0], ess=[0, 0]):\n \"\"\"\n insert scenario data\n :param table_name:\n :param scenario:\n :param weight:\n :param time:\n :param nb:\n :param nmg:\n :param pd:\n :param pd_ac:\n :param pd_dc:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"TIME,\"\n value = \"{0},\".format(time)\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n sql += \"RG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n value += \"{0},\".format(rg[i])\n if nmg > 1:\n for i in range(nmg - 1):\n sql += \"PG_MG{0},\".format(i)\n sql += \"RG_MG{0},\".format(i)\n sql += \"IESS{0},\".format(i)\n sql += \"PESS_DC{0},\".format(i)\n sql += \"PESS_CH{0},\".format(i)\n sql += \"RESS{0},\".format(i)\n sql += \"ESS{0},\".format(i)\n value += \"{0},\".format(pg_mg[i])\n value += \"{0},\".format(rg_mg[i])\n value += \"{0},\".format(iess[i])\n value += \"{0},\".format(pess_dc[i])\n value += \"{0},\".format(pess_ch[i])\n value += \"{0},\".format(ress[i])\n value += \"{0},\".format(ess[i])\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg[nmg - 1])\n value += \"{0},\".format(rg_mg[nmg - 1])\n value += \"{0},\".format(iess[nmg - 1])\n value += \"{0},\".format(pess_dc[nmg - 1])\n value += \"{0},\".format(pess_ch[nmg - 1])\n value += \"{0},\".format(ress[nmg - 1])\n value += \"{0}\".format(ess[nmg - 1])\n else:\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg)\n value += \"{0},\".format(rg_mg)\n value += \"{0},\".format(iess)\n value += \"{0},\".format(pess_dc)\n value += \"{0},\".format(pess_ch)\n value += \"{0},\".format(ress)\n value += \"{0}\".format(ess)\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_scenario(self, table_name, scenario=0, weight=0, time=0, nb=1, nmg=2, pd=[0, 0], pd_ac=[0, 0],\n pd_dc=[0, 0], ppv=[0, 0]):\n cursor = self.db.cursor()\n sql_start = 
\"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,WEIGHT,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, weight, time)\n for i in range(nb):\n sql += \"PD{0},\".format(i)\n value += \"{0},\".format(pd[i])\n for i in range(nmg):\n sql += \"PD_AC{0},\".format(i)\n value += \"{0},\".format(pd_ac[i])\n for i in range(nmg):\n sql += \"PD_DC{0},\".format(i)\n value += \"{0},\".format(pd_dc[i])\n for i in range(nmg - 1):\n sql += \"PPV{0},\".format(i)\n value += \"{0},\".format(ppv[i])\n if nmg > 1:\n sql += \"PPV{0}\".format(nmg - 1)\n value += \"{0}\".format(ppv[nmg - 1])\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def inquery_data_scenario(self, table_name, scenario=0, time=0):\n cursor = self.db.cursor()\n # sql = \"SELECT * FROM \" + table_name + \" ;\"\n sql = \"SELECT * FROM \" + table_name + \" WHERE SCENARIO={0} AND TIME={1};\".format(scenario, time)\n cursor.execute(sql)\n data = cursor.fetchall()\n n_data = len(data[0])\n\n temp = []\n for i in range(n_data): temp.append(float(data[0][i]))\n\n cursor.close()\n return temp" }, { "identifier": "ScenarioReduction", "path": "StochasticOptimization/scenario_reduction.py", "snippet": "class ScenarioReduction():\n def __init__(self):\n self.name = \"Scenario reduction\"\n\n def run(self, scenario, weight, n_reduced, power):\n \"\"\"\n\n :param scenario: A fan scenario tree, when more stage are considered, some merge operation can be implemented\n :param weight: Weight of each scenario\n :param n_reduced: Number of scenarios needs to be reduced\n :param power: The power in the distance calculation\n :return:\n \"\"\"\n n_scenario = scenario.shape[0] # number of original scenarios\n c = zeros((n_scenario, n_scenario))\n # Calculate the c matrix\n for i in range(n_scenario):\n for j in range(n_scenario):\n c[i, j] = linalg.norm((scenario[i, :] - scenario[j, :]), 2)\n c[i, j] = max([1, linalg.norm(scenario[i, :], power - 1), linalg.norm(scenario[j, :], power - 1)]) * \\\n c[i, j]\n\n J = arange(n_scenario) # The original index range\n J_reduced = array([])\n # Implement the iteration\n for n in range(n_reduced): # find the minimal distance\n print(\"The reduction is in process {0}\".format(n))\n c_n = inf * ones(n_scenario)\n c_n[J] = 0\n for u in J:\n # Delete the i-th distance\n J_temp = delete(J, where(J == u))\n for k in J_temp:\n c_k_j = delete(c[int(k)], J_temp)\n c_n[int(u)] += weight[int(k)] * min(c_k_j)\n u_i = argmin(c_n)\n J_reduced = append(J_reduced, u_i)\n J = delete(J, where(J == u_i))\n # Optimal redistribution\n p_s = weight.copy()\n p_s[J_reduced.astype(int)] = 0\n\n for i in J_reduced:\n c_temp = c[int(i), :]\n c_temp[J_reduced.astype(int)] = inf\n index = argmin(c_temp)\n p_s[index] += weight[int(i)]\n\n scenario_reduced = scenario[J.astype(int), :]\n weight_reduced = p_s[J.astype(int)]\n\n return scenario_reduced, weight_reduced" } ]
from TestCaseDistributionSystems.test_cases import case33 from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION from scipy import zeros, shape, ones, diag, concatenate, eye from scipy.sparse import csr_matrix as sparse from scipy.sparse import hstack, vstack, lil_matrix from numpy import flatnonzero as find from numpy import array, tile, arange, random from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A from pypower.idx_bus import PD, VMAX, VMIN, QD, BUS_I from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN from pypower.ext2int import ext2int from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp from Solvers.mixed_integer_programming_gurobi import mixed_integer_linear_programming as milp from copy import deepcopy from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \ PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV from TestCaseDistributionSystems.database_management_pv import DataBaseManagement from StochasticOptimization.scenario_reduction import ScenarioReduction
12828
self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] 
= model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {}
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: [email protected] @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {}
db_management = DataBaseManagement()
15
2023-11-27 15:57:53+00:00
16k
andryyy/ehlocomputer
models/listeners.py
[ { "identifier": "defaults", "path": "config/defaults.py", "snippet": "ACCEPT_LANGUAGES = [\"en\", \"de\"]\nMAX_HISTORIC_REVISIONS = 5\nWEBAUTHN_CHALLENGE_TIMEOUT = 30 # seconds\nPROXY_AUTH_TIMEOUT = 300 # seconds\nTABLE_PAGE_SIZE = 10\nTINYDB = {\n \"storage\": RedisLockMiddleware(JSONStorage),\n \"sort_keys\": True,\n \"indent\": 2,\n}\nPODMAN_BINARY = \"/usr/bin/podman\"\nTRUSTED_PROXIES = [\"127.0.0.1\", \"::1\"]" }, { "identifier": "lego", "path": "config/lego.py", "snippet": "DNS_PROVIDERS = {\n \"allinkl\": [\n {\"ALL_INKL_LOGIN\": \"KAS login\"},\n {\"ALL_INKL_PASSWORD\": \"KAS password\"},\n {\"ALL_INKL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ALL_INKL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ALL_INKL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"arvancloud\": [\n {\"ARVANCLOUD_API_KEY\": \"API key\"},\n {\"ARVANCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ARVANCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ARVANCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ARVANCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"autodns\": [\n {\"AUTODNS_API_PASSWORD\": \"User Password\"},\n {\"AUTODNS_API_USER\": \"Username\"},\n {\"AUTODNS_CONTEXT\": \"API context (4 for production, 1 for testing. Defaults to 4)\"},\n {\"AUTODNS_HTTP_TIMEOUT\": \"API request timeout, defaults to 30 seconds\"},\n {\"AUTODNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"AUTODNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"AUTODNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"azure\": [\n {\"AZURE_CLIENT_ID\": \"Client ID\"},\n {\"AZURE_CLIENT_SECRET\": \"Client secret\"},\n {\"AZURE_ENVIRONMENT\": \"Azure environment, one of: public, usgovernment, and china\"},\n {\"AZURE_RESOURCE_GROUP\": \"DNS zone resource group\"},\n {\"AZURE_SUBSCRIPTION_ID\": \"DNS zone subscription ID\"},\n {\"AZURE_TENANT_ID\": \"Tenant ID\"},\n {\"AZURE_METADATA_ENDPOINT\": \"Metadata Service endpoint URL\"},\n {\"AZURE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"AZURE_PRIVATE_ZONE\": \"Set to true to use Azure Private DNS Zones and not public\"},\n {\"AZURE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"AZURE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"AZURE_ZONE_NAME\": \"Zone name to use inside Azure DNS service to add the TXT record in\"},\n ],\n \"bindman\": [\n {\"BINDMAN_MANAGER_ADDRESS\": \"The server URL, should have scheme, hostname, and port (if required) of the Bindman-DNS Manager server\"},\n {\"BINDMAN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BINDMAN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BINDMAN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"bluecat\": [\n {\"BLUECAT_CONFIG_NAME\": \"Configuration name\"},\n {\"BLUECAT_DNS_VIEW\": \"External DNS View Name\"},\n {\"BLUECAT_PASSWORD\": \"API password\"},\n {\"BLUECAT_SERVER_URL\": \"The server URL, should have scheme, hostname, and port (if required) of the authoritative Bluecat BAM serve\"},\n {\"BLUECAT_USER_NAME\": \"API username\"},\n {\"BLUECAT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BLUECAT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BLUECAT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BLUECAT_TTL\": \"The TTL of the TXT record used 
for the DNS challenge\"},\n ],\n \"brandit\": [\n {\"BRANDIT_API_KEY\": \"The API key\"},\n {\"BRANDIT_API_USERNAME\": \"The API username\"},\n {\"BRANDIT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"BRANDIT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BRANDIT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BRANDIT_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"bunny\": [\n {\"BUNNY_API_KEY\": \"API key\"},\n {\"BUNNY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"BUNNY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"BUNNY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"checkdomain\": [\n {\"CHECKDOMAIN_TOKEN\": \"API token\"},\n {\"CHECKDOMAIN_HTTP_TIMEOUT\": \"API request timeout, defaults to 30 seconds\"},\n {\"CHECKDOMAIN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CHECKDOMAIN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CHECKDOMAIN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"civo\": [\n {\"CIVO_TOKEN\": \"Authentication token\"},\n {\"CIVO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CIVO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CIVO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"clouddns\": [\n {\"CLOUDDNS_CLIENT_ID\": \"Client ID\"},\n {\"CLOUDDNS_EMAIL\": \"Account email\"},\n {\"CLOUDDNS_PASSWORD\": \"Account password\"},\n {\"CLOUDDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudflare\": [\n {\"CLOUDFLARE_API_KEY\": \"Alias to CF_API_KEY\"},\n {\"CLOUDFLARE_DNS_API_TOKEN\": \"Alias to CF_DNS_API_TOKEN\"},\n {\"CLOUDFLARE_EMAIL\": \"Alias to CF_API_EMAIL\"},\n {\"CLOUDFLARE_ZONE_API_TOKEN\": \"Alias to CF_ZONE_API_TOKEN\"},\n {\"CLOUDFLARE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDFLARE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDFLARE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDFLARE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudns\": [\n {\"CLOUDNS_AUTH_ID\": \"The API user ID\"},\n {\"CLOUDNS_AUTH_PASSWORD\": \"The password for API user ID\"},\n {\"CLOUDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDNS_SUB_AUTH_ID\": \"The API sub user ID\"},\n {\"CLOUDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudru\": [\n {\"CLOUDRU_KEY_ID\": \"Key ID (login)\"},\n {\"CLOUDRU_SECRET\": \"Key Secret\"},\n {\"CLOUDRU_SERVICE_INSTANCE_ID\": \"Service Instance ID (parentId)\"},\n {\"CLOUDRU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDRU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDRU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDRU_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"CLOUDRU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"cloudxns\": [\n {\"CLOUDXNS_API_KEY\": \"The API key\"},\n {\"CLOUDXNS_SECRET_KEY\": \"The API 
secret key\"},\n {\"CLOUDXNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CLOUDXNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CLOUDXNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CLOUDXNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"conoha\": [\n {\"CONOHA_API_PASSWORD\": \"The API password\"},\n {\"CONOHA_API_USERNAME\": \"The API username\"},\n {\"CONOHA_TENANT_ID\": \"Tenant ID\"},\n {\"CONOHA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CONOHA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CONOHA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CONOHA_REGION\": \"The region\"},\n {\"CONOHA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"constellix\": [\n {\"CONSTELLIX_API_KEY\": \"User API key\"},\n {\"CONSTELLIX_SECRET_KEY\": \"User secret key\"},\n {\"CONSTELLIX_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"CONSTELLIX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"CONSTELLIX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"CONSTELLIX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"derak\": [\n {\"DERAK_API_KEY\": \"The API key\"},\n {\"DERAK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DERAK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DERAK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DERAK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"DERAK_WEBSITE_ID\": \"Force the zone/website ID\"},\n ],\n \"desec\": [\n {\"DESEC_TOKEN\": \"Domain token\"},\n {\"DESEC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DESEC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DESEC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DESEC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"designate\": [\n {\"DESIGNATE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DESIGNATE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DESIGNATE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnshomede\": [\n {\"DNSHOMEDE_CREDENTIALS\": \"Comma-separated list of domain:password credential pairs\"}\n ],\n \"dnsimple\": [\n {\"DNSIMPLE_OAUTH_TOKEN\": \"OAuth token\"},\n {\"DNSIMPLE_BASE_URL\": \"API endpoint URL\"},\n {\"DNSIMPLE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSIMPLE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSIMPLE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnsmadeeasy\": [\n {\"DNSMADEEASY_API_KEY\": \"The API key\"},\n {\"DNSMADEEASY_API_SECRET\": \"The API Secret key\"},\n {\"DNSMADEEASY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DNSMADEEASY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSMADEEASY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSMADEEASY_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"DNSMADEEASY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dnspod\": [\n {\"DNSPOD_API_KEY\": \"The user token\"},\n {\"DNSPOD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DNSPOD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DNSPOD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DNSPOD_TTL\": \"The TTL of the TXT record used for the DNS 
challenge\"},\n ],\n \"dode\": [\n {\"DODE_TOKEN\": \"API token\"},\n {\"DODE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DODE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DODE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DODE_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"DODE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"domeneshop\": [\n {\"DOMENESHOP_API_SECRET\": \"API secret\"},\n {\"DOMENESHOP_API_TOKEN\": \"API token\"},\n {\"DOMENESHOP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DOMENESHOP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DOMENESHOP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"dreamhost\": [\n {\"DREAMHOST_API_KEY\": \"The API key\"},\n {\"DREAMHOST_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DREAMHOST_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DREAMHOST_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DREAMHOST_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"duckdns\": [\n {\"DUCKDNS_TOKEN\": \"Account token\"},\n {\"DUCKDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DUCKDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DUCKDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DUCKDNS_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"DUCKDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dyn\": [\n {\"DYN_CUSTOMER_NAME\": \"Customer name\"},\n {\"DYN_PASSWORD\": \"Password\"},\n {\"DYN_USER_NAME\": \"User name\"},\n {\"DYN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DYN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DYN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DYN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"dynu\": [\n {\"DYNU_API_KEY\": \"API key\"},\n {\"DYNU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"DYNU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"DYNU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"DYNU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"easydns\": [\n {\"EASYDNS_KEY\": \"API Key\"},\n {\"EASYDNS_TOKEN\": \"API Token\"},\n {\"EASYDNS_ENDPOINT\": \"The endpoint URL of the API Server\"},\n {\"EASYDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EASYDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EASYDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EASYDNS_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"EASYDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"efficientip\": [\n {\"EFFICIENTIP_DNS_NAME\": \"DNS name (ex: dns.smart)\"},\n {\"EFFICIENTIP_HOSTNAME\": \"Hostname (ex: foo.example.com)\"},\n {\"EFFICIENTIP_PASSWORD\": \"Password\"},\n {\"EFFICIENTIP_USERNAME\": \"Username\"},\n {\"EFFICIENTIP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EFFICIENTIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EFFICIENTIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EFFICIENTIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"EFFICIENTIP_VIEW_NAME\": \"View name (ex: external)\"},\n ],\n \"epik\": [\n {\"EPIK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EPIK_POLLING_INTERVAL\": \"Time between DNS 
propagation check\"},\n {\"EPIK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EPIK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"exoscale\": [\n {\"EXOSCALE_API_KEY\": \"API key\"},\n {\"EXOSCALE_API_SECRET\": \"API secret\"},\n {\"EXOSCALE_API_ZONE\": \"API zone\"},\n {\"EXOSCALE_ENDPOINT\": \"API endpoint URL\"},\n {\"EXOSCALE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"EXOSCALE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"EXOSCALE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"EXOSCALE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"freemyip\": [\n {\"FREEMYIP_TOKEN\": \"Account token\"},\n {\"FREEMYIP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"FREEMYIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"FREEMYIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"FREEMYIP_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"FREEMYIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gandi\": [\n {\"GANDI_API_KEY\": \"API key\"},\n {\"GANDI_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GANDI_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GANDI_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GANDI_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gandiv5\": [\n {\"GANDIV5_API_KEY\": \"API key\"},\n {\"GANDIV5_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GANDIV5_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GANDIV5_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GANDIV5_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"gcore\": [\n {\"GCORE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GCORE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GCORE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GCORE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"glesys\": [\n {\"GLESYS_API_KEY\": \"API key\"},\n {\"GLESYS_API_USER\": \"API user\"},\n {\"GLESYS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GLESYS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GLESYS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GLESYS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"godaddy\": [\n {\"GODADDY_API_KEY\": \"API key\"},\n {\"GODADDY_API_SECRET\": \"API secret\"},\n {\"GODADDY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"GODADDY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"GODADDY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"GODADDY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"hetzner\": [\n {\"HETZNER_API_KEY\": \"API key\"},\n {\"HETZNER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HETZNER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HETZNER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HETZNER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"hostingde\": [\n {\"HOSTINGDE_API_KEY\": \"API key\"},\n {\"HOSTINGDE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HOSTINGDE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HOSTINGDE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HOSTINGDE_TTL\": \"The TTL of the TXT 
record used for the DNS challenge\"},\n {\"HOSTINGDE_ZONE_NAME\": \"Zone name in ACE format\"},\n ],\n \"hosttech\": [\n {\"HOSTTECH_API_KEY\": \"API login\"},\n {\"HOSTTECH_PASSWORD\": \"API password\"},\n {\"HOSTTECH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HOSTTECH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HOSTTECH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HOSTTECH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"httpreq\": [\n {\"HTTPREQ_ENDPOINT\": \"The URL of the server\"},\n {\"HTTPREQ_MODE\": \"'RAW', none\"},\n {\"HTTPREQ_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"HTTPREQ_PASSWORD\": \"Basic authentication password\"},\n {\"HTTPREQ_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HTTPREQ_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HTTPREQ_USERNAME\": \"Basic authentication username\"},\n ],\n \"hurricane\": [\n {\"HURRICANE_TOKENS\": \"TXT record names and tokens\"}\n ],\n \"hyperone\": [\n {\"HYPERONE_LOCATION_ID\": \"Specifies location (region) to be used in API calls. (default pl-waw-1)\"},\n {\"HYPERONE_PASSPORT_LOCATION\": \"Allows to pass custom passport file location (default ~/.h1/passport.json)\"},\n {\"HYPERONE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"HYPERONE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"HYPERONE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"iij\": [\n {\"IIJ_API_ACCESS_KEY\": \"API access key\"},\n {\"IIJ_API_SECRET_KEY\": \"API secret key\"},\n {\"IIJ_DO_SERVICE_CODE\": \"DO service code\"},\n {\"IIJ_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IIJ_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IIJ_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"IIJ_DPF_API_TOKEN\": \"API token\"},\n {\"IIJ_DPF_DPM_SERVICE_CODE\": \"IIJ Managed DNS Service's service code\"},\n {\"IIJ_DPF_POLLING_INTERVAL\": \"Time between DNS propagation check, defaults to 5 second\"},\n {\"IIJ_DPF_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation, defaults to 660 second\"},\n {\"IIJ_DPF_TTL\": \"The TTL of the TXT record used for the DNS challenge, default to 300\"},\n ],\n \"infoblox\": [\n {\"INFOBLOX_HOST\": \"Host URI\"},\n {\"INFOBLOX_PASSWORD\": \"Account Password\"},\n {\"INFOBLOX_USERNAME\": \"Account Username\"},\n {\"INFOBLOX_DNS_VIEW\": \"The view for the TXT records, default: External\"},\n {\"INFOBLOX_HTTP_TIMEOUT\": \"HTTP request timeout\"},\n {\"INFOBLOX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INFOBLOX_PORT\": \"The port for the infoblox grid manager, default: 443\"},\n {\"INFOBLOX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"INFOBLOX_SSL_VERIFY\": \"Whether or not to verify the TLS certificate, default: true\"},\n {\"INFOBLOX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n {\"INFOBLOX_WAPI_VERSION\": \"The version of WAPI being used, default: 2.11\"},\n ],\n \"infomaniak\": [\n {\"INFOMANIAK_ACCESS_TOKEN\": \"Access token\"},\n {\"INFOMANIAK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"INFOMANIAK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INFOMANIAK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"INFOMANIAK_TTL\": \"The TTL of the TXT record used for the DNS challenge in seconds\"},\n ],\n \"inwx\": [\n {\"INWX_PASSWORD\": 
\"Password\"},\n {\"INWX_USERNAME\": \"Username\"},\n {\"INWX_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"INWX_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation (default 360s)\"},\n {\"INWX_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"INWX_SHARED_SECRET\": \"shared secret related to 2FA\"},\n {\"INWX_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ionos\": [\n {\"IONOS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IONOS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IONOS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IONOS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ipv64\": [\n {\"IPV64_API_KEY\": \"Account API Key\"},\n {\"IPV64_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IPV64_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IPV64_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IPV64_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"IPV64_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"iwantmyname\": [\n {\"IWANTMYNAME_PASSWORD\": \"API password\"},\n {\"IWANTMYNAME_USERNAME\": \"API username\"},\n {\"IWANTMYNAME_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"IWANTMYNAME_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"IWANTMYNAME_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"IWANTMYNAME_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"joker\": [\n {\"JOKER_API_KEY\": \"API key (only with DMAPI mode)\"},\n {\"JOKER_API_MODE\": \"'DMAPI' or 'SVC'. DMAPI is for resellers accounts. (Default: DMAPI)\"},\n {\"JOKER_PASSWORD\": \"Joker.com password\"},\n {\"JOKER_USERNAME\": \"Joker.com username\"},\n {\"JOKER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"JOKER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"JOKER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"JOKER_SEQUENCE_INTERVAL\": \"Time between sequential requests (only with 'SVC' mode)\"},\n {\"JOKER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"liara\": [\n {\"LIARA_API_KEY\": \"The API key\"},\n {\"LIARA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LIARA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LIARA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LIARA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"lightsail\": [\n {\"LIGHTSAIL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LIGHTSAIL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n ],\n \"linode\": [\n {\"LINODE_TOKEN\": \"API token\"},\n {\"LINODE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LINODE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LINODE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LINODE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"loopia\": [\n {\"LOOPIA_API_PASSWORD\": \"API password\"},\n {\"LOOPIA_API_USER\": \"API username\"},\n {\"LOOPIA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LOOPIA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LOOPIA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LOOPIA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"luadns\": [\n {\"LUADNS_API_TOKEN\": \"API token\"},\n 
{\"LUADNS_API_USERNAME\": \"Username (your email)\"},\n {\"LUADNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"LUADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"LUADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"LUADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"metaname\": [\n {\"METANAME_ACCOUNT_REFERENCE\": \"The four-digit reference of a Metaname account\"},\n {\"METANAME_API_KEY\": \"API Key\"},\n {\"METANAME_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"METANAME_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"METANAME_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"mydnsjp\": [\n {\"MYDNSJP_MASTER_ID\": \"Master ID\"},\n {\"MYDNSJP_PASSWORD\": \"Password\"},\n {\"MYDNSJP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"MYDNSJP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"MYDNSJP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"MYDNSJP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"mythicbeasts\": [\n {\"MYTHICBEASTS_PASSWORD\": \"Password\"},\n {\"MYTHICBEASTS_USERNAME\": \"User name\"},\n {\"MYTHICBEASTS_API_ENDPOINT\": \"The endpoint for the API (must implement v2)\"},\n {\"MYTHICBEASTS_AUTH_API_ENDPOINT\": \"The endpoint for Mythic Beasts' Authentication\"},\n {\"MYTHICBEASTS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"MYTHICBEASTS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"MYTHICBEASTS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"MYTHICBEASTS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"namecheap\": [\n {\"NAMECHEAP_API_KEY\": \"API key\"},\n {\"NAMECHEAP_API_USER\": \"API user\"},\n {\"NAMECHEAP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NAMECHEAP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NAMECHEAP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NAMECHEAP_SANDBOX\": \"Activate the sandbox (boolean)\"},\n {\"NAMECHEAP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"namesilo\": [\n {\"NAMESILO_API_KEY\": \"Client ID\"},\n {\"NAMESILO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NAMESILO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation, it is better to set larger than 15m\"},\n {\"NAMESILO_TTL\": \"The TTL of the TXT record used for the DNS challenge, should be in [3600, 2592000]\"},\n ],\n \"nearlyfreespeech\": [\n {\"NEARLYFREESPEECH_API_KEY\": \"API Key for API requests\"},\n {\"NEARLYFREESPEECH_LOGIN\": \"Username for API requests\"},\n {\"NEARLYFREESPEECH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NEARLYFREESPEECH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NEARLYFREESPEECH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NEARLYFREESPEECH_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"NEARLYFREESPEECH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"netcup\": [\n {\"NETCUP_API_KEY\": \"API key\"},\n {\"NETCUP_API_PASSWORD\": \"API password\"},\n {\"NETCUP_CUSTOMER_NUMBER\": \"Customer number\"},\n {\"NETCUP_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NETCUP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NETCUP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NETCUP_TTL\": \"The TTL of the TXT record used 
for the DNS challenge\"},\n ],\n \"netlify\": [\n {\"NETLIFY_TOKEN\": \"Token\"},\n {\"NETLIFY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NETLIFY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NETLIFY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NETLIFY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nicmanager\": [\n {\"NICMANAGER_API_EMAIL\": \"Email-based login\"},\n {\"NICMANAGER_API_LOGIN\": \"Login, used for Username-based login\"},\n {\"NICMANAGER_API_PASSWORD\": \"Password, always required\"},\n {\"NICMANAGER_API_USERNAME\": \"Username, used for Username-based login\"},\n {\"NICMANAGER_API_MODE\": \"mode: 'anycast' or 'zone' (default: 'anycast')\"},\n {\"NICMANAGER_API_OTP\": \"TOTP Secret (optional)\"},\n {\"NICMANAGER_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NICMANAGER_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NICMANAGER_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NICMANAGER_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nifcloud\": [\n {\"NIFCLOUD_ACCESS_KEY_ID\": \"Access key\"},\n {\"NIFCLOUD_SECRET_ACCESS_KEY\": \"Secret access key\"},\n {\"NIFCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NIFCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NIFCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NIFCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"njalla\": [\n {\"NJALLA_TOKEN\": \"API token\"},\n {\"NJALLA_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NJALLA_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NJALLA_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NJALLA_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"nodion\": [\n {\"NODION_API_TOKEN\": \"The API token\"},\n {\"NODION_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NODION_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NODION_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NODION_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ns1\": [\n {\"NS1_API_KEY\": \"API key\"},\n {\"NS1_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"NS1_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"NS1_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"NS1_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"otc\": [\n {\"OTC_DOMAIN_NAME\": \"Domain name\"},\n {\"OTC_IDENTITY_ENDPOINT\": \"Identity endpoint URL\"},\n {\"OTC_PASSWORD\": \"Password\"},\n {\"OTC_PROJECT_NAME\": \"Project name\"},\n {\"OTC_USER_NAME\": \"User name\"},\n {\"OTC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"OTC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"OTC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"OTC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ovh\": [\n {\"OVH_APPLICATION_KEY\": \"Application key\"},\n {\"OVH_APPLICATION_SECRET\": \"Application secret\"},\n {\"OVH_CONSUMER_KEY\": \"Consumer key\"},\n {\"OVH_ENDPOINT\": \"Endpoint URL (ovh-eu or ovh-ca)\"},\n {\"OVH_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"OVH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"OVH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"OVH_TTL\": \"The TTL of the TXT record used for the DNS 
challenge\"},\n ],\n \"pdns\": [\n {\"PDNS_API_KEY\": \"API key\"},\n {\"PDNS_API_URL\": \"API URL\"},\n {\"PDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PDNS_SERVER_NAME\": \"Name of the server in the URL, 'localhost' by default\"},\n {\"PDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"plesk\": [\n {\"PLESK_PASSWORD\": \"API password\"},\n {\"PLESK_USERNAME\": \"API username\"},\n {\"PLESK_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PLESK_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PLESK_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PLESK_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"porkbun\": [\n {\"PORKBUN_API_KEY\": \"API key\"},\n {\"PORKBUN_SECRET_API_KEY\": \"secret API key\"},\n {\"PORKBUN_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"PORKBUN_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"PORKBUN_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"PORKBUN_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rackspace\": [\n {\"RACKSPACE_API_KEY\": \"API key\"},\n {\"RACKSPACE_USER\": \"API user\"},\n {\"RACKSPACE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RACKSPACE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RACKSPACE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RACKSPACE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rcodezero\": [\n {\"RCODEZERO_API_TOKEN\": \"API token\"},\n {\"RCODEZERO_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RCODEZERO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RCODEZERO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RCODEZERO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"regru\": [\n {\"REGRU_PASSWORD\": \"API password\"},\n {\"REGRU_USERNAME\": \"API username\"},\n {\"REGRU_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"REGRU_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"REGRU_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"REGRU_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rfc2136\": [\n {\"RFC2136_NAMESERVER\": \"Network address in the form 'host' or 'host:port'\"},\n {\"RFC2136_TSIG_KEY\": \"Name of the secret key as defined in DNS server configuration. To disable TSIG authentication, leave the 'RFC2136_TSIG*' variables unset.\"},\n {\"RFC2136_TSIG_SECRET\": \"Secret key payload. 
To disable TSIG authentication, leave the' RFC2136_TSIG*' variables unset.\"},\n {\"RFC2136_DNS_TIMEOUT\": \"API request timeout\"},\n {\"RFC2136_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RFC2136_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RFC2136_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"RFC2136_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"rimuhosting\": [\n {\"RIMUHOSTING_API_KEY\": \"User API key\"},\n {\"RIMUHOSTING_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"RIMUHOSTING_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"RIMUHOSTING_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"RIMUHOSTING_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"safedns\": [\n {\"SAFEDNS_AUTH_TOKEN\": \"Authentication token\"},\n {\"SAFEDNS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SAFEDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SAFEDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SAFEDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"sakuracloud\": [\n {\"SAKURACLOUD_ACCESS_TOKEN\": \"Access token\"},\n {\"SAKURACLOUD_ACCESS_TOKEN_SECRET\": \"Access token secret\"},\n {\"SAKURACLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SAKURACLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SAKURACLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SAKURACLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"scaleway\": [\n {\"SCALEWAY_API_TOKEN\": \"API token\"},\n {\"SCALEWAY_PROJECT_ID\": \"Project to use (optional)\"},\n {\"SCALEWAY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SCALEWAY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SCALEWAY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"selectel\": [\n {\"SELECTEL_API_TOKEN\": \"API token\"},\n {\"SELECTEL_BASE_URL\": \"API endpoint URL\"},\n {\"SELECTEL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SELECTEL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SELECTEL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SELECTEL_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"servercow\": [\n {\"SERVERCOW_PASSWORD\": \"API password\"},\n {\"SERVERCOW_USERNAME\": \"API username\"},\n {\"SERVERCOW_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SERVERCOW_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SERVERCOW_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SERVERCOW_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"simply\": [\n {\"SIMPLY_ACCOUNT_NAME\": \"Account name\"},\n {\"SIMPLY_API_KEY\": \"API key\"},\n {\"SIMPLY_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SIMPLY_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SIMPLY_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SIMPLY_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"sonic\": [\n {\"SONIC_API_KEY\": \"API Key\"},\n {\"SONIC_USER_ID\": \"User ID\"},\n {\"SONIC_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"SONIC_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"SONIC_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"SONIC_SEQUENCE_INTERVAL\": 
\"Time between sequential requests\"},\n {\"SONIC_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"stackpath\": [\n {\"STACKPATH_CLIENT_ID\": \"Client ID\"},\n {\"STACKPATH_CLIENT_SECRET\": \"Client secret\"},\n {\"STACKPATH_STACK_ID\": \"Stack ID\"},\n {\"STACKPATH_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"STACKPATH_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"STACKPATH_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"tencentcloud\": [\n {\"TENCENTCLOUD_SECRET_ID\": \"Access key ID\"},\n {\"TENCENTCLOUD_SECRET_KEY\": \"Access Key secret\"},\n {\"TENCENTCLOUD_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"TENCENTCLOUD_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"TENCENTCLOUD_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"TENCENTCLOUD_REGION\": \"Region\"},\n {\"TENCENTCLOUD_SESSION_TOKEN\": \"Access Key token\"},\n {\"TENCENTCLOUD_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"transip\": [\n {\"TRANSIP_ACCOUNT_NAME\": \"Account name\"},\n {\"TRANSIP_PRIVATE_KEY_PATH\": \"Private key path\"},\n {\"TRANSIP_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"TRANSIP_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"TRANSIP_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"ultradns\": [\n {\"ULTRADNS_PASSWORD\": \"API Password\"},\n {\"ULTRADNS_USERNAME\": \"API Username\"},\n {\"ULTRADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ULTRADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ULTRADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vegadns\": [\n {\"VEGADNS_URL\": \"API endpoint URL\"},\n {\"VEGADNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VEGADNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VEGADNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vercel\": [\n {\"VERCEL_API_TOKEN\": \"Authentication token\"},\n {\"VERCEL_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VERCEL_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VERCEL_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VERCEL_TEAM_ID\": \"Team ID (ex: team_xxxxxxxxxxxxxxxxxxxxxxxx)\"},\n {\"VERCEL_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"versio\": [\n {\"VERSIO_PASSWORD\": \"Basic authentication password\"},\n {\"VERSIO_USERNAME\": \"Basic authentication username\"},\n {\"VERSIO_ENDPOINT\": \"The endpoint URL of the API Server\"},\n {\"VERSIO_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VERSIO_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VERSIO_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VERSIO_SEQUENCE_INTERVAL\": \"Time between sequential requests, default 60s\"},\n {\"VERSIO_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vinyldns\": [\n {\"VINYLDNS_ACCESS_KEY\": \"The VinylDNS API key\"},\n {\"VINYLDNS_HOST\": \"The VinylDNS API URL\"},\n {\"VINYLDNS_SECRET_KEY\": \"The VinylDNS API Secret key\"},\n {\"VINYLDNS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VINYLDNS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VINYLDNS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vscale\": [\n 
{\"VSCALE_API_TOKEN\": \"API token\"},\n {\"VSCALE_BASE_URL\": \"API endpoint URL\"},\n {\"VSCALE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VSCALE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VSCALE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VSCALE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"vultr\": [\n {\"VULTR_API_KEY\": \"API key\"},\n {\"VULTR_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"VULTR_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"VULTR_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"VULTR_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"websupport\": [\n {\"WEBSUPPORT_API_KEY\": \"API key\"},\n {\"WEBSUPPORT_SECRET\": \"API secret\"},\n {\"WEBSUPPORT_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"WEBSUPPORT_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"WEBSUPPORT_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"WEBSUPPORT_SEQUENCE_INTERVAL\": \"Time between sequential requests\"},\n {\"WEBSUPPORT_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"wedos\": [\n {\"WEDOS_USERNAME\": \"Username is the same as for the admin account\"},\n {\"WEDOS_WAPI_PASSWORD\": \"Password needs to be generated and IP allowed in the admin interface\"},\n {\"WEDOS_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"WEDOS_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"WEDOS_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"WEDOS_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"zoneee\": [\n {\"ZONEEE_API_KEY\": \"API key\"},\n {\"ZONEEE_API_USER\": \"API user\"},\n {\"ZONEEE_ENDPOINT\": \"API endpoint URL\"},\n {\"ZONEEE_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ZONEEE_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ZONEEE_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ZONEEE_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n \"zonomi\": [\n {\"ZONOMI_API_KEY\": \"User API key\"},\n {\"ZONOMI_HTTP_TIMEOUT\": \"API request timeout\"},\n {\"ZONOMI_POLLING_INTERVAL\": \"Time between DNS propagation check\"},\n {\"ZONOMI_PROPAGATION_TIMEOUT\": \"Maximum waiting time for DNS propagation\"},\n {\"ZONOMI_TTL\": \"The TTL of the TXT record used for the DNS challenge\"},\n ],\n}" }, { "identifier": "utc_now_as_str", "path": "utils/helpers.py", "snippet": "def utc_now_as_str():\n return datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S%z\")" }, { "identifier": "ensure_list", "path": "utils/helpers.py", "snippet": "@validate_call\ndef ensure_list(s: str | list[str] | None) -> list:\n if s:\n if isinstance(s, str):\n return [s]\n if isinstance(s, list):\n return s\n return []" }, { "identifier": "to_unique_sorted_str_list", "path": "utils/helpers.py", "snippet": "@validate_call\ndef to_unique_sorted_str_list(l: list[str]) -> list:\n _l = [x for x in set(l) if x != \"\"]\n return sorted(_l, key=lambda x: str(x))" }, { "identifier": "get_validated_fqdn", "path": "utils/helpers.py", "snippet": "@validate_call\ndef get_validated_fqdn(hostname: str) -> str:\n regex = re.compile(\n r\"^((?![-])[-A-Z\\d]{1,63}(?<!-)[.])*(?!-)[-A-Z\\d]{1,63}(?<!-)?$\", re.IGNORECASE\n )\n if len(hostname) > 253:\n raise ValueError(f\"{hostname} is too long\")\n if regex.match(hostname):\n return hostname\n else:\n raise ValueError(f\"{hostname} is not a valid 
FQDN\")" }, { "identifier": "flatten", "path": "utils/helpers.py", "snippet": "@validate_call\ndef flatten(l: list[list]):\n return [i for sub_list in l for i in sub_list]" } ]
import json
import os
import re
import uuid
from config import defaults
from config import lego
from config.database import *
from email_validator import validate_email
from pydantic import (
    AfterValidator,
    BaseModel,
    EmailStr,
    Field,
    FilePath,
    HttpUrl,
    field_validator,
    model_validator,
    validator,
)
from pydantic.networks import IPv4Address, IPv6Address
from typing import Annotated, Any, Literal
from . import (
    utc_now_as_str,
    ensure_list,
    to_unique_sorted_str_list,
    get_validated_fqdn,
    flatten,
)
12,610
class ListenerCreate(BaseModel):
    id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))]
    name: Annotated[str, Field(min_length=1)]
    configuration: dict = {}
    historic: list = []
    created: Annotated[str, Field(default_factory=utc_now_as_str)]
    updated: Annotated[str, Field(default_factory=utc_now_as_str)]


class ListenerLegoConfig(BaseModel):
    lego_provider: str
    acme_terms_agreed: Literal[True, "true"]
    provider_config: dict
    acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))]
    acme_email: EmailStr
    key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048"
    domains: str

    @model_validator(mode="before")
    @classmethod
    def check_lego(self, data: Any) -> Any:
class ListenerCreate(BaseModel):
    id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))]
    name: Annotated[str, Field(min_length=1)]
    configuration: dict = {}
    historic: list = []
    created: Annotated[str, Field(default_factory=utc_now_as_str)]
    updated: Annotated[str, Field(default_factory=utc_now_as_str)]


class ListenerLegoConfig(BaseModel):
    lego_provider: str
    acme_terms_agreed: Literal[True, "true"]
    provider_config: dict
    acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))]
    acme_email: EmailStr
    key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048"
    domains: str

    @model_validator(mode="before")
    @classmethod
    def check_lego(self, data: Any) -> Any:
if data.get("lego_provider") not in lego.LEGO_DNS_PROVIDERS.keys():
1
2023-12-01 08:36:45+00:00
16k
fzmi/ubdd
models/dino/models/dino/dino.py
[ { "identifier": "box_ops", "path": "models/dino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "models/dino/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "nested_tensor_from_tensor_list", "path": "models/dino/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "models/dino/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, 
target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "models/dino/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "models/dino/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "models/dino/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "models/dino/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "build_backbone", "path": "models/dino/models/dino/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n 
parameter.requires_grad_(False)\n break\n if \"backbone_dir\" in args:\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model" }, { "identifier": "build_matcher", "path": "models/dino/models/dino/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" }, { "identifier": "DETRsegm", "path": "models/dino/models/dino/segmentation.py", "snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": 
outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out" }, { "identifier": "PostProcessPanoptic", "path": "models/dino/models/dino/segmentation.py", "snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. 
If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n 
predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds" }, { "identifier": "PostProcessSegm", "path": "models/dino/models/dino/segmentation.py", "snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results" }, { "identifier": "dice_loss", "path": "models/dino/models/dino/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "build_deformable_transformer", "path": "models/dino/models/dino/deformable_transformer.py", "snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n 
two_stage_pat_embed=args.two_stage_pat_embed,\n two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )" }, { "identifier": "sigmoid_focal_loss", "path": "models/dino/models/dino/utils.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "MLP", "path": "models/dino/models/dino/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/dino/models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "prepare_for_cdn", "path": "models/dino/models/dino/dn_components.py", "snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n 
dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 
1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta" }, { "identifier": "dn_post_process", "path": "models/dino/models/dino/dn_components.py", "snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord" } ]
import copy
import math
import torch
import torch.nn.functional as F
from typing import List
from torch import nn
from torchvision.ops.boxes import nms

from models.dino.util import box_ops
from models.dino.util.misc import (NestedTensor, nested_tensor_from_tensor_list,
                                   accuracy, get_world_size, interpolate,
                                   is_dist_avail_and_initialized, inverse_sigmoid)

from .backbone import build_backbone
from .matcher import build_matcher
from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss)
from .deformable_transformer import build_deformable_transformer
from .utils import sigmoid_focal_loss, MLP
from ..registry import MODULE_BUILD_FUNCS
from .dn_components import prepare_for_cdn, dn_post_process
11,429
l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar, **kwargs)) l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = self.matcher(interm_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) # enc output loss if 'enc_outputs' in outputs: for i, enc_outputs in enumerate(outputs['enc_outputs']): indices = self.matcher(enc_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses def prep_for_dn(self,dn_meta): output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes'] num_dn_groups,pad_size=dn_meta['num_dn_group'],dn_meta['pad_size'] assert pad_size % num_dn_groups==0 single_pad=pad_size//num_dn_groups return output_known_lbs_bboxes,single_pad,num_dn_groups class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100, nms_iou_threshold=-1) -> None: super().__init__() self.num_select = num_select self.nms_iou_threshold = nms_iou_threshold @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) if test: assert not not_to_xyxy boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to 
absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) if self.random_refpoints_xy: self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False if self.fix_refpoints_hw > 0: print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw)) assert self.random_refpoints_xy self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:]) self.refpoint_embed.weight.data[:, 2:].requires_grad = False elif int(self.fix_refpoints_hw) == -1: pass elif int(self.fix_refpoints_hw) == -2: print('learn a shared h and w') assert self.random_refpoints_xy self.refpoint_embed = nn.Embedding(use_num_queries, 2) self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, 
:2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False self.hw_embed = nn.Embedding(1, 1) else: raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw)) def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0] += self.label_enc.weight[0,0]*0.0 # deformable-detr-like anchor update # reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = layer_outputs_unsig.sigmoid() outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # 
for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid() layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. 
focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device) if is_dist_avail_and_initialized(): torch.distributed.all_reduce(num_boxes) num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() # Compute all the requested losses losses = {} # prepare for dn loss dn_meta = outputs['dn_meta'] if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: output_known_lbs_bboxes,single_pad, scalar = self.prep_for_dn(dn_meta) dn_pos_idx = [] dn_neg_idx = [] for i in range(len(targets)): if len(targets[i]['labels']) > 0: t = torch.range(0, len(targets[i]['labels']) - 1).long().cuda() t = t.unsqueeze(0).repeat(scalar, 1) tgt_idx = t.flatten() output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t output_idx = output_idx.flatten() else: output_idx = tgt_idx = torch.tensor([]).long().cuda() dn_pos_idx.append((output_idx, tgt_idx)) dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx)) output_known_lbs_bboxes=dn_meta['output_known_lbs_bboxes'] l_dict = {} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes*scalar,**kwargs)) l_dict = {k + f'_dn': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') losses.update(l_dict) for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if 'aux_outputs' in outputs: for idx, aux_outputs in enumerate(outputs['aux_outputs']): indices = self.matcher(aux_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx] l_dict={} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar, **kwargs)) l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = self.matcher(interm_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) # enc output loss if 'enc_outputs' in outputs: for i, enc_outputs in enumerate(outputs['enc_outputs']): indices = self.matcher(enc_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses def prep_for_dn(self,dn_meta): output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes'] num_dn_groups,pad_size=dn_meta['num_dn_group'],dn_meta['pad_size'] assert pad_size % num_dn_groups==0 single_pad=pad_size//num_dn_groups return output_known_lbs_bboxes,single_pad,num_dn_groups class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100, nms_iou_threshold=-1) -> None: super().__init__() self.num_select = num_select self.nms_iou_threshold = nms_iou_threshold @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) if test: assert not not_to_xyxy boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results
@MODULE_BUILD_FUNCS.registe_with_name(module_name='dino')
17
2023-12-04 00:27:58+00:00
16k
girgle/DouZero_For_New_HLDDZ
test.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n self.Interrupt = False\n self.RealRate = (1440, 810)\n self.GetZoomRate()\n for file in os.listdir(\"./pics\"):\n info = file.split(\".\")\n if info[1] == \"png\":\n tmpImage = Image.open(\"./pics/\" + file)\n imgCv = cv2.imread(\"./pics/\" + file)\n self.Pics.update({info[0]: tmpImage})\n self.PicsCV.update({info[0]: imgCv})\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def Screenshot(self, region=None): # -> (im, (left, top))\n try_count = 3\n success = False\n while try_count > 0 and not success:\n try:\n try_count -= 1\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n hwnd = self.Handle\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n width = right - left\n height = bot - top\n self.RealRate = (width, height)\n width = int(width)\n height = int(height)\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\n saveDC.SelectObject(saveBitMap)\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n im = Image.frombuffer(\n \"RGB\",\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n im = im.resize((1440, 810))\n if region is not None:\n im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\n if result:\n success = True\n return im, (left, top)\n except Exception as e:\n print(\"截图时出现错误:\", repr(e))\n self.sleep(200)\n return None, (0, 0)\n\n def GetZoomRate(self):\n self.ScreenZoomRate = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100\n\n def LocateOnScreen(self, templateName, region, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n def ClickOnImage(self, templateName, region=None, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n result = LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n if result is not None:\n self.LeftClick(result)\n print(result)\n\n def LeftClick(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')\n time.sleep(0.1)\n pyautogui.moveTo(int(left + 1000), int(top + 550))\n\n '''win32gui.SetActiveWindow(self.Handle)\n lParam = win32api.MAKELONG(x, y)\n\n win32gui.PostMessage(self.Handle, WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, 
WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)'''\n\n def LeftClick2(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')" }, { "identifier": "get_move_type", "path": "douzero/env/move_detector.py", "snippet": "def get_move_type(move):\n move_size = len(move)\n move_dict = collections.Counter(move)\n\n if move_size == 0:\n return {'type': TYPE_0_PASS}\n\n if move_size == 1:\n return {'type': TYPE_1_SINGLE, 'rank': move[0]}\n\n if move_size == 2:\n if move[0] == move[1]:\n return {'type': TYPE_2_PAIR, 'rank': move[0]}\n elif move == [20, 30]: # Kings\n return {'type': TYPE_5_KING_BOMB}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 3:\n if len(move_dict) == 1:\n return {'type': TYPE_3_TRIPLE, 'rank': move[0]}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 4:\n if len(move_dict) == 1:\n return {'type': TYPE_4_BOMB, 'rank': move[0]}\n elif len(move_dict) == 2:\n if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:\n return {'type': TYPE_6_3_1, 'rank': move[1]}\n else:\n return {'type': TYPE_15_WRONG}\n else:\n return {'type': TYPE_15_WRONG}\n\n if is_continuous_seq(move):\n return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}\n\n if move_size == 5:\n if len(move_dict) == 2:\n return {'type': TYPE_7_3_2, 'rank': move[2]}\n else:\n return {'type': TYPE_15_WRONG}\n\n count_dict = collections.defaultdict(int)\n for c, n in move_dict.items():\n count_dict[n] += 1\n\n if move_size == 6:\n if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \\\n (count_dict.get(2) == 1 or count_dict.get(1) == 2):\n return {'type': TYPE_13_4_2, 'rank': move[2]}\n\n if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and\n (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):\n return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}\n\n mdkeys = sorted(move_dict.keys())\n if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):\n return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):\n return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n # Check Type 11 (serial 3+1) and Type 12 (serial 3+2)\n if count_dict.get(3, 0) >= MIN_TRIPLES:\n serial_3 = list()\n single = list()\n pair = list()\n\n for k, v in move_dict.items():\n if v == 3:\n serial_3.append(k)\n elif v == 1:\n single.append(k)\n elif v == 2:\n pair.append(k)\n else: # no other possibilities\n return {'type': TYPE_15_WRONG}\n\n serial_3.sort()\n if is_continuous_seq(serial_3):\n if len(serial_3) == len(single)+len(pair)*2:\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}\n if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:\n return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}\n\n if len(serial_3) == 4:\n if is_continuous_seq(serial_3[1:]):\n return {'type': 
TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}\n if is_continuous_seq(serial_3[:-1]):\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}\n\n return {'type': TYPE_15_WRONG}" }, { "identifier": "Ui_Form", "path": "MainWindow.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(677, 450)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(9)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n Form.setFont(font)\n Form.setWindowOpacity(0.8)\n self.WinRate = QtWidgets.QLabel(Form)\n self.WinRate.setGeometry(QtCore.QRect(320, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.WinRate.setFont(font)\n self.WinRate.setAlignment(QtCore.Qt.AlignCenter)\n self.WinRate.setObjectName(\"WinRate\")\n self.UserHandCards = QtWidgets.QLabel(Form)\n self.UserHandCards.setGeometry(QtCore.QRect(30, 330, 351, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.UserHandCards.setFont(font)\n self.UserHandCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.UserHandCards.setObjectName(\"UserHandCards\")\n self.ThreeLandlordCards = QtWidgets.QLabel(Form)\n self.ThreeLandlordCards.setGeometry(QtCore.QRect(30, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.ThreeLandlordCards.setFont(font)\n self.ThreeLandlordCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.ThreeLandlordCards.setObjectName(\"ThreeLandlordCards\")\n self.BidWinrate = QtWidgets.QLabel(Form)\n self.BidWinrate.setGeometry(QtCore.QRect(30, 220, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.BidWinrate.setFont(font)\n self.BidWinrate.setObjectName(\"BidWinrate\")\n self.PreWinrate = QtWidgets.QLabel(Form)\n self.PreWinrate.setGeometry(QtCore.QRect(30, 280, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PreWinrate.setFont(font)\n self.PreWinrate.setObjectName(\"PreWinrate\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(490, 320, 101, 41))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.LPlayedCard = QtWidgets.QLabel(Form)\n self.LPlayedCard.setGeometry(QtCore.QRect(170, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LPlayedCard.setFont(font)\n self.LPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.LPlayedCard.setObjectName(\"LPlayedCard\")\n self.splitter_2 = QtWidgets.QSplitter(Form)\n self.splitter_2.setGeometry(QtCore.QRect(20, 380, 621, 41))\n self.splitter_2.setOrientation(QtCore.Qt.Horizontal)\n self.splitter_2.setObjectName(\"splitter_2\")\n self.SingleButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n 
font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.SingleButton.setFont(font)\n self.SingleButton.setObjectName(\"SingleButton\")\n self.LoopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LoopButton.setFont(font)\n self.LoopButton.setObjectName(\"LoopButton\")\n self.StopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.StopButton.setFont(font)\n self.StopButton.setObjectName(\"StopButton\")\n self.tableWidget = QtWidgets.QTableWidget(Form)\n self.tableWidget.setGeometry(QtCore.QRect(20, 10, 611, 75))\n self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 75))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget.setFont(font)\n self.tableWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tableWidget.setStyleSheet(\"QTableWidget{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:#444444;\\n\"\n\"border:1px solid #242424;\\n\"\n\"alternate-background-color:#525252;\\n\"\n\"gridline-color:#242424;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:selected{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #484848,stop:1 #383838);\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:hover{\\n\"\n\"background:#5B5B5B;\\n\"\n\"}\\n\"\n\"QHeaderView::section{\\n\"\n\"text-align:center;\\n\"\n\"background:#5E5E5E;\\n\"\n\"padding:3px;\\n\"\n\"margin:0px;\\n\"\n\"color:#DCDCDC;\\n\"\n\"border:1px solid #242424;\\n\"\n\"border-left-width:0;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar:vertical{\\n\"\n\"background:#484848;\\n\"\n\"padding:0px;\\n\"\n\"border-radius:6px;\\n\"\n\"max-width:12px;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::handle:vertical{\\n\"\n\"background:#CCCCCC;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::handle:hover:vertical,QScrollBar::handle:pressed:vertical{\\n\"\n\"background:#A7A7A7;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-page:vertical{\\n\"\n\"background:444444;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::add-page:vertical{\\n\"\n\"background:5B5B5B;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::add-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\")\n self.tableWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.tableWidget.setMidLineWidth(-1)\n self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setAutoScroll(False)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)\n self.tableWidget.setObjectName(\"tableWidget\")\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n 
self.tableWidget.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(6, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(7, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(8, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(9, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(10, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(11, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(12, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(13, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(14, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 0, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 1, item)\n item = QtWidgets.QTableWidgetItem()\n 
item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 2, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 3, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 4, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 5, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 6, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 7, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 8, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 9, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 10, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 11, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 12, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 13, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 14, item)\n self.tableWidget.horizontalHeader().setVisible(True)\n self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(41)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.verticalHeader().setCascadingSectionResizes(False)\n self.tableWidget.verticalHeader().setDefaultSectionSize(40)\n self.tableWidget.verticalHeader().setHighlightSections(True)\n self.tableWidget.verticalHeader().setMinimumSectionSize(40)\n self.tableWidget.verticalHeader().setSortIndicatorShown(False)\n self.RPlayedCard = QtWidgets.QLabel(Form)\n self.RPlayedCard.setGeometry(QtCore.QRect(490, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.RPlayedCard.setFont(font)\n self.RPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.RPlayedCard.setObjectName(\"RPlayedCard\")\n self.PredictedCard = QtWidgets.QLabel(Form)\n self.PredictedCard.setGeometry(QtCore.QRect(320, 190, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PredictedCard.setFont(font)\n self.PredictedCard.setStyleSheet(\"\")\n self.PredictedCard.setFrameShape(QtWidgets.QFrame.Panel)\n self.PredictedCard.setLineWidth(1)\n self.PredictedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.PredictedCard.setObjectName(\"PredictedCard\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Hi\"))\n self.WinRate.setText(_translate(\"Form\", \"评分\"))\n self.UserHandCards.setText(_translate(\"Form\", \"手牌\"))\n self.ThreeLandlordCards.setText(_translate(\"Form\", \"地主牌\"))\n 
self.BidWinrate.setText(_translate(\"Form\", \"叫牌胜率:\"))\n self.PreWinrate.setText(_translate(\"Form\", \"局前胜率:\"))\n self.label.setText(_translate(\"Form\", \"游戏状态\"))\n self.LPlayedCard.setText(_translate(\"Form\", \"上家出牌区域\"))\n self.SingleButton.setText(_translate(\"Form\", \"单局\"))\n self.LoopButton.setText(_translate(\"Form\", \" 连续\"))\n self.StopButton.setText(_translate(\"Form\", \"停止\"))\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"Form\", \"大\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"Form\", \"小\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"Form\", \"2\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"Form\", \"A\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"Form\", \"K\"))\n item = self.tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"Form\", \"Q\"))\n item = self.tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"Form\", \"J\"))\n item = self.tableWidget.horizontalHeaderItem(7)\n item.setText(_translate(\"Form\", \"10\"))\n item = self.tableWidget.horizontalHeaderItem(8)\n item.setText(_translate(\"Form\", \"9\"))\n item = self.tableWidget.horizontalHeaderItem(9)\n item.setText(_translate(\"Form\", \"8\"))\n item = self.tableWidget.horizontalHeaderItem(10)\n item.setText(_translate(\"Form\", \"7\"))\n item = self.tableWidget.horizontalHeaderItem(11)\n item.setText(_translate(\"Form\", \"6\"))\n item = self.tableWidget.horizontalHeaderItem(12)\n item.setText(_translate(\"Form\", \"5\"))\n item = self.tableWidget.horizontalHeaderItem(13)\n item.setText(_translate(\"Form\", \"4\"))\n item = self.tableWidget.horizontalHeaderItem(14)\n item.setText(_translate(\"Form\", \"3\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n item = self.tableWidget.item(0, 0)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 1)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 2)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 3)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 4)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 5)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 6)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 7)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 8)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 9)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 10)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 11)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 12)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 13)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 14)\n item.setText(_translate(\"Form\", \"0\"))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n self.RPlayedCard.setText(_translate(\"Form\", \"下家出牌区域\"))\n self.PredictedCard.setText(_translate(\"Form\", \"AI出牌区域\"))" }, { "identifier": "GameEnv", "path": "douzero/env/game.py", "snippet": "class GameEnv(object):\n\n def __init__(self, players):\n\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n 
self.acting_player_position = None\n self.player_utility_dict = None\n\n self.players = players\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.num_wins = {'landlord': 0,\n 'farmer': 0}\n\n self.num_scores = {'landlord': 0,\n 'farmer': 0}\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 1,\n 'landlord_up': 1,\n 'landlord_down': 1}\n self.step_count = 0\n\n\n def card_play_init(self, card_play_data):\n self.info_sets['landlord'].player_hand_cards = \\\n card_play_data['landlord']\n self.info_sets['landlord_up'].player_hand_cards = \\\n card_play_data['landlord_up']\n self.info_sets['landlord_down'].player_hand_cards = \\\n card_play_data['landlord_down']\n self.three_landlord_cards = card_play_data['three_landlord_cards']\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n\n\n def game_done(self):\n if len(self.info_sets['landlord'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_down'].player_hand_cards) == 0:\n # if one of the three players discards his hand,\n # then game is over.\n self.compute_player_utility()\n self.update_num_wins_scores()\n\n self.game_over = True\n\n def compute_player_utility(self):\n\n if len(self.info_sets['landlord'].player_hand_cards) == 0:\n self.player_utility_dict = {'landlord': 2,\n 'farmer': -1}\n else:\n self.player_utility_dict = {'landlord': -2,\n 'farmer': 1}\n\n def update_num_wins_scores(self):\n for pos, utility in self.player_utility_dict.items():\n base_score = 2 if pos == 'landlord' else 1\n if utility > 0:\n self.num_wins[pos] += 1\n self.winner = pos\n self.num_scores[pos] += base_score * (2 ** self.bomb_num)\n else:\n self.num_scores[pos] -= base_score * (2 ** self.bomb_num)\n\n def get_winner(self):\n return self.winner\n\n def get_bomb_num(self):\n return self.bomb_num\n\n def step(self, position, action=[]):\n win_rate = 0\n if self.acting_player_position == position:\n action, actions_confidence = self.players[1].act(self.game_infoset)\n # 计算胜率\n win_rate = actions_confidence\n # win_rate = max(actions_confidence, -1)\n # win_rate = min(win_rate, 1)\n # win_rate = str(round(float((win_rate + 1) / 2), 4))\n\n if len(action) > 0:\n self.last_pid = self.acting_player_position\n\n if action in bombs:\n self.bomb_num += 1\n\n self.last_move_dict[\n self.acting_player_position] = action.copy()\n\n self.card_play_action_seq.append((position, action))\n self.update_acting_player_hand_cards(action)\n\n self.played_cards[self.acting_player_position] += action\n\n if self.acting_player_position == 'landlord' and \\\n len(action) > 0 and \\\n len(self.three_landlord_cards) > 0:\n for card in action:\n if len(self.three_landlord_cards) > 0:\n if card in self.three_landlord_cards:\n self.three_landlord_cards.remove(card)\n else:\n break\n self.game_done()\n if not self.game_over:\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n # 返回动作和胜率,只有玩家角色会接受返回值\n action_message = {\"action\": str(''.join([EnvCard2RealCard[c] for c in action])),\n \"win_rate\": 
str(round(float(win_rate), 4))}\n return action_message\n\n def get_last_move(self):\n last_move = []\n if len(self.card_play_action_seq) != 0:\n if len(self.card_play_action_seq[-1][1]) == 0:\n last_move = self.card_play_action_seq[-2][1]\n else:\n last_move = self.card_play_action_seq[-1][1]\n\n return last_move\n\n def get_last_two_moves(self):\n last_two_moves = [[], []]\n for card in self.card_play_action_seq[-2:]:\n last_two_moves.insert(0, card[1])\n last_two_moves = last_two_moves[:2]\n return last_two_moves\n\n def get_acting_player_position(self):\n if self.acting_player_position is None:\n self.acting_player_position = 'landlord'\n\n else:\n if self.acting_player_position == 'landlord':\n self.acting_player_position = 'landlord_down'\n\n elif self.acting_player_position == 'landlord_down':\n self.acting_player_position = 'landlord_up'\n\n else:\n self.acting_player_position = 'landlord'\n\n return self.acting_player_position\n\n def update_acting_player_hand_cards(self, action):\n if action != []:\n # 更新玩家手牌,删除对应的牌\n if self.acting_player_position == self.players[0]:\n for card in action:\n self.info_sets[self.acting_player_position].player_hand_cards.remove(card)\n # 更新另外两个玩家手牌,删除相同数量的牌\n else:\n del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)]\n self.info_sets[self.acting_player_position].player_hand_cards.sort()\n\n def get_legal_card_play_actions(self):\n mg = MovesGener(\n self.info_sets[self.acting_player_position].player_hand_cards)\n\n action_sequence = self.card_play_action_seq\n\n rival_move = []\n if len(action_sequence) != 0:\n if len(action_sequence[-1][1]) == 0:\n rival_move = action_sequence[-2][1]\n else:\n rival_move = action_sequence[-1][1]\n\n rival_type = md.get_move_type(rival_move)\n rival_move_type = rival_type['type']\n rival_move_len = rival_type.get('len', 1)\n moves = list()\n\n if rival_move_type == md.TYPE_0_PASS:\n moves = mg.gen_moves()\n\n elif rival_move_type == md.TYPE_1_SINGLE:\n all_moves = mg.gen_type_1_single()\n moves = ms.filter_type_1_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_2_PAIR:\n all_moves = mg.gen_type_2_pair()\n moves = ms.filter_type_2_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_3_TRIPLE:\n all_moves = mg.gen_type_3_triple()\n moves = ms.filter_type_3_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_4_BOMB:\n all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n moves = ms.filter_type_4_bomb(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_5_KING_BOMB:\n moves = []\n\n elif rival_move_type == md.TYPE_6_3_1:\n all_moves = mg.gen_type_6_3_1()\n moves = ms.filter_type_6_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_7_3_2:\n all_moves = mg.gen_type_7_3_2()\n moves = ms.filter_type_7_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_8_SERIAL_SINGLE:\n all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len)\n moves = ms.filter_type_8_serial_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_9_SERIAL_PAIR:\n all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len)\n moves = ms.filter_type_9_serial_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE:\n all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len)\n moves = ms.filter_type_10_serial_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_11_SERIAL_3_1:\n all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len)\n moves = 
ms.filter_type_11_serial_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_12_SERIAL_3_2:\n all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len)\n moves = ms.filter_type_12_serial_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_13_4_2:\n all_moves = mg.gen_type_13_4_2()\n moves = ms.filter_type_13_4_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_14_4_22:\n all_moves = mg.gen_type_14_4_22()\n moves = ms.filter_type_14_4_22(all_moves, rival_move)\n\n if rival_move_type not in [md.TYPE_0_PASS,\n md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]:\n moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n\n if len(rival_move) != 0: # rival_move is not 'pass'\n moves = moves + [[]]\n\n for m in moves:\n m.sort()\n\n return moves\n\n def reset(self):\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n self.acting_player_position = None\n self.player_utility_dict = None\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 0,\n 'landlord_up': 0,\n 'landlord_down': 0}\n self.step_count = 0\n\n def get_infoset(self):\n self.info_sets[\n self.acting_player_position].last_pid = self.last_pid\n\n self.info_sets[\n self.acting_player_position].legal_actions = \\\n self.get_legal_card_play_actions()\n\n self.info_sets[\n self.acting_player_position].bomb_num = self.bomb_num\n\n self.info_sets[\n self.acting_player_position].last_move = self.get_last_move()\n\n self.info_sets[\n self.acting_player_position].last_two_moves = self.get_last_two_moves()\n\n self.info_sets[\n self.acting_player_position].last_move_dict = self.last_move_dict\n\n self.info_sets[self.acting_player_position].num_cards_left_dict = \\\n {pos: len(self.info_sets[pos].player_hand_cards)\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n self.info_sets[self.acting_player_position].other_hand_cards = []\n\n '''\n 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n if pos != self.acting_player_position:\n self.info_sets[\n self.acting_player_position].other_hand_cards += \\\n self.info_sets[pos].player_hand_cards\n '''\n # 把出过的牌中三个子列表合成一个列表\n played_cards_tmp = []\n for i in list(self.played_cards.values()):\n played_cards_tmp.extend(i)\n # 出过的牌和玩家手上的牌\n played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards\n # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌\n for i in set(AllEnvCard):\n self.info_sets[\n self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i)))\n\n self.info_sets[self.acting_player_position].played_cards = \\\n self.played_cards\n self.info_sets[self.acting_player_position].three_landlord_cards = \\\n self.three_landlord_cards\n self.info_sets[self.acting_player_position].card_play_action_seq = \\\n self.card_play_action_seq\n\n self.info_sets[\n self.acting_player_position].all_handcards = \\\n {pos: self.info_sets[pos].player_hand_cards\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n # Custom bid 
info\n self.info_sets[self.acting_player_position].bid_info = bid_infos[self.acting_player_position]\n\n return deepcopy(self.info_sets[self.acting_player_position])" }, { "identifier": "DeepAgent", "path": "douzero/evaluation/deep_agent.py", "snippet": "class DeepAgent:\n\n def __init__(self, position, model_path):\n self.model_type = \"old\"\n if \"general\" in model_path:\n self.model_type = \"general\"\n elif \"resnet\" in model_path:\n self.model_type = \"resnet\"\n self.model = _load_model(position, model_path, self.model_type)\n\n def act(self, infoset):\n obs = get_obs(infoset, model_type=self.model_type)\n z_batch = torch.from_numpy(obs['z_batch']).float()\n x_batch = torch.from_numpy(obs['x_batch']).float()\n if torch.cuda.is_available():\n z_batch, x_batch = z_batch.cuda(), x_batch.cuda()\n y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values']\n y_pred = y_pred.detach().cpu().numpy()\n\n best_action_index = np.argmax(y_pred, axis=0)[0]\n best_action = infoset.legal_actions[best_action_index]\n best_action_confidence = y_pred[best_action_index]\n return best_action, best_action_confidence" } ]
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsItem, QGraphicsPixmapItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent from cnocr import CnOcr
11,932
'''from GameHelper import GameHelper import numpy as np import cv2 from main import MyPyQT_Form as form from collections import defaultdict import GameHelper as gh GeneralBtnPos = (268, 481, 1240, 255) helper = GameHelper() helper.ScreenZoomRate = 1.0 img = cv2.imread("chaojijiabei.png") img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) result = helper.LocateOnScreen("chaojijiabei_btn", img=img, region=GeneralBtnPos) print(result)''' # -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx ocr = CnOcr(det_model_name='en_PP-OCRv3_det', rec_model_name='en_PP-OCRv3', cand_alphabet="12345678910JQKA") # 所有参数都使用默认值
'''from GameHelper import GameHelper import numpy as np import cv2 from main import MyPyQT_Form as form from collections import defaultdict import GameHelper as gh GeneralBtnPos = (268, 481, 1240, 255) helper = GameHelper() helper.ScreenZoomRate = 1.0 img = cv2.imread("chaojijiabei.png") img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) result = helper.LocateOnScreen("chaojijiabei_btn", img=img, region=GeneralBtnPos) print(result)''' # -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx ocr = CnOcr(det_model_name='en_PP-OCRv3_det', rec_model_name='en_PP-OCRv3', cand_alphabet="12345678910JQKA") # 所有参数都使用默认值
helper = GameHelper()
0
2023-12-01 04:04:30+00:00
16k
yongzhuo/MacroGPT-Pretrain
macro_gpt/ft_gpt/train.pt.add.py
[ { "identifier": "CUDA_VISIBLE_DEVICES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CUDA_VISIBLE_DEVICES = \"0\"" }, { "identifier": "USE_TORCH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_TORCH = \"1\"" }, { "identifier": "CPU_NUMS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CPU_NUMS = \"9\"" }, { "identifier": "LlamaForCausalLM", "path": "macro_gpt/models/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n # logits = self.lm_head(hidden_states)\n logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype))\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, 
beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "macro_gpt/models/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n pad_token (`str` or `tokenizers.AddedToken`, *optional*):\n A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by\n attention mechanisms or loss computation.\n sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):\n Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for\n SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,\n to set:\n\n - `enable_sampling`: Enable subword regularization.\n - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.\n\n - `nbest_size = {0,1}`: No sampling is performed.\n - `nbest_size > 1`: samples from the nbest_size results.\n - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)\n using forward-filtering-and-backward-sampling algorithm.\n\n - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for\n BPE-dropout.\n\n add_bos_token (`bool`, *optional*, defaults to `True`):\n Whether or not to add an `bos_token` at the start of sequences.\n add_eos_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add an `eos_token` at the end of sequences.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like\n extra spaces.\n use_default_system_prompt (`bool`, *optional*, defaults to `True`):\n Whether or not the default system prompt for Llama should be used.\n spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to add spaces between special tokens.\n legacy (`bool`, *optional*):\n Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622\n and #25224 which includes fixes to properly handle tokens that appear after special tokens. 
A simple\n example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n use_default_system_prompt=True,\n spaces_between_special_tokens=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This is\"\n \" expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you.\"\n \" If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it\"\n \" means, and thouroughly read the reason why this was added as explained in\"\n \" https://github.com/huggingface/transformers/pull/24565\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.use_default_system_prompt = use_default_system_prompt\n self.sp_model = self.get_spm_processor(kwargs.pop(\"from_slow\", False))\n\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n use_default_system_prompt=use_default_system_prompt,\n spaces_between_special_tokens=spaces_between_special_tokens,\n legacy=legacy,\n **kwargs,\n )\n\n @property\n def unk_token_length(self):\n return len(self.sp_model.encode(str(self.unk_token)))\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor\n def get_spm_processor(self, from_slow=False):\n tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n if self.legacy or from_slow: # no dependency on protobuf\n tokenizer.Load(self.vocab_file)\n return tokenizer\n\n with open(self.vocab_file, \"rb\") as f:\n sp_model = f.read()\n model_pb2 = import_protobuf(f\"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)\")\n model = model_pb2.ModelProto.FromString(sp_model)\n normalizer_spec = model_pb2.NormalizerSpec()\n normalizer_spec.add_dummy_prefix = False\n model.normalizer_spec.MergeFrom(normalizer_spec)\n sp_model = model.SerializeToString()\n tokenizer.LoadFromSerializedProto(sp_model)\n return tokenizer\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", add_special_tokens=False, **kwargs) -> List[str]:\n \"\"\"\n Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the\n first token is special.\n \"\"\"\n if self.legacy or len(text) == 0:\n return super().tokenize(text, **kwargs)\n\n tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \"), **kwargs)\n\n if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:\n tokens = tokens[1:]\n return tokens\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any\n SPIECE_UNDERLINE. For example: `self.sp_model.encode(f\"{SPIECE_UNDERLINE}Hey\", out_type = str)` will give\n `['H', 'e', 'y']` instead of `['▁He', 'y']`. 
Thus we always encode `f\"{unk_token}text\"` and strip the\n `unk_token`. Here is an example with `unk_token = \"<unk>\"` and `unk_token_length = 4`.\n `self.tokenizer.sp_model.encode(\"<unk> Hey\", out_type = str)[4:]`.\n \"\"\"\n tokens = self.sp_model.encode(text, out_type=str)\n if self.legacy or not text.startswith((SPIECE_UNDERLINE, \" \")):\n return tokens\n\n # 1. Encode string + prefix ex: \"<unk> Hey\"\n tokens = self.sp_model.encode(self.unk_token + text, out_type=str)\n # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']\n return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n # since we manually add the prefix space, we have to remove it when decoding\n if tokens[0].startswith(SPIECE_UNDERLINE):\n tokens[0] = tokens[0][1:]\n\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0 and self.legacy:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n @property\n def default_chat_template(self):\n \"\"\"\n LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.\n Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict\n user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering\n rather than needing special tokens. The system message is partly 'embedded' in the first user message, which\n results in an unusual token ordering when it is present. 
This template should definitely be changed if you wish\n to fine-tune a model with more flexible role ordering!\n\n The output should look something like:\n\n <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos> <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n \"\"\"\n\n template = (\n \"{% if messages[0]['role'] == 'system' %}\"\n \"{% set loop_messages = messages[1:] %}\" # Extract system message if it's present\n \"{% set system_message = messages[0]['content'] %}\"\n \"{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}\"\n \"{% set loop_messages = messages %}\" # Or use the default system message if the flag is set\n \"{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}\"\n \"{% else %}\"\n \"{% set loop_messages = messages %}\"\n \"{% set system_message = false %}\"\n \"{% endif %}\"\n \"{% for message in loop_messages %}\" # Loop over all non-system messages\n \"{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\"\n \"{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\"\n \"{% endif %}\"\n \"{% if loop.index0 == 0 and system_message != false %}\" # Embed system message in first message\n \"{% set content = '<<SYS>>\\\\n' + system_message + '\\\\n<</SYS>>\\\\n\\\\n' + message['content'] %}\"\n \"{% else %}\"\n \"{% set content = message['content'] %}\"\n \"{% endif %}\"\n \"{% if message['role'] == 'user' %}\" # After all of that, handle messages/roles in a fairly normal way\n \"{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}\"\n \"{% elif message['role'] == 'system' %}\"\n \"{{ '<<SYS>>\\\\n' + content.strip() + '\\\\n<</SYS>>\\\\n\\\\n' }}\"\n \"{% elif message['role'] == 'assistant' %}\"\n \"{{ ' ' + content.strip() + ' ' + eos_token }}\"\n \"{% endif %}\"\n \"{% endfor %}\"\n )\n template = template.replace(\"USE_DEFAULT_PROMPT\", \"true\" if self.use_default_system_prompt else \"false\")\n default_message = DEFAULT_SYSTEM_PROMPT.replace(\"\\n\", \"\\\\n\").replace(\"'\", \"\\\\'\")\n template = template.replace(\"DEFAULT_SYSTEM_MESSAGE\", default_message)\n\n return template" }, { "identifier": "LlamaConfig", "path": "macro_gpt/models/llama/modeling_llama.py", "snippet": "def is_flash_attn_available():\n def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:\ndef _get_unpad_data(padding_mask):\ndef _make_causal_mask(\n input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0\n):\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n def __init__(self, hidden_size, eps=1e-6):\n def forward(self, hidden_states):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def forward(self, x, seq_len=None):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\n def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):\n def _set_cos_sin_cache(self, seq_len, device, dtype):\ndef rotate_half(x):\ndef apply_rotary_pos_emb(q, k, cos, sin, position_ids):\n def __init__(self, config):\n def forward(self, x):\ndef repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:\n def __init__(self, config: LlamaConfig):\n def _init_rope(self):\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: 
int):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n def _flash_attention_forward(\n self, query_states, key_states, value_states, padding_mask, query_length, dropout=0.0, softmax_scale=None\n ):\n def _upad_input(self, query_layer, key_layer, value_layer, padding_mask, query_length):\n def __init__(self, config: LlamaConfig):\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n padding_mask: Optional[torch.LongTensor] = None,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n def _init_weights(self, module):\n def _set_gradient_checkpointing(self, module, value=False):\n def __init__(self, config: LlamaConfig):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def set_decoder(self, decoder):\n def get_decoder(self):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n def _reorder_cache(past_key_values, beam_idx):\n def __init__(self, config):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: 
Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutputWithPast]:\n_CONFIG_FOR_DOC = \"LlamaConfig\"\nLLAMA_START_DOCSTRING = r\"\"\"\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`LlamaConfig`]):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\nLLAMA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\nclass LlamaRMSNorm(nn.Module):\nclass LlamaRotaryEmbedding(nn.Module):\nclass LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):\nclass LlamaMLP(nn.Module):\nclass LlamaAttention(nn.Module):\nclass LlamaFlashAttention2(LlamaAttention):\nclass LlamaDecoderLayer(nn.Module):\nclass LlamaPreTrainedModel(PreTrainedModel):\nclass LlamaModel(LlamaPreTrainedModel):\nclass LlamaForCausalLM(LlamaPreTrainedModel):\nclass LlamaForSequenceClassification(LlamaPreTrainedModel):" }, { "identifier": "PATH_MODEL_PRETRAIN", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_MODEL_PRETRAIN = \"\"" }, { "identifier": "DATA_PATH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "DATA_PATH = \"../datasets/tigerbot-train-00001-of-00097.json\"" }, { "identifier": "MODEL_SAVE_DIR", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MODEL_SAVE_DIR = \"model_macrogpt_1b3_float32\"" }, { "identifier": "REPO_ID", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "REPO_ID = \"Macropodus/macrogpt-tokenizer\"" }, { "identifier": "MICRO_BATCH_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MICRO_BATCH_SIZE = 4 # default=4 # this could actually be 5 but i like powers of 2" }, { "identifier": "BATCH_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "BATCH_SIZE = 128" }, { "identifier": "GRADIENT_ACCUMULATION_STEPS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE" }, { "identifier": "LEARNING_RATE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LEARNING_RATE = 3e-4 # default=3e-4 # the Karpathy constant" }, { "identifier": "EPOCHS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "EPOCHS = 1 # default=3 # we don't always need 3 tbh" }, { "identifier": "SAVE_STEPS", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "SAVE_STEPS = 384" }, { "identifier": "VAL_SET_SIZE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "VAL_SET_SIZE = 0" }, { "identifier": "TARGET_MODULES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "TARGET_MODULES = [\"query_key_value\"]" }, { "identifier": "IS_PARALLELIZABLE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "IS_PARALLELIZABLE = False" }, { "identifier": "MODEL_PARALLEL", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MODEL_PARALLEL = False" }, { "identifier": "USE_CACHE", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_CACHE = False" }, { "identifier": "MAX_LENGTH_Q", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_Q = 1024 - 2 # default=128 - 2" }, { "identifier": "MAX_LENGTH_A", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_A = 1024 - 2 # default=128 - 2" }, { "identifier": "MAX_LENGTH_QA", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "MAX_LENGTH_QA = MAX_LENGTH_Q + MAX_LENGTH_A + 4" }, { "identifier": "LORA_DROPOUT", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_DROPOUT = 0.05" }, { "identifier": 
"LORA_ALPHA", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_ALPHA = 16" }, { "identifier": "LORA_R", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "LORA_R = 8" }, { "identifier": "PATH_MODEL_CONFIG", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_MODEL_CONFIG = \"config_macrogpt_1b3_float32.json\" or MODEL_SAVE_DIR" }, { "identifier": "PATH_TOKENIZER_PRETRAIN", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "PATH_TOKENIZER_PRETRAIN = REPO_ID or \"./macrogpt.model\"" } ]
import random import copy import sys import os import bitsandbytes as bnb import torch.nn as nn import transformers import torch from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig) from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.modeling_utils import unwrap_model from tensorboardX import SummaryWriter from datasets import load_dataset from macro_gpt.models.llama.modeling_llama import LlamaForCausalLM as LLMForCausalLM from macro_gpt.models.llama.tokenization_llama import LlamaTokenizer as LLMTokenizer from macro_gpt.models.llama.modeling_llama import LlamaConfig as LLMConfig from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LORA_DROPOUT, LORA_ALPHA, LORA_R from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_CONFIG, PATH_TOKENIZER_PRETRAIN
13109
print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### 如果 padding-right, 也mask掉 # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = torch.stack(batch_input_ids) batch_labels = torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ 递归获取某个目录下的所有文件(所有层, 包括子目录) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. 
["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # 分别代表根目录、文件夹、文件 for file in files: # 遍历文件 file_path = os.path.join(root, file) # 获取文件绝对路径 path_files.append(file_path) # 将文件路径添加进列表 files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ 只打印 0 号GPU的 """ # if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。 # print(*args) print(*args) def local_rank_is_0(): """ 判断是哪台机子的 """ # flag = False # if torch.distributed.get_rank() == 0: # flag = True # return flag return True # import torch.distributed as dist # dist.init_process_group(backend='nccl') # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_TOKENIZER_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token # tokenizer.padding_side = "left" # Allow batched inference tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } model = LLMForCausalLM.from_pretrained(PATH_MODEL_CONFIG) model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL
# !/usr/bin/python # -*- coding: utf-8 -*- # @time : 2023/3/5 21:04 # @author : Mo # @function: macro-gpt path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) sys.path.append(path_root) os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072" os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES os.environ["USE_TORCH"] = USE_TORCH os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1 os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1 os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1 os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1 os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1 def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"): """ 仅保存 有梯度 的 模型参数(推荐使用) """ if not os.path.exists(model_save_dir): os.makedirs(model_save_dir) # save config if config: config.save_pretrained(model_save_dir) # config.to_dict() # save model path_model = os.path.join(model_save_dir, model_name) # grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters() # if v.requires_grad == True} grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()} torch.save(grad_params_dict, path_model) print_rank_0("******model_save_path is {}******".format(path_model)) def print_rank_0_named_parameters(model, use_print_rank_0_data=False): """ 打印模型训练参数/数据类型信息 """ trainable_params = 0 all_param = 0 for name, param in model.named_parameters(): if use_print_rank_0_data: print_rank_0((name, param.data.dtype, param.requires_grad, param.data)) else: print_rank_0((name, param.data.dtype, param.requires_grad)) num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print_rank_0(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}") def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]): r""" This method wrapps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers` """ # 不要使用 model.half(), 这样会先截取精度再训练了, 最初data就要保持half for name, param in model.named_parameters(): # freeze base model's layers # cast layer norm in fp32 for stability for 8bit models if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names): param.data = param.data.to(torch.float32) elif output_embedding_layer_name in name: # lm_head也需要是tf.float32(最后一层) param.data = param.data.to(torch.float32) else: param.data = param.data.to(torch.half) if use_gradient_checkpointing: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable() return model def generate_prompt(data_point, is_logger=False): # sorry about the formatting disaster gotta move fast # text_1 = f"指令:\n{data_point.get('instruction', '')}\n问:\n{data_point.get('input', '')}\n答:\n" \ # if data_point.get('input', '') else f"指令:\n{data_point.get('instruction', '')}\n答:\n" # text_2 = f"{data_point.get('output', '')}" text_a = data_point.get("a", "") prompt_str_1 = text_a # end with gMASK, <sop> x = tokenizer.encode(prompt_str_1) if len(x) > MAX_LENGTH_QA - 2: x = x[:MAX_LENGTH_QA - 2] if not x: x = [ID_PAD, ID_EOS] if x and x[-1] != ID_EOS: x += [ID_EOS] out = {"input_ids": x, "labels": []} if is_logger: print_rank_0(prompt_str_1) print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### 如果 padding-right, 也mask掉 # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = torch.stack(batch_input_ids) batch_labels = 
torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ 递归获取某个目录下的所有文件(所有层, 包括子目录) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. ["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # 分别代表根目录、文件夹、文件 for file in files: # 遍历文件 file_path = os.path.join(root, file) # 获取文件绝对路径 path_files.append(file_path) # 将文件路径添加进列表 files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ 只打印 0 号GPU的 """ # if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。 # print(*args) print(*args) def local_rank_is_0(): """ 判断是哪台机子的 """ # flag = False # if torch.distributed.get_rank() == 0: # flag = True # return flag return True # import torch.distributed as dist # dist.init_process_group(backend='nccl') # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_TOKENIZER_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token # tokenizer.padding_side = "left" # Allow batched inference tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } model = LLMForCausalLM.from_pretrained(PATH_MODEL_CONFIG) model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL
model.config.use_cache = USE_CACHE
20
2023-11-30 12:39:19+00:00
16k
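The all_code field of the record above concatenates prompt and target ids, pads each sequence on the right with the pad id 0, and fills the padded label positions with -100 so the loss ignores them. The following is a minimal, self-contained sketch of that collation pattern only; it is an illustration, not the repository's code, and it deliberately drops the MAX_LENGTH_QA cap and the per-sequence position ids and lower-triangular attention masks that the original computes.

```python
import torch

ID_PAD = 0  # pad id used by the tokenizer in the record above


def collate_right_pad(batch):
    """Sketch of the right-padding collation shown in the all_code field.

    Each item carries "input_ids" (prompt ids) and "labels" (target ids); the two
    are concatenated, padded on the right with ID_PAD, and the padded positions of
    the label tensor are set to -100 so that CrossEntropyLoss ignores them.
    """
    # +1 mirrors the original len_max_batch computation (room for an EOS-style token).
    max_len = max(len(b["input_ids"]) + len(b["labels"]) + 1 for b in batch)
    input_ids, labels = [], []
    for b in batch:
        x, y = b["input_ids"], b["labels"]
        pad = max_len - len(x) - len(y)
        input_ids.append(torch.tensor(x + y + [ID_PAD] * pad, dtype=torch.long))
        labels.append(torch.tensor(x + y + [-100] * pad, dtype=torch.long))
    return {"input_ids": torch.stack(input_ids), "labels": torch.stack(labels)}


# Toy usage: two sequences of different lengths become one (2, 6) padded batch.
batch = [{"input_ids": [1, 5, 7], "labels": [9, 2]},
         {"input_ids": [1, 4], "labels": [2]}]
out = collate_right_pad(batch)
print(out["input_ids"].shape, out["labels"].shape)  # torch.Size([2, 6]) torch.Size([2, 6])
```

In the original script the explicit attention masks and position ids are built but left commented out of the returned input_dict, so only input_ids and labels reach the model and its internal causal masking applies.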
owkin/fedeca
fedeca/scripts/dp_logreg.py
[ { "identifier": "TorchDPFedAvgAlgo", "path": "fedeca/algorithms/torch_dp_fed_avg_algo.py", "snippet": "class TorchDPFedAvgAlgo(TorchFedAvgAlgo):\n \"\"\"To be inherited.\n\n Wraps the necessary operation so a torch model can be trained in the Federated\n Averaging strategy using DP.\n \"\"\"\n\n def __init__(\n self,\n model: torch.nn.Module,\n criterion: torch.nn.modules.loss._Loss,\n optimizer: torch.optim.Optimizer,\n dataset: torch.utils.data.Dataset,\n num_updates: int,\n batch_size: int,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n with_batch_norm_parameters: bool = False,\n seed: Optional[int] = None,\n use_gpu: bool = True,\n dp_target_epsilon: float = None,\n dp_target_delta: float = None,\n dp_max_grad_norm: float = None,\n num_rounds: int = None,\n *args,\n **kwargs,\n ):\n \"\"\"Instantiate a TorchDPFedAvgAlgo.\n\n Parameters\n ----------\n model : torch.nn.modules.module.Module\n A torch model.\n criterion : torch.nn.modules.loss._Loss\n A torch criterion (loss).\n optimizer : torch.optim.Optimizer\n A torch optimizer linked to the model.\n dataset : torch.utils.data.Dataset\n Refer to the doc of the parent class.\n This behavior can be changed by re-writing the `_local_train` or\n `predict` methods.\n num_updates : int\n The number of updates to perform. Note that here we do not use\n NpIndexGenerators.\n batch_size : int\n The batch-size to target in expectation (Poisson sampling).\n scheduler : torch.optim.lr_scheduler._LRScheduler, Optional\n A torch scheduler that will be called at every batch. If None, no\n scheduler will be used. Defaults to None.\n with_batch_norm_parameters : bool\n Whether to include the batch norm layer parameters in the federated\n average strategy. Defaults to False.\n seed : typing.Optional[int]\n Seed set at the algo initialization on each organization.\n Defaults to None.\n use_gpu : bool\n Whether to use the GPUs if they are available. Defaults to True.\n dp_target_epsilon : float\n The target epsilon for (epsilon, delta)-differential private guarantee.\n Defaults to None.\n dp_target_delta : float\n The target delta for (epsilon, delta)-differential private guarantee.\n Defaults to None.\n dp_max_grad_norm : float\n The maximum L2 norm of per-sample gradients; used to enforce\n differential privacy. Defaults to None.\n num_rounds : int\n The number of rounds used to train the algo. 
Although this is very\n peculiar for a substra Algorithm to need access to this quantity,\n Opacus needs the number of rounds and updates used to compute the\n total number of training steps in order to compute a noise level\n respecting user constraints.\n \"\"\"\n super().__init__(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n dataset=dataset,\n scheduler=scheduler,\n seed=seed,\n use_gpu=use_gpu,\n index_generator=None,\n *args,\n **kwargs,\n )\n self._with_batch_norm_parameters = with_batch_norm_parameters\n self.dp_target_delta = dp_target_delta\n self.dp_target_epsilon = dp_target_epsilon\n self.dp_max_grad_norm = dp_max_grad_norm\n self.num_rounds = num_rounds\n\n self._apply_dp = (\n (self.dp_target_epsilon is not None)\n and (self.dp_max_grad_norm is not None)\n and (self.dp_target_delta is not None)\n )\n\n if not (self._apply_dp):\n raise ValueError(\n \"Do not use this Algo without DP you risk running into batch\"\n \" sampling issues, instead use TorchFedAvgAlgo with NpIndexGenerator\"\n )\n if self.num_rounds is None:\n raise ValueError(\n \"if you want to perform DP-training you need to prespecify the\"\n \" number of rounds in advance.\"\n )\n self.num_updates = num_updates\n self.batch_size = batch_size\n\n self.num_total_steps = self.num_updates * self.num_rounds\n\n def _local_train(\n self,\n train_dataset: torch.utils.data.Dataset,\n ):\n \"\"\"Contain the local training loop.\n\n Train the model on ``num_updates`` minibatches for the torch dataset.\n\n Parameters\n ----------\n train_dataset : torch.utils.data.Dataset\n train_dataset build from the x and y returned by the opener.\n \"\"\"\n # Create torch dataloader it is important that it has a self.batch_size\n # batch size as len(train_data_loader) will be called by opacus\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=self.batch_size\n )\n if not hasattr(self, \"size_train_dataset\"):\n self.size_train_dataset = len(train_dataset)\n\n if not hasattr(\n self, \"accountant\"\n ): # if the attribute is not already there, need to instantiate the Engine\n # Important to use RDP to be able to use high epsilons\n # see https://github.com/pytorch/opacus/issues/604\n privacy_engine = PrivacyEngine(accountant=\"rdp\")\n\n if not hasattr(self, \"sample_rate\"):\n self.sample_rate = self.batch_size / len(train_dataset)\n else:\n assert np.allclose(\n self.sample_rate, self.batch_size / self.size_train_dataset\n ), \"The length of the dataset has changed\"\n\n # We will need it later\n self.noise_multiplier = get_noise_multiplier(\n target_epsilon=self.dp_target_epsilon,\n target_delta=self.dp_target_delta,\n sample_rate=self.sample_rate,\n steps=self.num_total_steps,\n accountant=privacy_engine.accountant.mechanism(),\n )\n\n (\n self._model,\n self._optimizer,\n train_data_loader,\n ) = privacy_engine.make_private(\n module=self._model,\n optimizer=self._optimizer,\n data_loader=train_data_loader,\n noise_multiplier=self.noise_multiplier,\n max_grad_norm=self.dp_max_grad_norm,\n poisson_sampling=True,\n )\n self.accountant = privacy_engine.accountant\n\n else:\n train_data_loader = DPDataLoader.from_data_loader(train_data_loader)\n\n for x_batch, y_batch in train_data_loader:\n x_batch = x_batch.to(self._device)\n y_batch = y_batch.to(self._device)\n # As batch-size is variable sometimes the batch is empty\n if x_batch.nelement() == 0:\n continue\n # Forward pass\n y_pred = self._model(x_batch)\n\n # Compute Loss\n loss = self._criterion(y_pred, y_batch)\n\n 
self._optimizer.zero_grad()\n loss.backward()\n\n self._optimizer.step()\n\n if self._scheduler is not None:\n self._scheduler.step()\n\n @remote_data\n def train(\n self,\n datasamples: Any,\n shared_state: Optional[FedAvgAveragedState] = None,\n ) -> FedAvgSharedState:\n \"\"\"Train method of the DP federated averaging strategy.\n\n This method is essentially the same as the regular federated average\n algorithm but without an index generator.\n\n Parameters\n ----------\n datasamples : typing.Any\n Input data returned by the ``get_data`` method from the opener.\n shared_state : FedAvgAveragedState, Optional\n Dictionary containing torch parameters that will be set to the model.\n Defaults to None.\n\n Returns\n -------\n FedAvgSharedState\n Weight update (delta between fine-tuned weights and previous weights).\n \"\"\"\n # Note that we don't simply inherit from the method from FedAvgTorchAlgo\n # because it assumes the existence of the NpIndexGenerator\n\n # Create torch dataset\n train_dataset = self._dataset(datasamples, is_inference=False)\n\n if shared_state is not None:\n # The shared states is the average of the model parameter updates\n # for all organizations\n # Hence we need to add it to the previous local state parameters\n parameter_updates = [\n torch.from_numpy(x).to(self._device)\n for x in shared_state.avg_parameters_update\n ]\n weight_manager.increment_parameters(\n model=self._model,\n updates=parameter_updates,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n old_parameters = weight_manager.get_parameters(\n model=self._model,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n # Train mode for torch model\n self._model.train()\n\n # Train the model\n self._local_train(train_dataset)\n\n self._model.eval()\n\n parameters_update = weight_manager.subtract_parameters(\n parameters=weight_manager.get_parameters(\n model=self._model,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n ),\n parameters_to_subtract=old_parameters,\n )\n\n # Re set to the previous state\n weight_manager.set_parameters(\n model=self._model,\n parameters=old_parameters,\n with_batch_norm_parameters=self._with_batch_norm_parameters,\n )\n\n return FedAvgSharedState(\n n_samples=len(train_dataset),\n parameters_update=[p.cpu().detach().numpy() for p in parameters_update],\n )\n\n def _local_predict(\n self,\n predict_dataset: torch.utils.data.Dataset,\n predictions_path,\n return_predictions=False,\n ):\n \"\"\"Predict.\n\n Parameters\n ----------\n predict_dataset : torch.utils.data.Dataset\n Predict dataset built from the `x` returned by the opener.\n\n Important\n ---------\n The responsibility is on the user to save the computed predictions.\n Substrafl provides the `TorchAlgo._save_predictions` method for this\n purpose.\n The user can load those predictions from a metric file with the command:\n `y_pred = np.load(inputs['predictions'])`.\n\n Raises\n ------\n BatchSizeNotFoundError\n No default batch size has been found to perform local prediction.\n Please override the predict function of your algorithm.\n \"\"\"\n # Note that we don't simply inherit from the method from FedAvgTorchAlgo\n # because it assumes the existence of the NpIndexGenerator\n\n predict_loader = torch.utils.data.DataLoader(\n predict_dataset, batch_size=self.batch_size, shuffle=False, drop_last=False\n )\n\n self._model.eval()\n\n predictions = []\n with torch.no_grad():\n for x in predict_loader:\n x = x.to(self._device)\n predictions.append(self._model(x))\n 
predictions = torch.cat(predictions, 0)\n predictions = predictions.cpu().detach()\n if return_predictions:\n return predictions\n else:\n self._save_predictions(predictions, predictions_path)\n\n def _get_state_to_save(self) -> dict:\n \"\"\"Get all attibutes to save and pass on to next state.\n\n Returns\n -------\n dict\n The dict with all quantities to persist.\n \"\"\"\n checkpoint = super()._get_state_to_save()\n\n list_attrs_to_save = [\n \"dp_max_grad_norm\",\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"num_rounds\",\n \"num_updates\",\n \"num_total_steps\",\n \"batch_size\",\n ]\n list_of_attrs_after_train = [\n \"noise_multiplier\",\n \"sample_rate\",\n \"size_train_dataset\",\n ]\n # For some reason this method is called before ever calling train so\n # at first it doesn't have an accountant\n if hasattr(self, \"accountant\"):\n checkpoint[\"privacy_accountant_state_dict\"] = self.accountant.state_dict()\n list_attrs_to_save += list_of_attrs_after_train\n\n for attr in list_attrs_to_save:\n checkpoint[attr] = getattr(self, attr)\n\n return checkpoint\n\n def _update_from_checkpoint(self, path) -> dict:\n \"\"\"Set self attributes using saved values.\n\n Parameters\n ----------\n path : Path\n Path towards the checkpoint to use.\n\n Returns\n -------\n dict\n The emptied checkpoint.\n \"\"\"\n # One cannot simply call checkpoint = super()._update_from_checkpoint(path)\n # because we have to change the model class if it should be changed\n # (and optimizer) aka if we find a specific key in the checkpoint\n assert (\n path.is_file()\n ), f'Cannot load the model - does not exist {list(path.parent.glob(\"*\"))}'\n checkpoint = torch.load(path, map_location=self._device)\n # For some reason substrafl save and load client before calling train\n if \"privacy_accountant_state_dict\" in checkpoint:\n self.accountant = RDPAccountant()\n self.accountant.load_state_dict(\n checkpoint.pop(\"privacy_accountant_state_dict\")\n )\n self.sample_rate = checkpoint.pop(\"sample_rate\")\n self.size_train_dataset = checkpoint.pop(\"size_train_dataset\")\n self.noise_multiplier = checkpoint.pop(\"noise_multiplier\")\n # The init is messing up the fact that the model has become\n # a grad sampler and the optimizer a DPOptimizer, their classes\n # do not persist between serializations\n # Those lines will allow to load corresponding state_dicts wo errors\n if not isinstance(self._model, GradSampleModule):\n self._model = wrap_model(self._model, grad_sample_mode=\"hooks\")\n\n if not isinstance(self._optimizer, DPOptimizer):\n self._optimizer = DPOptimizer(\n self._optimizer,\n noise_multiplier=self.noise_multiplier,\n max_grad_norm=self.dp_max_grad_norm,\n expected_batch_size=self.batch_size,\n )\n\n self._optimizer.attach_step_hook(\n self.accountant.get_optimizer_hook_fn(self.sample_rate)\n )\n\n self._model.load_state_dict(checkpoint.pop(\"model_state_dict\"))\n\n if self._optimizer is not None:\n self._optimizer.load_state_dict(checkpoint.pop(\"optimizer_state_dict\"))\n\n if self._scheduler is not None:\n self._scheduler.load_state_dict(checkpoint.pop(\"scheduler_state_dict\"))\n\n self._index_generator = checkpoint.pop(\"index_generator\")\n\n if self._device == torch.device(\"cpu\"):\n torch.set_rng_state(checkpoint.pop(\"rng_state\").to(self._device))\n else:\n torch.cuda.set_rng_state(checkpoint.pop(\"rng_state\").to(\"cpu\"))\n\n attr_names = [\n \"dp_max_grad_norm\",\n \"dp_target_epsilon\",\n \"dp_target_delta\",\n \"num_rounds\",\n \"num_updates\",\n \"num_total_steps\",\n 
\"batch_size\",\n ]\n\n for attr in attr_names:\n setattr(self, attr, checkpoint.pop(attr))\n\n return checkpoint" }, { "identifier": "LogisticRegressionTorch", "path": "fedeca/fedeca_core.py", "snippet": "class LogisticRegressionTorch(nn.Module):\n \"\"\"Pytorch logistic regression class.\"\"\"\n\n def __init__(self, ndim, torch_dtype=torch.float64):\n \"\"\"Initialize Logistic Regression model in PyTorch.\n\n Parameters\n ----------\n ndim : int\n Number of input dimensions.\n torch_dtype : torch.dtype, optional\n Data type for PyTorch tensors, by default torch.float64.\n \"\"\"\n self.torch_dtype = torch_dtype\n self.ndim = ndim\n super(LogisticRegressionTorch, self).__init__()\n self.fc1 = nn.Linear(self.ndim, 1).to(self.torch_dtype)\n # Zero-init as in sklearn\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n\n def forward(self, x, eval=False):\n \"\"\"Perform a forward pass through the Logistic Regression model.\n\n Parameters\n ----------\n x : torch.Tensor\n Input tensor of shape (batch_size, ndim).\n eval : bool, optional\n Set to True during evaluation, by default False.\n\n Returns\n -------\n torch.Tensor\n Predicted probabilities after passing through sigmoid activation.\n \"\"\"\n x = self.fc1(x)\n return torch.sigmoid(x)" }, { "identifier": "Experiment", "path": "fedeca/utils/substrafl_utils.py", "snippet": "class Experiment:\n \"\"\"Experiment class.\"\"\"\n\n def __init__(\n self,\n strategies: list,\n num_rounds_list: list[int],\n ds_client=None,\n train_data_nodes: Union[list[TrainDataNode], None] = None,\n metrics_dicts_list: Union[list[dict], None] = None,\n test_data_nodes: Union[list[TestDataNode], None] = None,\n aggregation_node: Union[AggregationNode, None] = None,\n evaluation_frequency: Union[int, None] = None,\n experiment_folder: str = \"./experiments\",\n clean_models: bool = False,\n fedeca_path: Union[str, None] = None,\n algo_dependencies: Union[list, None] = None,\n ):\n \"\"\"Initialize an experiment.\n\n Parameters\n ----------\n ds_client : fl.client.Client\n Federated Learning client object used to register computations.\n strategies : list\n List of strategies to run.\n train_data_nodes : Union[list[TrainDataNode], None]\n List of data nodes for training. 
If None cannot use the run method\n directly.\n num_rounds_list : list\n List of number of rounds for each strategy.\n metrics_dicts_list : list[dict], optional\n Dict of metric functions, by default None.\n test_data_nodes : list, optional\n List of data nodes for testing, by default None.\n aggregation_node : fl.data.DataNode, optional\n Aggregation node, by default None.\n evaluation_frequency : int, optional\n Frequency of evaluation, by default 1.\n experiment_folder : str, optional\n Folder path for experiment outputs, by default \"./experiments\".\n clean_models : bool, optional\n Whether to clean models after training, by default False.\n fedeca_path : str, optional\n Path to the FedECA package, by default None.\n algo_dependencies : list, optional\n List of algorithm dependencies, by default [].\n \"\"\"\n if metrics_dicts_list is not None:\n assert len(strategies) == len(metrics_dicts_list)\n assert len(num_rounds_list) == len(strategies)\n self.strategies = strategies\n self.metrics_dicts_list = metrics_dicts_list\n self.num_rounds_list = num_rounds_list\n self.ds_client = ds_client\n self.train_data_nodes = train_data_nodes\n self.test_data_nodes = test_data_nodes\n self.simu_mode = False\n\n if self.test_data_nodes is None:\n assert metrics_dicts_list is not None\n if self.train_data_nodes is not None:\n self.test_data_nodes = [\n TestDataNode(\n t.organization_id, t.data_manager_key, t.data_sample_keys, []\n )\n for t in self.train_data_nodes\n ]\n else:\n if metrics_dicts_list and not all(\n [len(t.metric_functions) == 0 for t in self.test_data_nodes]\n ):\n print(\n \"\"\"WARNING: you are passing metrics to test data nodes with existing\n metric_functions this will overwrite them\"\"\"\n )\n print(\n [\n (f\"Client {i}\", t.metric_functions)\n for i, t in enumerate(self.test_data_nodes)\n ]\n )\n\n self.evaluation_frequency = evaluation_frequency\n\n self.aggregation_node = aggregation_node\n self.experiment_folder = experiment_folder\n self.clean_models = clean_models\n\n # Packaging the right dependencies\n if fedeca_path is None:\n fedeca_path = os.getcwd()\n repo_folder = Path(\n git.Repo(fedeca_path, search_parent_directories=True).working_dir\n ).resolve()\n wheel_folder = repo_folder / \"temp\"\n os.makedirs(wheel_folder, exist_ok=True)\n for stale_wheel in wheel_folder.glob(\"fedeca*.whl\"):\n stale_wheel.unlink()\n process = subprocess.Popen(\n f\"python -m build --wheel --outdir {wheel_folder} {repo_folder}\",\n shell=True,\n stdout=subprocess.PIPE,\n )\n process.wait()\n assert process.returncode == 0, \"Failed to build the wheel\"\n wheel_path = next(wheel_folder.glob(\"fedeca*.whl\"))\n if algo_dependencies is None:\n algo_dependencies = []\n\n self.algo_dependencies = Dependency(\n pypi_dependencies=[\"numpy==1.23.1\", \"torch==1.11.0\", \"lifelines\", \"pandas\"]\n + algo_dependencies,\n local_dependencies=[wheel_path],\n )\n\n self.experiment_path = str(Path(self.experiment_folder))\n os.makedirs(self.experiment_path, exist_ok=True)\n self.run_strategies = 0\n self.tasks = {}\n self.compute_plan_keys = []\n self.performances_strategies = []\n\n def fit(\n self,\n data: pd.DataFrame,\n nb_clients: Union[int, None] = None,\n split_method: Union[Callable, str] = \"uniform\",\n split_method_kwargs: Union[Callable, None] = None,\n data_path: Union[str, None] = None,\n backend_type: str = \"subprocess\",\n urls: Union[list[str], None] = None,\n tokens: Union[list[str], None] = None,\n ):\n \"\"\"Fit strategies on global data split across clients.\n\n For test if 
provided we use test_data_nodes from int or the\n train_data_nodes in the latter train=test.\n\n Parameters\n ----------\n data : pd.DataFrame\n The global data to be split has to be a dataframe as we only support\n one opener type.\n nb_clients : Union[int, None], optional\n The number of clients used to split data across, by default None\n split_method : Union[Callable, None], optional\n How to split data across the nb_clients, by default None.\n split_method_kwargs : Union[Callable, None], optional\n Argument of the function used to split data, by default None.\n data_path : Union[str, None]\n Where to store the data on disk when backend is not remote.\n backend_type: str\n The backend to use for substra. Can be either:\n [\"subprocess\", \"docker\", \"remote\"]. Defaults to \"subprocess\".\n urls: Union[list[str], None]\n Urls corresponding to clients API if using remote backend_type.\n Defaults to None.\n tokens: Union[list[str], None]\n Tokens necessary to authenticate each client API if backend_type\n is remote. Defauts to None.\n \"\"\"\n # Reset experiment so that it can fit on a new dataset\n self.reset_experiment()\n\n if data_path is not None:\n self.experiment_path = data_path\n\n # We first have to create the TrainDataNodes objects for this we split\n # the data into nb_clients using split_method\n (\n self.clients,\n self.train_data_nodes,\n test_data_nodes,\n _,\n _,\n ) = split_dataframe_across_clients(\n df=data,\n n_clients=nb_clients,\n split_method=split_method,\n split_method_kwargs=split_method_kwargs,\n backend_type=backend_type,\n data_path=data_path,\n urls=urls,\n tokens=tokens,\n )\n if self.test_data_nodes is None:\n self.test_data_nodes = test_data_nodes\n self.run()\n\n def run(self, num_strategies_to_run=None):\n \"\"\"Run the experiment.\n\n Parameters\n ----------\n num_strategies_to_run : int, optional\n Number of strategies to run, by default None.\n \"\"\"\n assert (\n self.train_data_nodes is not None\n ), \"you have to define train_data_nodes first before running\"\n assert (\n self.test_data_nodes is not None\n ), \"you have to define test_data_nodes first before running\"\n if num_strategies_to_run is None:\n num_strategies_to_run = len(self.strategies) - self.run_strategies\n assert (self.run_strategies + num_strategies_to_run) <= len(\n self.strategies\n ), f\"\"\"You cannot run {num_strategies_to_run} strategies more there is only\n {len(self.strategies)} strategies and you have already run {self.run_strategies}\n of them.\"\"\"\n # If no client is given we take the first one\n if self.ds_client is None:\n self.ds_client = self.clients[list(self.clients.keys())[0]]\n\n # If no AggregationNode is given we take the first one\n if self.aggregation_node is None:\n print(\"Using the first client as a server.\")\n kwargs_agg_node = {\n \"organization_id\": self.train_data_nodes[0].organization_id\n }\n self.aggregation_node = AggregationNode(**kwargs_agg_node)\n\n if not hasattr(self, \"experiment_kwargs\"):\n self.experiment_kwargs = {\n \"experiment_folder\": self.experiment_path,\n \"clean_models\": self.clean_models,\n \"dependencies\": self.algo_dependencies,\n \"client\": self.ds_client,\n }\n if hasattr(self.ds_client, \"is_simu\"):\n self.simu_mode = self.ds_client.is_simu\n\n # inelegant but cannot slice on a zip object\n strategies = self.strategies[\n self.run_strategies : (self.run_strategies + num_strategies_to_run)\n ] # noqa: E203\n metrics_dicts_list = self.metrics_dicts_list[\n self.run_strategies : (\n self.run_strategies + 
num_strategies_to_run\n ) # noqa: E203\n ]\n num_rounds_list = self.num_rounds_list[\n self.run_strategies : (\n self.run_strategies + num_strategies_to_run\n ) # noqa: E203\n ]\n for i, (strategy, metrics_dict, num_rounds) in enumerate(\n zip(strategies, metrics_dicts_list, num_rounds_list)\n ):\n for t in self.test_data_nodes:\n t.metric_functions = metrics_dict\n\n current_kwargs = self.experiment_kwargs\n current_kwargs[\"strategy\"] = strategy\n current_kwargs[\"num_rounds\"] = num_rounds\n current_kwargs[\"train_data_nodes\"] = self.train_data_nodes\n current_kwargs[\"aggregation_node\"] = self.aggregation_node\n # Evaluation frequency depend on current strategy\n # If None evaluate once at the end of the strategy\n if self.evaluation_frequency is None:\n evaluation_strategy = EvaluationStrategy(\n test_data_nodes=self.test_data_nodes,\n eval_rounds=[num_rounds_list[i]],\n )\n else:\n evaluation_strategy = EvaluationStrategy(\n test_data_nodes=self.test_data_nodes,\n eval_frequency=self.evaluation_frequency[i],\n )\n current_kwargs[\"evaluation_strategy\"] = evaluation_strategy\n current_kwargs[\"simu_mode\"] = self.simu_mode\n current_kwargs[\"name\"] = f\"Fedeca: {strategy.__class__.__name__}\"\n xp_output = execute_experiment(**current_kwargs)\n\n if self.simu_mode:\n scores = [t.scores for t in self.test_data_nodes]\n robust_cox_variance = False\n for idx, s in enumerate(scores):\n print(f\"====Client {idx}====\")\n try:\n print(s[-1])\n except IndexError:\n robust_cox_variance = True\n print(\"No metric\")\n # TODO Check that it is well formatted it's probably not\n self.performances_strategies.append(pd.DataFrame(xp_output))\n # Hacky hacky hack\n if robust_cox_variance:\n xp_output = self.train_data_nodes\n else:\n xp_output = self.train_data_nodes[0]\n\n self.compute_plan_keys.append(xp_output)\n\n if not (self.simu_mode):\n self.tasks[self.compute_plan_keys[i].key] = {}\n tasks = self.ds_client.list_task(\n filters={\"compute_plan_key\": [self.compute_plan_keys[i].key]}\n )[::-1]\n tasks_names = [t.function.name for t in tasks]\n self.tasks[self.compute_plan_keys[i].key][\"tasks\"] = tasks\n self.tasks[self.compute_plan_keys[i].key][\"tasks_names\"] = tasks_names\n self.tasks[self.compute_plan_keys[i].key][\"num_tasks\"] = len(tasks)\n\n self.run_strategies += 1\n\n def get_outmodel(self, task_name, strategy_idx=0, idx_task=0):\n \"\"\"Get the output model.\n\n Parameters\n ----------\n task_name : str\n Name of the task.\n strategy_idx : int, optional\n Index of the strategy, by default 0.\n idx_task : int, optional\n Index of the task, by default 0.\n \"\"\"\n assert not (self.simu_mode), \"This function cannot be used in simu mode\"\n\n # We get all matches and order them chronologically\n tasks_dict_from_strategy = self.tasks[self.compute_plan_keys[strategy_idx].key]\n return get_outmodel_function(\n task_name, idx_task=idx_task, tasks_dict=tasks_dict_from_strategy\n )\n\n def reset_experiment(self):\n \"\"\"Reset the state of the object.\n\n So it can be fit with a new dataset.\n \"\"\"\n self.run_strategies = 0\n self.tasks = {}\n self.compute_plan_keys = []\n self.performances_strategies = []\n self.train_data_nodes = None\n self.test_data_nodes = None" }, { "identifier": "make_substrafl_torch_dataset_class", "path": "fedeca/utils/substrafl_utils.py", "snippet": "def make_substrafl_torch_dataset_class(\n target_cols,\n event_col,\n duration_col,\n dtype=\"float64\",\n return_torch_tensors=False,\n):\n \"\"\"Create a custom SubstraflTorchDataset class for survival 
analysis.\n\n Parameters\n ----------\n target_cols : list\n List of target columns.\n event_col : str\n Name of the event column.\n duration_col : str\n Name of the duration column.\n dtype : str, optional\n Data type, by default \"float64\".\n return_torch_tensors : bool, optional\n Returns torch.Tensor. Defaults to False.\n\n Returns\n -------\n type\n Custom SubstraflTorchDataset class.\n \"\"\"\n assert len(target_cols) == 1 or all(\n [t in [event_col, duration_col] for t in target_cols]\n )\n if len(target_cols) == 1:\n print(f\"Making a dataset class to fit a model to predict {target_cols[0]}\")\n columns_to_drop = [event_col, duration_col]\n elif len(target_cols) == 2:\n assert set(target_cols) == set(\n [event_col, duration_col]\n ), \"Your targets should be event_col and duration_col\"\n # DO NOT MODIFY THIS LINE !!!!!\n target_cols = [duration_col, event_col]\n columns_to_drop = []\n\n class MySubstraflTorchDataset(SubstraflTorchDataset):\n def __init__(self, datasamples, is_inference):\n super().__init__(\n datasamples=datasamples,\n is_inference=is_inference,\n target_columns=target_cols,\n columns_to_drop=columns_to_drop,\n dtype=dtype,\n return_torch_tensors=return_torch_tensors,\n )\n\n return MySubstraflTorchDataset" }, { "identifier": "make_accuracy_function", "path": "fedeca/utils/substrafl_utils.py", "snippet": "def make_accuracy_function(treatment_col: str):\n \"\"\"Build accuracy function.\n\n Parameters\n ----------\n treatment_col: str,\n Column name for the treatment allocation.\n \"\"\"\n\n def accuracy(datasamples, predictions_path):\n y_true = datasamples[treatment_col]\n if isinstance(predictions_path, str) or isinstance(predictions_path, Path):\n y_pred = np.load(predictions_path)\n else:\n y_pred = predictions_path\n return accuracy_score(y_true, y_pred > 0.5)\n\n return accuracy" }, { "identifier": "CoxData", "path": "fedeca/utils/survival_utils.py", "snippet": "class CoxData:\n \"\"\"Simulate Cox data.\n\n This class simulates survival data following Cox model assumptions.\n \"\"\"\n\n def __init__(\n self,\n n_samples: int = 1000,\n ndim: int = 10,\n features_type: Literal[\n \"cov_toeplitz\",\n \"cov_uniform\",\n \"indep_gauss\",\n ] = \"cov_toeplitz\",\n cate: float | Literal[\"random\", \"linear\"] = 1.0,\n propensity: Literal[\"constant\", \"linear\"] = \"constant\",\n prop_treated: float = 0.5,\n overlap: float = 0.0,\n cov_corr: float = 0.5,\n scale_t: float = 1.0,\n shape_t: float = 1.0,\n censoring_factor: float = 0.5,\n percent_ties: Optional[float] = None,\n random_censoring: bool = False,\n seed: _SeedType = None,\n standardize_features: bool = True,\n dtype: Literal[\"float32\", \"float64\"] = \"float64\",\n ):\n r\"\"\"Cox Data generator class.\n\n This class generates data according to a Cox proportional hazards model\n in continuous time as follows:\n .. math::\n S(t|x) = P(T > t | X=x)\n \\\\lambda(t|x) = \\\\frac{d \\\\log S(t|x)}{dt}\n \\\\lambda(t|x) = \\\\lambda_0(t)e^{\\\\beta^T x}\n \\\\Lambda_0(t|x) = \\\\int_0^t \\\\lambda_0(u)du = (\\\\frac{t}{s})^k\n X \\\\sim \\\\mathcal{N}(0, C)\n \\\\beta \\\\sim \\\\mathcal{N}(0, I)\n\n Parameters\n ----------\n n_samples: int, optional\n Number of samples to generate. 
Defaults to 1000\n ndim: int, optional\n Number of features, defaults to 10.\n features_type: `{\"cov_toeplitz\", \"cov_uniform\", \"indep_gauss\"}`, optional\n cate: {float, `{\"random\", \"linear\"}`, Callable}\n The way to assign treatment effect (hazard ratio) to samples.\n * \"float\": Constant hazard ratio for all samples.\n * \"random\": Hazard ratio follows log-normal distribution.\n * \"linear\": Hazard ratio depends on a linear combination of\n features with random coefficients.\n Defaults to 1.0 (no treatment effect).\n propensity: {`{\"constant\", \"linear\"}`, Callable}\n The way to assign propensity scores (probabilities of being treated)\n to samples.\n * \"linear\": Propensity scores depend on a linear combination of\n features with random coefficients.\n * \"constant\": All propensity scores take the value of the constant\n defined by the parameter `prop_treated`.\n Defaults to \"constant\".\n cov_corr: float, optional\n The correlation of the covariance matrix.\n scale_t: float, optional\n Scale parameter `s` in the equations above. Defaults to `1.0`.\n shape_t: float, optional\n Shape parameter `k` in the equations above. Defaults to `1.0`.\n censoring_factor: float, optional\n Parameter used to determine the probability of being censored\n (with respect to the median). Defaults to `0.5`.\n percent_ties: float, optional\n Parameter that control the percentage of samples who have the same outcome.\n Defaults to None.\n random_censoring: bool, optional\n Whether to censor completely independently of the rest or not.\n When true, censors samples with probability censoring_factor.\n When false, samples are censored if the drawn event times\n (drawn from the Cox model) is smaller than an independent\n exponential variable with scale factor\n `censoring_factor * mean_time`, where `mean_time`\n is the empirical mean of drawn event times.\n Defaults to False.\n seed: {None, int, Sequence[int], SeedSequence, BitGenerator, Generator},\n optional\n The seed for reproducibility. Defaults to None.\n standardize_features: bool, optional\n Whether to standardize features or not. Defaults to True.\n dtype : `{\"float64\", \"float32\"}`, default=\"float64\"\n Type of the arrays used.\n \"\"\"\n self.n_samples = n_samples\n self.ndim = ndim\n self.features_type: Final = features_type\n self.rng = np.random.default_rng(seed)\n self.prop_treated = prop_treated\n self.overlap = overlap\n self.cate = cate\n self.propensity = propensity\n self.cov_corr = cov_corr\n self.scale_t = scale_t\n self.shape_t = shape_t\n self.censoring_factor = censoring_factor\n self.random_censoring = random_censoring\n self.standardize_features = standardize_features\n self.dtype: Final = dtype\n self.coeffs = None\n self.percent_ties = percent_ties\n self.average_treatment_effect_ = None\n self.probability_treated = None\n\n def standardize_data(self, features: np.ndarray):\n \"\"\"Standardize data. 
Make data reduced centered.\n\n Standardize the data by substracting the mean of each columns\n and dividing by the standard deviation.\n\n Parameters\n ----------\n features : np.ndarray\n Features to standardize.\n\n Returns\n -------\n np.ndarray\n Normalized features.\n \"\"\"\n features -= features.mean(axis=0)\n features /= features.std(axis=0)\n return features\n\n def generate_data(\n self,\n n_samples: Optional[int] = None,\n seed: _SeedType = None,\n use_cate: bool = True,\n ):\n \"\"\"Generate final survival data.\n\n Use the collection of methods of the class to\n generate data following Cox assumptions.\n\n Returns\n -------\n tuple\n A tuple of np.ndarrays.\n\n Raises\n ------\n ValueError\n If `propensity` is neither \"constant\" nor \"linear\".\n ValueError\n If `cate` is neither \"linear\", \"random\" nor a constant type int or float.\n \"\"\"\n if n_samples is None:\n n_samples = self.n_samples\n if seed is None:\n seed = self.rng\n rng = np.random.default_rng(seed)\n\n if self.features_type == \"cov_uniform\":\n X = features_normal_cov_uniform(\n n_samples, self.ndim, dtype=self.dtype, seed=rng\n )\n elif self.features_type == \"indep_gauss\":\n X = rng.standard_normal(size=(n_samples, self.ndim)).astype(self.dtype)\n else:\n X = features_normal_cov_toeplitz(\n n_samples, self.ndim, self.cov_corr, dtype=self.dtype, seed=rng\n )\n if self.standardize_features:\n X = self.standardize_data(X)\n\n if self.propensity == \"constant\":\n treat_alloc = random_treatment_allocation(\n n_samples, self.prop_treated, seed=rng\n )\n propensity_scores = np.repeat(self.prop_treated, n_samples)\n\n elif self.propensity == \"linear\":\n func_propensity = linear_propensity(\n ndim=self.ndim,\n overlap=self.overlap,\n prop_treated=self.prop_treated,\n seed=rng,\n )\n propensity_scores = np.apply_along_axis(func_propensity, -1, X)\n treat_alloc = rng.binomial(1, propensity_scores)\n else:\n raise ValueError(\"propensity must be either `constant` or `linear`\")\n\n self.coeffs = rng.normal(size=(self.ndim,)).astype(self.dtype)\n u = X.dot(self.coeffs)\n if use_cate:\n if self.cate == \"linear\":\n func_cate = linear_cate(ndim=self.ndim, seed=rng)\n elif self.cate == \"random\":\n func_cate = random_cate(seed=rng)\n elif isinstance(self.cate, (int, float)):\n func_cate = constant_cate(self.cate)\n else:\n raise ValueError(\n \"\"\"cate must be either `linear`, `random` or a constant type\n int or float\"\"\"\n )\n\n cate_vector = np.apply_along_axis(func_cate, -1, X)\n self.average_treatment_effect_ = np.mean(cate_vector[treat_alloc == 1])\n self.probability_treated = cate_vector\n u += treat_alloc * np.log(cate_vector)\n # Simulation of true times\n time_hazard_baseline = -np.log(\n rng.uniform(0, 1.0, size=n_samples).astype(self.dtype)\n )\n time_cox_unscaled = time_hazard_baseline * np.exp(-u)\n times = self.scale_t * time_cox_unscaled ** (1.0 / self.shape_t)\n\n # induce samples with same times\n if self.percent_ties is not None:\n nb_ties_target = int(self.percent_ties * n_samples)\n if nb_ties_target >= 2:\n # sklearn not supporting generator yet, pass int to random_state\n # ref: https://github.com/scikit-learn/scikit-learn/issues/16988\n seed_seq = rng.bit_generator._seed_seq.spawn(1)[0] # type: ignore\n random_state = seed_seq.generate_state(1)[0]\n original_times = copy.deepcopy(times)\n # We progressively reduce the number of bins until there are\n # only 2 bins starting with npoints - 1 bins\n reached = False\n for nbins in range(n_samples - 1, 1, -1):\n discretizer = 
KBinsDiscretizer(\n n_bins=nbins,\n encode=\"ordinal\",\n strategy=\"quantile\",\n random_state=random_state,\n )\n times = discretizer.fit_transform(original_times.reshape((-1, 1)))\n nb_ties_reached = n_samples - len(np.unique(times))\n if (nb_ties_reached - nb_ties_target) >= 0:\n reached = True\n break\n if not reached:\n raise ValueError(\"This should not happen, lower percent_ties\")\n times = times.reshape((-1))\n\n else:\n raise ValueError(\"Choose a larger number of ties\")\n\n avg_time = times.mean()\n\n # Simulation of the censoring times. times is returned in absolute value\n if self.random_censoring:\n censoring = rng.uniform(size=n_samples) < self.censoring_factor\n times[censoring] = [rng.uniform(0, t) for t in times[censoring].tolist()]\n censoring = censoring.astype(\"uint8\")\n else:\n c_sampled = rng.exponential(\n scale=self.censoring_factor * avg_time, size=n_samples\n ).astype(self.dtype)\n\n censoring = (times > c_sampled).astype(\"uint8\")\n times[censoring] = np.minimum(times, c_sampled)\n\n return X, times, censoring, treat_alloc, propensity_scores\n\n def generate_dataframe(\n self,\n n_samples: Optional[int] = None,\n prefix: str = \"X_\",\n duration_col: str = \"time\",\n event_col: str = \"event\",\n treated_col: str = \"treatment\",\n ps_col: str = \"propensity_scores\",\n seed: _SeedType = None,\n ):\n \"\"\"Generate dataframe.\"\"\"\n (\n covariates,\n times,\n censoring,\n treatments,\n propensity_scores,\n ) = self.generate_data(n_samples, seed=seed)\n data = pd.DataFrame(covariates).add_prefix(prefix)\n data[duration_col] = times\n data[event_col] = 1 - censoring\n data[treated_col] = treatments\n data[ps_col] = propensity_scores\n return data" }, { "identifier": "make_categorical", "path": "fedeca/utils/survival_utils.py", "snippet": "def make_categorical(X, up_to: int = 25, seed: _SeedType = None):\n \"\"\"Convert continuous features in a dataset to categorical features.\n\n This function takes a dataset matrix `X` and converts its first `up_to` columns\n (features) into categorical features using the KBinsDiscretizer method.\n It performs min-max scaling on each feature before discretization.\n\n Parameters\n ----------\n X : np.ndarray\n Input dataset matrix of shape (n_samples, n_features).\n up_to : int, optional\n Number of columns to convert to categorical features, by default 25.\n seed : int or None, optional\n Seed for the random number generator, by default None.\n\n Returns\n -------\n np.ndarray, np.ndarray\n Two arrays: `Xleft` containing the modified categorical features\n and `Xright` containing the remaining original features.\n \"\"\"\n rng = np.random.default_rng(seed)\n Xleft = X[:, :up_to]\n Xright = X[:, up_to:]\n mm_normalizer = MinMaxScaler()\n nbins_vector = rng.integers(2, 10, size=up_to)\n for j, nbins in enumerate(nbins_vector):\n # sklearn not supporting generator yet, pass int to random_state\n # ref: https://github.com/scikit-learn/scikit-learn/issues/16988\n seed_seq = rng.bit_generator._seed_seq.spawn(1)[0] # type: ignore\n random_state = seed_seq.generate_state(1)[0]\n discretizer = KBinsDiscretizer(\n n_bins=nbins, encode=\"ordinal\", random_state=random_state\n )\n Xleft[:, j] = mm_normalizer.fit_transform(Xleft[:, j][:, None])[:, 0]\n Xleft[:, j] = discretizer.fit_transform(Xleft[:, j][:, None])[:, 0]\n return Xleft, Xright" } ]
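A quick, hedged illustration of the CoxData helper defined in the context above (not part of the dataset row): generate_dataframe wraps generate_data and returns a pandas DataFrame that can be fed directly to Experiment.fit. The argument values below are arbitrary examples, not values taken from the row.

from fedeca.utils.survival_utils import CoxData

simu = CoxData(n_samples=300, ndim=10, propensity="linear", seed=42)
df = simu.generate_dataframe()
# Columns: X_0 ... X_9, plus "time", "event", "treatment" and "propensity_scores".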
import sys
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from itertools import product
from sklearn.metrics import accuracy_score
from substrafl.algorithms.pytorch import TorchNewtonRaphsonAlgo
from substrafl.model_loading import download_algo_state
from substrafl.strategies import FedAvg, NewtonRaphson
from torch.optim import SGD
from fedeca.algorithms.torch_dp_fed_avg_algo import TorchDPFedAvgAlgo
from fedeca.fedeca_core import LogisticRegressionTorch
from fedeca.utils import (
    Experiment,
    make_accuracy_function,
    make_substrafl_torch_dataset_class,
)
from fedeca.utils.survival_utils import CoxData, make_categorical
11,471
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical
Xcat, Xcont = make_categorical(X, up_to=0)
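A hedged aside on the line above: make_categorical, as defined in this row's context, splits the matrix at column up_to, min-max scales and bins the first up_to columns with KBinsDiscretizer, and leaves the remaining columns continuous; with up_to=0 nothing is discretized. The data and shapes below are illustrative only.

import numpy as np
from fedeca.utils.survival_utils import make_categorical

X_demo = np.random.default_rng(0).normal(size=(300, 10)).astype("float32")
Xcat_demo, Xcont_demo = make_categorical(X_demo, up_to=0)
assert Xcat_demo.shape == (300, 0) and Xcont_demo.shape == (300, 10)
# With up_to=3, the first three columns become ordinal bin codes with 2-9 levels each.
Xcat_demo, Xcont_demo = make_categorical(X_demo, up_to=3, seed=7)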
6
2023-11-27 18:01:37+00:00
16k
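For completeness, a minimal sketch (illustrative, not taken from the row) of the LogisticRegressionTorch propensity model referenced in the context above: it is a single zero-initialised linear layer followed by a sigmoid, so every prediction equals 0.5 before any training step.

import torch
from fedeca.fedeca_core import LogisticRegressionTorch

model = LogisticRegressionTorch(ndim=10, torch_dtype=torch.float32)
x = torch.randn(4, 10, dtype=torch.float32)
probs = model(x)  # shape (4, 1), all values 0.5 at initialisation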
aliyun/pai-python-sdk
pai/modelscope/model.py
[ { "identifier": "ImageLabel", "path": "pai/api/image.py", "snippet": "class ImageLabel(object):\n \"\"\"Image Label Class.\"\"\"\n\n # Unofficial Image Label\n UNOFFICIAL_LABEL = \"system.official=false\"\n # Official Image Label\n OFFICIAL_LABEL = \"system.official=true\"\n\n # PAI Image Label\n PROVIDER_PAI_LABEL = \"system.origin=PAI\"\n # Community Image Label\n PROVIDER_COMMUNITY_LABEL = \"system.origin=Community\"\n\n # DLC Image Label: for training\n DLC_LABEL = \"system.supported.dlc=true\"\n # EAS Image Label: for inference\n EAS_LABEL = \"system.supported.eas=true\"\n # DSW Image Label: for develop\n DSW_LABEL = \"system.supported.dsw=true\"\n\n # Accelerator: Use GPU\n DEVICE_TYPE_GPU = \"system.chipType=GPU\"\n DEVICE_TYPE_CPU = \"system.chipType=CPU\"\n\n # Python Version\n # TODO: delete this label key\n PYTHON_VERSION = \"system.pythonVersion\"\n\n @staticmethod\n def framework_version(\n framework: str,\n version: str,\n ):\n \"\"\"Create a label for filtering images that support specific framework version.\n\n Args:\n framework (str): framework name, which is case sensitive.\n version (str): framework version. If version is '*', it will match all\n versions.\n\n Returns:\n str: framework version label string.\n\n Raises:\n ValueError: If the framework is not supported.\n \"\"\"\n if framework not in SUPPORTED_IMAGE_FRAMEWORKS:\n raise ValueError(\n f\"Unsupported framework: {framework}. Current supported frameworks are:\"\n f\" {SUPPORTED_IMAGE_FRAMEWORKS}\"\n )\n return f\"system.framework.{framework}={version}\"\n\n @staticmethod\n def language_version(\n language: str,\n version: str,\n ):\n \"\"\"Create a label for filtering images that support specific language version.\n\n Args:\n language (str): language name, which is case sensitive.\n version (str): language version. If version is '*', it will match all\n versions.\n\n Returns:\n str: language version label string.\n\n Raises:\n ValueError: If the language is not supported.\n \"\"\"\n if language not in SUPPORTED_IMAGE_LANGUAGES:\n raise ValueError(\n f\"Unsupported language: {language}. 
Current supported languages are:\"\n f\" {SUPPORTED_IMAGE_LANGUAGES}\"\n )\n # TODO: \"system.language.{language}={version}\"\n return f\"system.{language}Version={version}\"" }, { "identifier": "to_semantic_version", "path": "pai/common/utils.py", "snippet": "def to_semantic_version(version_str: str) -> Version:\n \"\"\"Convert version_str to semantic version.\n\n Convert version_str to semantic version, if version_str is not a valid\n semantic version, return '0.0.0'.\n\n Args:\n version_str[str]: Version string, such as '1.0.0', '1.0.0-rc1', '1.0.0+build.1'.\n\n Returns:\n :class:`semantic_version.Version`: Semantic version.\n \"\"\"\n try:\n return Version.coerce(version_str)\n except ValueError:\n # if version_str is not a valid semantic version, return '0.0.0'\n return Version.coerce(\"0.0.0\")" }, { "identifier": "DefaultServiceConfig", "path": "pai/model.py", "snippet": "class DefaultServiceConfig(object):\n \"\"\"Default configuration used in creating prediction service.\"\"\"\n\n # Listen Port\n listen_port = 8000\n\n # Default model path in container\n model_path = \"/eas/workspace/model/\"\n\n # Default user code path in container\n code_path = \"/ml/usercode/\"" }, { "identifier": "ModelBase", "path": "pai/model.py", "snippet": "class ModelBase(object):\n \"\"\"A class represent ModelBase.\"\"\"\n\n def __init__(\n self,\n model_data: str,\n inference_spec: Optional[InferenceSpec] = None,\n session: Session = None,\n ):\n self.model_data = model_data\n self.inference_spec = inference_spec\n self.session = session or get_default_session()\n\n def download(self, target_dir: str):\n \"\"\"Download the model data from OSS to local directory.\n\n Args:\n target_dir (str): The target directory to download the model data.\n\n Returns:\n str: Local directory path stores the model data.\n\n \"\"\"\n if not self.model_data:\n raise ValueError(\"Could not find the model data for this model.\")\n if not is_oss_uri(self.model_data):\n raise RuntimeError(\"Download method only support model data stored in OSS.\")\n self._download_model_data(target_dir)\n\n return target_dir\n\n def _download_model_data(self, target_dir):\n if not self.model_data:\n return\n logger.info(f\"Prepare model data to local directory: {target_dir}\")\n if self.model_data.startswith(\"oss://\"):\n oss_uri = OssUriObj(self.model_data)\n oss_bucket = self.session.get_oss_bucket(oss_uri.bucket_name)\n download(\n oss_path=oss_uri.object_key,\n local_path=target_dir,\n bucket=oss_bucket,\n un_tar=True,\n )\n else:\n if not os.path.exists(self.model_data):\n raise ValueError(f\"Model data path does not exist: {self.model_data}\")\n\n os.makedirs(target_dir, exist_ok=True)\n if os.path.isfile(self.model_data):\n shutil.copy(\n self.model_data,\n os.path.join(target_dir, os.path.basename(self.model_data)),\n )\n else:\n distutils.dir_util.copy_tree(self.model_data, target_dir)\n\n def _upload_model_data(self):\n \"\"\"Upload the model artifact to OSS bucket if self.model_data is a local\n file path.\n\n \"\"\"\n if not self.model_data:\n return\n elif is_oss_uri(self.model_data):\n return self.model_data\n elif not os.path.exists(self.model_data):\n raise RuntimeError(f\"Model data path does not exist: {self.model_data}\")\n\n dest_oss_path = self.session.get_storage_path_by_category(category=\"model_data\")\n upload_model_data = upload(\n source_path=self.model_data,\n oss_path=dest_oss_path,\n bucket=self.session.oss_bucket,\n )\n return upload_model_data\n\n def list_model_files(self, uri_format: bool = False) -> 
Iterator[str]:\n \"\"\"List model files under the model path.\n\n Args:\n uri_format (bool): If True, return the model file path in OSS URI format.\n\n Returns:\n Iterator[str]: Iterator of model files.\n \"\"\"\n if not self.model_data:\n raise ValueError(\"Model data path is not specified.\")\n\n if not is_oss_uri(self.model_data):\n raise ValueError(\"Method only support model data stored in OSS.\")\n\n oss_uri_obj = OssUriObj(self.model_data)\n bucket = self.session.get_oss_bucket(\n bucket_name=oss_uri_obj.bucket_name,\n )\n\n def _get_relative_path(obj_key: str):\n # if the model_data is reference an object, return the object file\n # name.\n if oss_uri_obj.object_key == obj_key:\n return os.path.basename(obj_key)\n\n path = obj_key[len(oss_uri_obj.object_key) :]\n return path.lstrip(\"/\") if path.startswith(\"/\") else path\n\n obj_iter = ObjectIterator(bucket=bucket, prefix=oss_uri_obj.object_key)\n for obj_info in obj_iter:\n if uri_format:\n yield f\"oss://{bucket.bucket_name}/{obj_info.key}\"\n else:\n yield _get_relative_path(obj_info.key)\n\n def _get_inference_spec(self):\n return self.inference_spec\n\n def deploy(\n self,\n service_name: str,\n instance_count: Optional[int] = 1,\n instance_type: Optional[str] = None,\n resource_config: Optional[Union[Dict[str, int], ResourceConfig]] = None,\n resource_id: Optional[str] = None,\n options: Optional[Dict[str, Any]] = None,\n service_type: Optional[str] = None,\n wait: bool = True,\n serializer: Optional[\"SerializerBase\"] = None,\n **kwargs,\n ):\n \"\"\"Deploy a prediction service with the model.\"\"\"\n if is_local_run_instance_type(instance_type):\n return self._deploy_local(\n instance_type=instance_type,\n serializer=serializer,\n wait=wait,\n )\n else:\n return self._deploy(\n service_name=service_name,\n instance_count=instance_count,\n instance_type=instance_type,\n resource_config=resource_config,\n resource_id=resource_id,\n service_type=service_type,\n options=options,\n wait=wait,\n serializer=serializer,\n )\n\n def _generate_service_name(self):\n s = os.path.basename(self.model_data.rstrip(\"/\")) + random_str(8)\n return to_plain_text(s)\n\n def _deploy(\n self,\n service_name: str = None,\n instance_count: int = 1,\n instance_type: str = None,\n resource_config: Union[Dict[str, int], ResourceConfig] = None,\n resource_id: str = None,\n service_type: str = None,\n options: Dict[str, Any] = None,\n wait: bool = True,\n serializer: \"SerializerBase\" = None,\n ):\n \"\"\"Create a prediction service.\"\"\"\n if not service_name:\n service_name = self._generate_service_name()\n logger.info(\n \"Service name is not specified, using a generated service\"\n f\" name to create the service: service_name={service_name}\"\n )\n\n config = self._build_service_config(\n service_name=service_name,\n instance_count=instance_count,\n instance_type=instance_type,\n service_type=service_type,\n resource_config=resource_config,\n resource_id=resource_id,\n options=options,\n )\n service_name = self.session.service_api.create(config=config)\n self._wait_service_visible(service_name)\n if service_type == ServiceType.Async:\n predictor = AsyncPredictor(\n service_name=service_name,\n session=self.session,\n serializer=serializer,\n )\n else:\n predictor = Predictor(\n service_name=service_name,\n session=self.session,\n serializer=serializer,\n )\n print(\n \"View the service detail by accessing the console URI: \\n{}\".format(\n predictor.console_uri\n )\n )\n if wait:\n predictor.wait_for_ready()\n\n return predictor\n\n def 
_wait_service_visible(self, service_name, attempts=3, interval=2):\n \"\"\"Wait for the service to be visible in DescribeService API.\n\n hack:\n https://aone.alibaba-inc.com/v2/project/1134421/bug#viewIdentifier=5dfb195e2e2b84f6b2f24718&openWorkitemIdentifier=50192431\n\n \"\"\"\n while attempts > 0:\n obj = self.session.service_api.get(service_name)\n if \"ServiceUid\" in obj:\n return\n attempts -= 1\n time.sleep(interval)\n logger.warning(\"DescribeService API failed to get the Service object.\")\n\n def _build_service_config(\n self,\n service_name: str = None,\n instance_count: int = None,\n instance_type: str = None,\n resource_config: Union[ResourceConfig, Dict[str, Any]] = None,\n resource_id: str = None,\n service_type: str = None,\n options: Dict[str, Any] = None,\n ) -> Dict[str, Any]:\n \"\"\"Build a service config dictionary used to create a PAI EAS service.\"\"\"\n self.model_data = self._upload_model_data()\n\n resource_config = (\n ResourceConfig(**resource_config)\n if resource_config and isinstance(resource_config, dict)\n else None\n )\n\n if resource_config and instance_type:\n raise ValueError(\n f\"Only one of 'instance_type' and 'resource_config' \"\n f\"is required, but both have been provided: instance_type\"\n f\"={instance_type}, resource_config=\"\n f\"{resource_config}.\"\n )\n\n inference_spec = InferenceSpec(\n self._get_inference_spec().to_dict() if self.inference_spec else dict()\n )\n\n if self.model_data:\n if not inference_spec.is_container_serving():\n # if model_data is an OSS URI with endpoint, truncate the endpoint.\n oss_uri_obj = OssUriObj(self.model_data)\n model_path_uri = \"oss://{bucket_name}/{key}\".format(\n bucket_name=oss_uri_obj.bucket_name,\n key=oss_uri_obj.object_key,\n )\n inference_spec.add_option(\"model_path\", model_path_uri)\n else:\n try:\n inference_spec.mount(\n self.model_data,\n mount_path=DefaultServiceConfig.model_path,\n )\n except DuplicatedMountException as e:\n # ignore duplicated mount\n logger.info(\"Model is already mounted the container: %s\", e)\n\n if service_type:\n inference_spec.add_option(\"metadata.type\", service_type)\n if inference_spec.is_container_serving():\n inference_spec.add_option(\"metadata.rpc.proxy_path\", \"/\")\n\n if service_name:\n inference_spec.add_option(\"name\", service_name)\n\n if instance_count:\n inference_spec.add_option(\"metadata.instance\", instance_count)\n\n if instance_type:\n inference_spec.add_option(\"cloud.computing.instance_type\", instance_type)\n elif resource_config:\n inference_spec.add_option(\"metadata.cpu\", resource_config.cpu)\n inference_spec.add_option(\"metadata.memory\", resource_config.memory)\n if resource_config.gpu:\n inference_spec.add_option(\"metadata.gpu\", resource_config.gpu)\n if resource_config.gpu_memory:\n inference_spec.add_option(\n \"metadata.gpu_memory\", resource_config.gpu_memory\n )\n if resource_config.gpu:\n logger.warning(\n \"Parameters 'gpu' is set, the 'gpu_memory' parameter \"\n \"does not take effect.\"\n )\n\n if resource_id:\n inference_spec.add_option(\"metadata.resource\", resource_id)\n\n if options:\n inference_spec.merge_options(options=options)\n\n return inference_spec.to_dict()\n\n def _deploy_local(\n self,\n instance_type: str,\n serializer: SerializerBase = None,\n wait: bool = True,\n ) -> LocalPredictor:\n \"\"\"Deploy the model in local using docker.\"\"\"\n\n if not self.inference_spec.is_container_serving():\n raise RuntimeError(\n \"Currently, only model using the InferenceSpec that serving with\"\n \" 
container support local run.\"\n )\n\n if len(self.inference_spec.containers) > 1:\n raise RuntimeError(\n \"InferenceSpec that serving with multiple container \"\n \"does not support local run.\"\n )\n\n # prepare model data to local\n work_dir = tempfile.mkdtemp()\n model_dir = os.path.join(work_dir, \"model\")\n\n self._download_model_data(target_dir=model_dir)\n volumes = {\n model_dir: {\n \"bind\": DefaultServiceConfig.model_path,\n \"mode\": \"rw\",\n }\n }\n\n # prepare used storage to local directory.\n if \"storage\" in self.inference_spec:\n # only OSS storage config support local run.\n if any(s for s in self.inference_spec.storage if \"oss\" not in s):\n raise ValueError(\n f\"Local run only support InferenceSpec using OSS storage config: \"\n f\"{self.inference_spec.to_dict()}\"\n )\n for idx, storage in enumerate(self.inference_spec.storage):\n store_dir = os.path.join(work_dir, f\"storage_{idx}\")\n os.makedirs(store_dir, exist_ok=True)\n oss_uri = OssUriObj(storage.oss.path)\n download(\n oss_path=oss_uri.object_key,\n local_path=store_dir,\n bucket=self.session.get_oss_bucket(oss_uri.bucket_name),\n )\n volumes[store_dir] = {\"bind\": storage.mount_path, \"mode\": \"rw\"}\n\n container_spec = self.inference_spec.containers[0].to_dict()\n env_vars = {\n item[\"name\"]: item[\"value\"] for item in container_spec.get(\"env\", [])\n }\n\n # build local launch script\n requirements_list = container_spec.get(\"prepare\", dict()).get(\n \"pythonRequirements\", []\n )\n requirements_path = container_spec.get(\"prepare\", dict()).get(\n \"pythonRequirementsPath\", None\n )\n\n # build command to install requirements\n if requirements_list:\n install_requirements = \" \".join(\n [\n shlex.quote(s)\n for s in [\"python\", \"-m\", \"pip\", \"install\"] + requirements_list\n ]\n )\n elif requirements_path:\n install_requirements = \" \".join(\n [\n shlex.quote(s)\n for s in [\"python\", \"-m\", \"pip\", \"install\", \"-r\", requirements_path]\n ]\n )\n else:\n install_requirements = \"\"\n\n user_scripts = container_spec.get(\"script\", \"\")\n launch_script = textwrap.dedent(\n f\"\"\"\\\n set -e\n {install_requirements}\n {user_scripts}\n \"\"\"\n )\n\n gpu_count = -1 if instance_type == INSTANCE_TYPE_LOCAL_GPU else None\n container_run = run_container(\n image_uri=container_spec[\"image\"],\n port=container_spec.get(\"port\"),\n environment_variables=env_vars,\n entry_point=[\n \"/bin/sh\",\n \"-c\",\n launch_script,\n ],\n volumes=volumes,\n gpu_count=gpu_count,\n )\n predictor = LocalPredictor(\n container_id=container_run.container.id,\n port=container_run.port,\n serializer=serializer,\n )\n\n if wait:\n predictor.wait_for_ready()\n\n return predictor\n\n @classmethod\n def _wait_local_server_ready(\n cls,\n container_run: ContainerRun,\n interval: int = 5,\n ):\n \"\"\"Wait for the local model server to be ready.\"\"\"\n while True:\n try:\n # Check whether the container is still running.\n if not container_run.is_running():\n raise RuntimeError(\n \"Container exited unexpectedly, status: {}\".format(\n container_run.status\n )\n )\n\n # Make a HEAD request to the server.\n requests.head(\n f\"http://127.0.0.1:{container_run.port}/\",\n )\n break\n except requests.ConnectionError:\n # ConnectionError means server is not ready.\n logging.debug(\"Waiting for the container to be ready...\")\n time.sleep(interval)\n continue\n\n def register(\n self,\n model_name: str,\n version: str = None,\n accessibility: Optional[str] = None,\n version_labels: Optional[Dict[str, str]] = 
None,\n version_description: Optional[str] = None,\n format_type: Optional[str] = None,\n framework_type: Optional[str] = None,\n training_spec: Optional[Dict[str, Any]] = None,\n approval_status: Optional[str] = None,\n metrics: Optional[Dict[str, Any]] = None,\n options: Optional[str] = None,\n model_labels: Optional[Dict[str, str]] = None,\n model_description: Optional[str] = None,\n model_doc: Optional[str] = None,\n origin: Optional[str] = None,\n domain: Optional[str] = None,\n task: Optional[str] = None,\n ) -> \"RegisteredModel\":\n \"\"\"Register a model to the PAI model registry.\n\n Use ``self.model_data`` to register a model to the PAI model registry.\n\n Args:\n model_name (str): The name of the model. If the model name already exists in\n workspace, the model will be updated with a new model version,\n parameters like ``model_labels``, ``model_description``, ``model_doc``,\n ``origin``, ``domain``, ``task``, ``accessibility`` will be ignored. If\n the model name does not exist, a new model will be created.\n version (str, optional): The version of the model. If not specified, a new\n version will be created. If the version already exists, registration\n will fail.\n accessibility (str, optional): The accessibility of the model. The value\n can be \"PUBLIC\" or \"PRIVATE\". Default to \"PRIVATE\".\n version_labels (dict, optional): The labels of the model version.\n version_description (str, optional): The description of the model version.\n format_type (str, optional): The format type of the model version. The value\n can be \"OfflineModel\", \"SavedModel\", \"Keras H5\", \"Frozen Pb\",\n \"Caffe Prototxt\", \"TorchScript\", \"XGBoost\", \"PMML\", \"AlinkModel\",\n \"ONNX\". Default to None.\n framework_type (str, optional): The framework type of the model version. The\n value can be \"PyTorch\", \"TensorFlow\", \"Keras\", \"Caffe\", \"Alink\",\n \"Xflow\", \"XGBoost\". Default to None.\n training_spec (dict, optional): The training spec of the model version.\n Usually, it is got from the training job. Default to None.\n approval_status (str, optional): The approval status of the model version.\n The value can be \"APPROVED\", \"PENDING\". Default to None.\n metrics (dict, optional): The metrics of the model version.\n options (str, optional): Any other options that you want to pass to the\n model registry. Default to None.\n model_labels (dict, optional): The labels of the model.\n model_description (str, optional): The description of the model.\n model_doc (str, optional): The documentation uri of the model.\n origin (str, optional): The origin of the model. For example, \"huggingface\",\n \"modelscope\" etc. Default to None.\n domain (str, optional): The domain that the model is used for. For example,\n \"aigc\", \"audio\", \"nlp\", \"cv\" etc. Default to None.\n task (str, optional): The task that the model is used for. For example,\n \"large-language-model\", \"text-classification\", \"image-classification\",\n \"sequence-labeling\" etc. Default to None.\n\n Returns:\n :class:`pai.model.RegisteredModel`: The registered model object.\n \"\"\"\n\n if not self.model_data:\n raise ValueError(\n \"Register model failed, ``model_data`` is required to register a model.\"\n )\n\n # Ensure model data is uploaded to OSS.\n self.model_data = self._upload_model_data()\n\n # By specifying model_name with double quotes, the list api will process the\n # precise search. 
Otherwise, the list api will process the fuzzy search.\n resp = self.session.model_api.list(\n model_name=f'\"{model_name}\"',\n )\n if resp.total_count == 0:\n model_id = self.session.model_api.create(\n model_name=model_name,\n labels=model_labels,\n model_description=model_description,\n model_doc=model_doc,\n origin=origin,\n domain=domain,\n task=task,\n accessibility=accessibility,\n )\n else:\n model_id = resp.items[0][\"ModelId\"]\n\n version_name = self.session.model_api.create_version(\n model_id=model_id,\n uri=self.model_data,\n version_name=version,\n labels=version_labels,\n version_description=version_description,\n format_type=format_type,\n framework_type=framework_type,\n training_spec=training_spec,\n inference_spec=self.inference_spec.to_dict()\n if self.inference_spec\n else None,\n approval_status=approval_status,\n metrics=metrics,\n options=options,\n )\n return RegisteredModel(model_name=model_name, model_version=version_name)" }, { "identifier": "ResourceConfig", "path": "pai/model.py", "snippet": "class ResourceConfig(object):\n \"\"\"A class that represents the resource used by a PAI prediction service\n instance.\"\"\"\n\n def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None):\n \"\"\"ResourceConfig initializer.\n\n The public resource group does not support requesting GPU resources with\n `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameter only for services\n deployed to dedicated resource groups that provide GPU machine instances.\n\n Args:\n cpu (int): The number of CPUs that each instance requires.\n memory (int): The amount of memory that each instance requires,\n must be an integer, Unit: MB.\n gpu (int): The number of GPUs that each instance requires.\n gpu_memory (int): The amount of GPU memory that each instance requires.\n The value must be an integer, Unit: GB.\n\n PAI allows memory resources of a GPU to be allocated to multiple instances.\n If you want multiple instances to share the memory resources of a GPU,\n set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each\n instance occupies a GPU and the gpu_memory parameter does not take effect.\n\n .. 
note::\n\n **Important** PAI does not enable the strict isolation of GPU memory.\n To prevent out of memory (OOM) errors, make sure that the GPU memory\n used by each instance does not exceed the requested amount.\n \"\"\"\n self.cpu = cpu\n self.memory = memory\n self.gpu = gpu\n self.gpu_memory = gpu_memory\n\n def __repr__(self):\n return (\n f\"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0},\"\n f\" gpu_memory={self.gpu_memory or 0}GB)\"\n )\n\n def __str__(self):\n return self.__repr__()\n\n def to_dict(self):\n \"\"\"Transform the ResourceConfig instance to a dictionary.\n\n Returns:\n dict:\n\n \"\"\"\n res = {\n \"cpu\": self.cpu,\n \"gpu\": self.gpu,\n \"gpu_memory\": self.gpu_memory,\n \"memory\": self.memory,\n }\n\n return {k: v for k, v in res.items() if v is not None}" }, { "identifier": "container_serving_spec", "path": "pai/model.py", "snippet": "def container_serving_spec(\n command: str,\n image_uri: Union[str, ImageInfo],\n source_dir: Optional[str] = None,\n git_config: Optional[Dict[str, Any]] = None,\n port: Optional[int] = None,\n environment_variables: Optional[Dict[str, str]] = None,\n requirements: Optional[List[str]] = None,\n requirements_path: Optional[str] = None,\n health_check: Optional[Dict[str, Any]] = None,\n session: Optional[Session] = None,\n) -> InferenceSpec:\n \"\"\"A convenient function to create an InferenceSpec instance that serving the model\n with given container and script.\n\n Examples::\n\n infer_spec: InferenceSpec = container_serving_spec(\n command=\"python run.py\",\n source_dir=\"./model_server/\",\n image_uri=\"<ServingImageUri>\",\n )\n\n m = Model(\n model_data=\"oss://<YourOssBucket>/path/to/your/model\",\n inference_spec=infer_spec,\n )\n m.deploy(\n instance_type=\"ecs.c6.xlarge\"\n )\n\n\n Args:\n command (str): The command used to launch the Model server.\n source_dir (str): A relative path or an absolute path to the source code\n directory used to load model and launch the HTTP server, it will be\n uploaded to the OSS bucket and mounted to the container. If there is a\n ``requirements.txt`` file under the directory, it will be installed before\n the prediction server started.\n\n If 'git_config' is provided, 'source_dir' should be a relative location\n to a directory in the Git repo. With the following GitHub repo directory\n structure:\n\n .. code::\n\n |----- README.md\n |----- src\n |----- train.py\n |----- test.py\n\n if you need 'src' directory as the source code directory, you can assign\n source_dir='./src/'.\n git_config (Dict[str, str]): Git configuration used to clone the repo.\n Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and\n ``token``. The ``repo`` is required. All other fields are optional. ``repo``\n specifies the Git repository. If you don't provide ``branch``, the default\n value 'master' is used. If you don't provide ``commit``, the latest commit\n in the specified branch is used. ``username``, ``password`` and ``token``\n are for authentication purpose. For example, the following config:\n\n .. 
code:: python\n\n git_config = {\n 'repo': 'https://github.com/modelscope/modelscope.git',\n 'branch': 'master',\n 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960'\n }\n\n results in cloning the repo specified in 'repo', then checking out the\n 'master' branch, and checking out the specified commit.\n image_uri (str): The Docker image used to run the prediction service.\n port (int): Expose port of the server in container, the prediction request\n will be forward to the port. The environment variable ``LISTENING_PORT``\n in the container will be set to this value. Default to 8000.\n environment_variables (Dict[str, str], optional): Dictionary of environment\n variable key-value pairs to set on the running container.\n requirements (List[str], optional): A list of Python package dependency, it\n will be installed before the serving container run.\n requirements_path (str, optional): A absolute path to the requirements.txt in\n the container.\n health_check (Dict[str, Any], optional): The health check configuration. If it\n not set, A TCP readiness probe will be used to check the health of the\n HTTP server.\n session (Session, optional): A PAI session instance used for communicating\n with PAI service.\n\n Returns:\n :class:`pai.model.InferenceSpec`: An InferenceSpec instance.\n \"\"\"\n session = session or get_default_session()\n if git_config:\n updated_args = git_utils.git_clone_repo(\n git_config=git_config,\n source_dir=source_dir,\n )\n source_dir = updated_args[\"source_dir\"]\n\n if not port:\n port = DefaultServiceConfig.listen_port\n elif int(port) in _RESERVED_PORTS:\n raise ValueError(\n \"Reserved port {} is not allowed to use as serving port.\".format(port),\n )\n\n if source_dir:\n if not os.path.exists(source_dir):\n raise ValueError(\"Source directory {} does not exist.\".format(source_dir))\n\n if not os.path.isdir(source_dir):\n raise ValueError(\n \"Source directory {} is not a directory.\".format(source_dir)\n )\n\n code_mount_path = DefaultServiceConfig.code_path\n # build the command for serving container.\n command = textwrap.dedent(\n f\"\"\"\\\n # change working directory to code mount path.\n cd {code_mount_path}\n {command}\n \"\"\"\n )\n\n if not requirements_path and os.path.exists(\n os.path.join(source_dir, \"requirements.txt\")\n ):\n requirements_path = posixpath.join(code_mount_path, \"requirements.txt\")\n else:\n code_mount_path = None\n requirements_path = None\n\n if isinstance(image_uri, ImageInfo):\n image_uri = image_uri.image_uri\n\n environment_variables = environment_variables or dict()\n container_spec = {\n \"image\": image_uri,\n \"port\": port,\n \"script\": command,\n \"env\": [\n {\"name\": key, \"value\": str(value)}\n for key, value in environment_variables.items()\n ]\n if environment_variables\n else [],\n }\n\n if health_check:\n container_spec[\"health_check\"] = health_check\n\n if requirements:\n container_spec[\"prepare\"] = {\"pythonRequirements\": requirements}\n if requirements_path:\n logger.warning(\n \"If the parameter 'requirements' is set, the requirements_path \"\n \"parameter will be ignored.\"\n )\n elif requirements_path:\n container_spec[\"prepare\"] = {\n \"pythonRequirementsPath\": requirements_path,\n }\n\n inference_spec = InferenceSpec(containers=[container_spec])\n\n # mount the uploaded serving scripts to the serving container.\n if source_dir:\n inference_spec.mount(\n source_dir,\n code_mount_path,\n session=session,\n )\n return inference_spec" }, { "identifier": "SerializerBase", "path": 
"pai/serializers.py", "snippet": "class SerializerBase(ABC):\n \"\"\"Abstract class for creating a Serializer class for predictor.\"\"\"\n\n @abstractmethod\n def serialize(self, data) -> bytes:\n \"\"\"Serialize the input data to bytes for transmitting.\"\"\"\n\n @abstractmethod\n def deserialize(self, data: bytes):\n \"\"\"Deserialize the data from raw bytes to Python object .\"\"\"\n\n def inspect_from_service(\n self, service_name: str, *, session: Optional[Session] = None\n ):\n \"\"\"Inspect the online prediction service to complete the serializer instance\n initialization.\n\n The implementation of the `inspect_from_service` method is optional. You only\n need to implement it if your serializer requires additional information from\n service metadata or if it needs to send a request to the service in order to\n be initialized.\n\n \"\"\"" }, { "identifier": "Session", "path": "pai/session.py", "snippet": "class Session(ResourceAPIsContainerMixin):\n \"\"\"A class responsible for communicating with PAI services.\"\"\"\n\n def __init__(\n self,\n region_id: str,\n workspace_id: Optional[str] = None,\n credential_config: Optional[CredentialConfig] = None,\n oss_bucket_name: Optional[str] = None,\n oss_endpoint: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"PAI Session Initializer.\n\n Args:\n credential_config (:class:`alibabacloud_credentials.models.Config`, optional):\n The credential config used to access the Alibaba Cloud.\n region_id (str): The ID of the Alibaba Cloud region where the service\n is located.\n workspace_id (str, optional): ID of the workspace used in the default\n session.\n oss_bucket_name (str, optional): The name of the OSS bucket used in the\n session.\n oss_endpoint (str, optional): The endpoint for the OSS bucket.\n \"\"\"\n\n if not region_id:\n raise ValueError(\"Region ID must be provided.\")\n\n self._credential_config = credential_config\n self._region_id = region_id\n self._workspace_id = workspace_id\n self._oss_bucket_name = oss_bucket_name\n self._oss_endpoint = oss_endpoint\n\n header = kwargs.pop(\"header\", None)\n super(Session, self).__init__(header=header)\n\n @property\n def region_id(self) -> str:\n return self._region_id\n\n @property\n def is_inner(self) -> bool:\n return self._region_id in INNER_REGION_IDS\n\n @property\n def oss_bucket_name(self) -> str:\n return self._oss_bucket_name\n\n @property\n def oss_endpoint(self) -> str:\n return self._oss_endpoint\n\n @property\n def credential_config(self) -> CredentialConfig:\n return self._credential_config\n\n @property\n def workspace_name(self):\n if hasattr(self, \"_workspace_name\") and self._workspace_name:\n return self._workspace_name\n\n if not self._workspace_id:\n raise ValueError(\"Workspace id is not set.\")\n workspace_api_obj = self.workspace_api.get(workspace_id=self._workspace_id)\n self._workspace_name = workspace_api_obj[\"WorkspaceName\"]\n return self._workspace_name\n\n @property\n def provider(self) -> str:\n caller_identity = self._acs_sts_client.get_caller_identity().body\n return caller_identity.account_id\n\n @property\n def workspace_id(self) -> str:\n \"\"\"ID of the workspace used by the session.\"\"\"\n return self._workspace_id\n\n @property\n def console_uri(self) -> str:\n \"\"\"The web console URI for PAI service.\"\"\"\n if self.is_inner:\n return \"https://pai-next.alibaba-inc.com\"\n else:\n return \"https://pai.console.aliyun.com/console\"\n\n def _init_oss_config(\n self,\n ):\n \"\"\"Initialize a OssConfig instance.\"\"\"\n if not 
self._oss_bucket_name:\n # If OSS bucket name is not provided, use the default OSS storage URI\n # that is configured for the workspace.\n default_oss_uri = self.workspace_api.get_default_storage_uri(\n self.workspace_id\n )\n if not default_oss_uri:\n raise RuntimeError(\n \"No default OSS URI is configured for the workspace.\"\n )\n oss_uri_obj = OssUriObj(default_oss_uri)\n self._oss_bucket_name = oss_uri_obj.bucket_name\n\n if not self._oss_endpoint:\n self._oss_endpoint = self._get_default_oss_endpoint()\n\n def _get_oss_auth(self):\n auth = oss2.ProviderAuth(\n credentials_provider=CredentialProviderWrapper(\n config=self._credential_config,\n )\n )\n return auth\n\n @property\n def oss_bucket(self):\n \"\"\"A OSS2 bucket instance used by the session.\"\"\"\n if not self._oss_bucket_name or not self._oss_endpoint:\n self._init_oss_config()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=self._oss_endpoint,\n bucket_name=self._oss_bucket_name,\n )\n return oss_bucket\n\n def save_config(self, config_path=None):\n \"\"\"Save the configuration of the session to a local file.\"\"\"\n attrs = {key.lstrip(\"_\"): value for key, value in vars(self).items()}\n config = {\n key: value\n for key, value in attrs.items()\n if key in _DEFAULT_CONFIG_KEYS and value is not None\n }\n\n config_path = config_path or DEFAULT_CONFIG_PATH\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path, \"w\") as f:\n f.write(json.dumps(config, indent=4))\n logger.info(\"Write PAI config succeed: config_path=%s\" % config_path)\n\n def patch_oss_endpoint(self, oss_uri: str):\n oss_uri_obj = OssUriObj(oss_uri)\n if oss_uri_obj.endpoint:\n return oss_uri\n\n # patch endpoint using current OSS bucket endpoint.\n endpoint = self.oss_bucket.endpoint\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n return \"oss://{bucket_name}.{endpoint}/{key}\".format(\n bucket_name=oss_uri_obj.bucket_name,\n endpoint=endpoint,\n key=oss_uri_obj.object_key,\n )\n\n def _get_default_oss_endpoint(self) -> str:\n \"\"\"Returns a default OSS endpoint.\"\"\"\n\n # OSS Endpoint document:\n # https://help.aliyun.com/document_detail/31837.html\n internet_endpoint = \"oss-{}.aliyuncs.com\".format(self.region_id)\n internal_endpoint = \"oss-{}-internal.aliyuncs.com\".format(self.region_id)\n\n return (\n internet_endpoint\n if is_domain_connectable(internal_endpoint)\n else internet_endpoint\n )\n\n def get_oss_bucket(self, bucket_name: str, endpoint: str = None) -> oss2.Bucket:\n \"\"\"Get a OSS bucket using the credentials of the session.\n\n Args:\n bucket_name (str): The name of the bucket.\n endpoint (str): Endpoint of the bucket.\n\n Returns:\n :class:`oss2.Bucket`: A OSS bucket instance.\n\n \"\"\"\n endpoint = endpoint or self._oss_endpoint or self._get_default_oss_endpoint()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=endpoint,\n bucket_name=bucket_name,\n )\n return oss_bucket\n\n @classmethod\n def get_storage_path_by_category(\n cls, category: str, dir_name: Optional[str] = None\n ) -> str:\n \"\"\"Get an OSS storage path for the resource.\n\n Args:\n category (str): The category of the resource.\n dir_name (str, optional): The directory name of the resource.\n\n Returns:\n str: A OSS storage path.\n\n \"\"\"\n dir_name = dir_name or datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n storage_path = posixpath.join(\"pai\", category, 
dir_name).strip()\n\n if not storage_path.endswith(\"/\"):\n storage_path += \"/\"\n return storage_path\n\n def is_supported_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n return bool(machine_spec)\n\n def is_gpu_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n if not machine_spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for training job. \"\n \"Please provide a supported instance type.\"\n )\n return machine_spec[\"AcceleratorType\"] == \"GPU\"\n\n def is_supported_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n return bool(spec)\n\n def is_gpu_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n\n if not spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for deploying. \"\n \"Please provide a supported instance type.\"\n )\n return bool(spec[\"GPU\"])" }, { "identifier": "get_default_session", "path": "pai/session.py", "snippet": "def get_default_session() -> \"Session\":\n \"\"\"Get the default session used by the program.\n\n If the global default session is set, the function will try to initialize\n a session from config file.\n\n Returns:\n :class:`pai.session.Session`: The default session.\n\n \"\"\"\n global _default_session\n if not _default_session:\n config = load_default_config_file()\n if not config:\n return\n _default_session = Session(**config)\n return _default_session" } ]
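The SerializerBase snippet in the context above fixes only an abstract serialize/deserialize contract (bytes out, bytes in). The short sketch below illustrates that contract with a hypothetical JsonBytesSerializer; the class name and the choice of JSON encoding are assumptions made purely for illustration, and a real implementation would subclass pai.serializers.SerializerBase.

import json
from typing import Any


class JsonBytesSerializer:
    """Minimal sketch of the SerializerBase contract shown in the snippet above.

    In a real project this would subclass pai.serializers.SerializerBase; the
    class name and the JSON encoding are assumptions for illustration only.
    """

    def serialize(self, data: Any) -> bytes:
        # The contract requires bytes suitable for transmitting to the service.
        return json.dumps(data).encode("utf-8")

    def deserialize(self, data: bytes) -> Any:
        # The contract turns the raw response bytes back into a Python object.
        return json.loads(data.decode("utf-8"))


if __name__ == "__main__":
    s = JsonBytesSerializer()
    raw = s.serialize({"text": "weather is good"})
    assert s.deserialize(raw) == {"text": "weather is good"}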
import logging
from typing import Any, Dict, List, Optional, Union

from ..api.image import ImageLabel
from ..common.utils import to_semantic_version
from ..model import (
    DefaultServiceConfig,
    ModelBase,
    ResourceConfig,
    container_serving_spec,
)
from ..serializers import SerializerBase
from ..session import Session, get_default_session
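The import block above pulls in container_serving_spec, whose parameters are documented in the first context snippet of this record. The call below is a hedged usage sketch assembled only from that docstring: the image URI, command, source directory, and environment variables are placeholders, only keyword arguments are used, and the exact public import path and signature may differ in the real PAI SDK.

# Sketch only: requires the PAI Python SDK and valid credentials; all values are placeholders.
from pai.model import container_serving_spec  # import path assumed from the relative import above

inference_spec = container_serving_spec(
    command="python serving.py",              # entrypoint run inside the serving container
    source_dir="./serving/src/",              # local code mounted into the container
    image_uri="<your-serving-image-uri>",     # placeholder Docker image for the prediction service
    port=8000,                                # default listening port per the docstring
    environment_variables={"MODEL_NAME": "demo"},
    requirements=["transformers", "torch"],   # installed before the server starts
)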
11,105
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) class ModelScopeModel(ModelBase): """A ModelScope ``Model`` that can be deployed in PAI to create a prediction service. A ModelScopeModel instance includes the model artifact path and information on how to create prediction service in PAI. By calling the deploy() method, a prediction service is created in PAI and a :class:`pai.predictor.Predictor` instance is returned that can be used to make prediction to the service. Example:: # Initialize a ModelScopeModel. m: ModelScopeModel = ModelScopeModel( model_data="oss://bucket-name/path/to/model", source_dir="./serving/src/", command="python serving.py", modelscope_version="latest", ) # Deploy the model to create an online prediction service. p: Predictor = m.deploy( service_name="ms_bert_serving", instance_type="ecs.gn6i-c4g1.xlarge", instance_count=1, options={ "metadata.rpc.keepalive": 5000000, "features.eas.aliyun.com/extra-ephemeral-storage":"40Gi", }, ) # Make prediction by sending the data to the online prediction service. result = p.predict("weather is good") """ def __init__( self, model_data: Optional[str] = None, image_uri: Optional[str] = None, modelscope_version: Optional[str] = None, command: Optional[str] = None, source_dir: Optional[str] = None, git_config: Optional[Dict[str, str]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None,
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) class ModelScopeModel(ModelBase): """A ModelScope ``Model`` that can be deployed in PAI to create a prediction service. A ModelScopeModel instance includes the model artifact path and information on how to create prediction service in PAI. By calling the deploy() method, a prediction service is created in PAI and a :class:`pai.predictor.Predictor` instance is returned that can be used to make prediction to the service. Example:: # Initialize a ModelScopeModel. m: ModelScopeModel = ModelScopeModel( model_data="oss://bucket-name/path/to/model", source_dir="./serving/src/", command="python serving.py", modelscope_version="latest", ) # Deploy the model to create an online prediction service. p: Predictor = m.deploy( service_name="ms_bert_serving", instance_type="ecs.gn6i-c4g1.xlarge", instance_count=1, options={ "metadata.rpc.keepalive": 5000000, "features.eas.aliyun.com/extra-ephemeral-storage":"40Gi", }, ) # Make prediction by sending the data to the online prediction service. result = p.predict("weather is good") """ def __init__( self, model_data: Optional[str] = None, image_uri: Optional[str] = None, modelscope_version: Optional[str] = None, command: Optional[str] = None, source_dir: Optional[str] = None, git_config: Optional[Dict[str, str]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
7
2023-12-01 01:40:12+00:00
16k
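Each record in this dump pairs a code prefix (the cropped_code/all_code fields, which both stop at the same point) with the single gold continuation in next_line. A minimal sketch of turning one row into a next-line-completion example follows; it assumes the loader exposes each row as a plain dict keyed by the column names of this dump, so adjust the keys if your loader names them differently.

from typing import Dict, Tuple


def record_to_example(record: Dict[str, str]) -> Tuple[str, str]:
    """Build a (prompt, target) pair for next-line prediction from one record.

    Assumes dict keys matching this dump's columns; either ``all_code`` or
    ``cropped_code`` can serve as the prompt depending on the context budget.
    """
    prompt = record["all_code"].rstrip("\n") + "\n"
    target = record["next_line"].strip()
    return prompt, target


# Usage sketch with a toy record (not real data):
prompt, target = record_to_example(
    {"all_code": "def __init__(\n    self,", "next_line": "    session=None,"}
)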
JunMa11/UHNSeg-Quiz
nnunetv2/inference/predict_from_raw_data.py
[ { "identifier": "default_num_processes", "path": "nnunetv2/configuration.py", "snippet": "ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low" }, { "identifier": "PreprocessAdapterFromNpy", "path": "nnunetv2/inference/data_iterators.py", "snippet": "class PreprocessAdapterFromNpy(DataLoader):\n def __init__(self, list_of_images: List[np.ndarray],\n list_of_segs_from_prev_stage: Union[List[np.ndarray], None],\n list_of_image_properties: List[dict],\n truncated_ofnames: Union[List[str], None],\n plans_manager: PlansManager, dataset_json: dict, configuration_manager: ConfigurationManager,\n num_threads_in_multithreaded: int = 1, verbose: bool = False):\n preprocessor = configuration_manager.preprocessor_class(verbose=verbose)\n self.preprocessor, self.plans_manager, self.configuration_manager, self.dataset_json, self.truncated_ofnames = \\\n preprocessor, plans_manager, configuration_manager, dataset_json, truncated_ofnames\n\n self.label_manager = plans_manager.get_label_manager(dataset_json)\n\n if list_of_segs_from_prev_stage is None:\n list_of_segs_from_prev_stage = [None] * len(list_of_images)\n if truncated_ofnames is None:\n truncated_ofnames = [None] * len(list_of_images)\n\n super().__init__(\n list(zip(list_of_images, list_of_segs_from_prev_stage, list_of_image_properties, truncated_ofnames)),\n 1, num_threads_in_multithreaded,\n seed_for_shuffle=1, return_incomplete=True,\n shuffle=False, infinite=False, sampling_probabilities=None)\n\n self.indices = list(range(len(list_of_images)))\n\n def generate_train_batch(self):\n idx = self.get_indices()[0]\n image = self._data[idx][0]\n seg_prev_stage = self._data[idx][1]\n props = self._data[idx][2]\n ofname = self._data[idx][3]\n # if we have a segmentation from the previous stage we have to process it together with the images so that we\n # can crop it appropriately (if needed). 
Otherwise it would just be resized to the shape of the data after\n # preprocessing and then there might be misalignments\n data, seg = self.preprocessor.run_case_npy(image, seg_prev_stage, props,\n self.plans_manager,\n self.configuration_manager,\n self.dataset_json)\n if seg_prev_stage is not None:\n seg_onehot = convert_labelmap_to_one_hot(seg[0], self.label_manager.foreground_labels, data.dtype)\n data = np.vstack((data, seg_onehot))\n\n data = torch.from_numpy(data)\n\n return {'data': data, 'data_properties': props, 'ofile': ofname}" }, { "identifier": "preprocessing_iterator_fromfiles", "path": "nnunetv2/inference/data_iterators.py", "snippet": "def preprocessing_iterator_fromfiles(list_of_lists: List[List[str]],\n list_of_segs_from_prev_stage_files: Union[None, List[str]],\n output_filenames_truncated: Union[None, List[str]],\n plans_manager: PlansManager,\n dataset_json: dict,\n configuration_manager: ConfigurationManager,\n num_processes: int,\n pin_memory: bool = False,\n verbose: bool = False):\n context = multiprocessing.get_context('spawn')\n manager = Manager()\n num_processes = min(len(list_of_lists), num_processes)\n assert num_processes >= 1\n processes = []\n done_events = []\n target_queues = []\n abort_event = manager.Event()\n for i in range(num_processes):\n event = manager.Event()\n queue = Manager().Queue(maxsize=1)\n pr = context.Process(target=preprocess_fromfiles_save_to_queue,\n args=(\n list_of_lists[i::num_processes],\n list_of_segs_from_prev_stage_files[\n i::num_processes] if list_of_segs_from_prev_stage_files is not None else None,\n output_filenames_truncated[\n i::num_processes] if output_filenames_truncated is not None else None,\n plans_manager,\n dataset_json,\n configuration_manager,\n queue,\n event,\n abort_event,\n verbose\n ), daemon=True)\n pr.start()\n target_queues.append(queue)\n done_events.append(event)\n processes.append(pr)\n\n worker_ctr = 0\n while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()):\n if not target_queues[worker_ctr].empty():\n item = target_queues[worker_ctr].get()\n worker_ctr = (worker_ctr + 1) % num_processes\n else:\n all_ok = all(\n [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set()\n if not all_ok:\n raise RuntimeError('Background workers died. Look for the error message further up! If there is '\n 'none then your RAM was full and the worker was killed by the OS. 
Use fewer '\n 'workers or get more RAM in that case!')\n sleep(0.01)\n continue\n if pin_memory:\n [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]\n yield item\n [p.join() for p in processes]" }, { "identifier": "preprocessing_iterator_fromnpy", "path": "nnunetv2/inference/data_iterators.py", "snippet": "def preprocessing_iterator_fromnpy(list_of_images: List[np.ndarray],\n list_of_segs_from_prev_stage: Union[List[np.ndarray], None],\n list_of_image_properties: List[dict],\n truncated_ofnames: Union[List[str], None],\n plans_manager: PlansManager,\n dataset_json: dict,\n configuration_manager: ConfigurationManager,\n num_processes: int,\n pin_memory: bool = False,\n verbose: bool = False):\n context = multiprocessing.get_context('spawn')\n manager = Manager()\n num_processes = min(len(list_of_images), num_processes)\n assert num_processes >= 1\n target_queues = []\n processes = []\n done_events = []\n abort_event = manager.Event()\n for i in range(num_processes):\n event = manager.Event()\n queue = manager.Queue(maxsize=1)\n pr = context.Process(target=preprocess_fromnpy_save_to_queue,\n args=(\n list_of_images[i::num_processes],\n list_of_segs_from_prev_stage[\n i::num_processes] if list_of_segs_from_prev_stage is not None else None,\n list_of_image_properties[i::num_processes],\n truncated_ofnames[i::num_processes] if truncated_ofnames is not None else None,\n plans_manager,\n dataset_json,\n configuration_manager,\n queue,\n event,\n abort_event,\n verbose\n ), daemon=True)\n pr.start()\n done_events.append(event)\n processes.append(pr)\n target_queues.append(queue)\n\n worker_ctr = 0\n while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()):\n if not target_queues[worker_ctr].empty():\n item = target_queues[worker_ctr].get()\n worker_ctr = (worker_ctr + 1) % num_processes\n else:\n all_ok = all(\n [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set()\n if not all_ok:\n raise RuntimeError('Background workers died. Look for the error message further up! If there is '\n 'none then your RAM was full and the worker was killed by the OS. 
Use fewer '\n 'workers or get more RAM in that case!')\n sleep(0.01)\n continue\n if pin_memory:\n [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)]\n yield item\n [p.join() for p in processes]" }, { "identifier": "export_prediction_from_logits", "path": "nnunetv2/inference/export_prediction.py", "snippet": "def export_prediction_from_logits(predicted_array_or_file: Union[np.ndarray, torch.Tensor], properties_dict: dict,\n configuration_manager: ConfigurationManager,\n plans_manager: PlansManager,\n dataset_json_dict_or_file: Union[dict, str], output_file_truncated: str,\n save_probabilities: bool = False):\n # if isinstance(predicted_array_or_file, str):\n # tmp = deepcopy(predicted_array_or_file)\n # if predicted_array_or_file.endswith('.npy'):\n # predicted_array_or_file = np.load(predicted_array_or_file)\n # elif predicted_array_or_file.endswith('.npz'):\n # predicted_array_or_file = np.load(predicted_array_or_file)['softmax']\n # os.remove(tmp)\n\n if isinstance(dataset_json_dict_or_file, str):\n dataset_json_dict_or_file = load_json(dataset_json_dict_or_file)\n\n label_manager = plans_manager.get_label_manager(dataset_json_dict_or_file)\n ret = convert_predicted_logits_to_segmentation_with_correct_shape(\n predicted_array_or_file, plans_manager, configuration_manager, label_manager, properties_dict,\n return_probabilities=save_probabilities\n )\n del predicted_array_or_file\n\n # save\n if save_probabilities:\n segmentation_final, probabilities_final = ret\n np.savez_compressed(output_file_truncated + '.npz', probabilities=probabilities_final)\n save_pickle(properties_dict, output_file_truncated + '.pkl')\n del probabilities_final, ret\n else:\n segmentation_final = ret\n del ret\n\n rw = plans_manager.image_reader_writer_class()\n rw.write_seg(segmentation_final, output_file_truncated + dataset_json_dict_or_file['file_ending'],\n properties_dict)" }, { "identifier": "convert_predicted_logits_to_segmentation_with_correct_shape", "path": "nnunetv2/inference/export_prediction.py", "snippet": "def convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits: Union[torch.Tensor, np.ndarray],\n plans_manager: PlansManager,\n configuration_manager: ConfigurationManager,\n label_manager: LabelManager,\n properties_dict: dict,\n return_probabilities: bool = False,\n num_threads_torch: int = default_num_processes):\n old_threads = torch.get_num_threads()\n torch.set_num_threads(num_threads_torch)\n\n # resample to original shape\n current_spacing = configuration_manager.spacing if \\\n len(configuration_manager.spacing) == \\\n len(properties_dict['shape_after_cropping_and_before_resampling']) else \\\n [properties_dict['spacing'][0], *configuration_manager.spacing]\n predicted_logits = configuration_manager.resampling_fn_probabilities(predicted_logits,\n properties_dict['shape_after_cropping_and_before_resampling'],\n current_spacing,\n properties_dict['spacing'])\n # return value of resampling_fn_probabilities can be ndarray or Tensor but that does not matter because\n # apply_inference_nonlin will convert to torch\n predicted_probabilities = label_manager.apply_inference_nonlin(predicted_logits)\n del predicted_logits\n segmentation = label_manager.convert_probabilities_to_segmentation(predicted_probabilities)\n\n # segmentation may be torch.Tensor but we continue with numpy\n if isinstance(segmentation, torch.Tensor):\n segmentation = segmentation.cpu().numpy()\n\n # put segmentation in bbox (revert cropping)\n segmentation_reverted_cropping = 
np.zeros(properties_dict['shape_before_cropping'],\n dtype=np.uint8 if len(label_manager.foreground_labels) < 255 else np.uint16)\n slicer = bounding_box_to_slice(properties_dict['bbox_used_for_cropping'])\n segmentation_reverted_cropping[slicer] = segmentation\n del segmentation\n\n # revert transpose\n segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(plans_manager.transpose_backward)\n if return_probabilities:\n # revert cropping\n predicted_probabilities = label_manager.revert_cropping_on_probabilities(predicted_probabilities,\n properties_dict[\n 'bbox_used_for_cropping'],\n properties_dict[\n 'shape_before_cropping'])\n predicted_probabilities = predicted_probabilities.cpu().numpy()\n # revert transpose\n predicted_probabilities = predicted_probabilities.transpose([0] + [i + 1 for i in\n plans_manager.transpose_backward])\n torch.set_num_threads(old_threads)\n return segmentation_reverted_cropping, predicted_probabilities\n else:\n torch.set_num_threads(old_threads)\n return segmentation_reverted_cropping" }, { "identifier": "compute_gaussian", "path": "nnunetv2/inference/sliding_window_prediction.py", "snippet": "@lru_cache(maxsize=2)\ndef compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma_scale: float = 1. / 8,\n value_scaling_factor: float = 1, dtype=torch.float16, device=torch.device('cuda', 0)) \\\n -> torch.Tensor:\n tmp = np.zeros(tile_size)\n center_coords = [i // 2 for i in tile_size]\n sigmas = [i * sigma_scale for i in tile_size]\n tmp[tuple(center_coords)] = 1\n gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)\n\n gaussian_importance_map = torch.from_numpy(gaussian_importance_map)\n\n gaussian_importance_map = gaussian_importance_map / torch.max(gaussian_importance_map) * value_scaling_factor\n gaussian_importance_map = gaussian_importance_map.type(dtype).to(device)\n\n # gaussian_importance_map cannot be 0, otherwise we may end up with nans!\n gaussian_importance_map[gaussian_importance_map == 0] = torch.min(\n gaussian_importance_map[gaussian_importance_map != 0])\n\n return gaussian_importance_map" }, { "identifier": "compute_steps_for_sliding_window", "path": "nnunetv2/inference/sliding_window_prediction.py", "snippet": "def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_size: Tuple[int, ...], tile_step_size: float) -> \\\n List[List[int]]:\n assert [i >= j for i, j in zip(image_size, tile_size)], \"image size must be as large or larger than patch_size\"\n assert 0 < tile_step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'\n\n # our step width is patch_size*step_size at most, but can be narrower. 
For example if we have image size of\n # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46\n target_step_sizes_in_voxels = [i * tile_step_size for i in tile_size]\n\n num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, tile_size)]\n\n steps = []\n for dim in range(len(tile_size)):\n # the highest step value for this dimension is\n max_step_value = image_size[dim] - tile_size[dim]\n if num_steps[dim] > 1:\n actual_step_size = max_step_value / (num_steps[dim] - 1)\n else:\n actual_step_size = 99999999999 # does not matter because there is only one step at 0\n\n steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]\n\n steps.append(steps_here)\n\n return steps" }, { "identifier": "get_output_folder", "path": "nnunetv2/utilities/file_path_utilities.py", "snippet": "def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer',\n plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres',\n fold: Union[str, int] = None) -> str:\n tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id),\n convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration))\n if fold is not None:\n tmp = join(tmp, f'fold_{fold}')\n return tmp" }, { "identifier": "check_workers_alive_and_busy", "path": "nnunetv2/utilities/file_path_utilities.py", "snippet": "def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0):\n \"\"\"\n\n returns True if the number of results that are not ready is greater than the number of available workers + allowed_num_queued\n \"\"\"\n alive = [i.is_alive() for i in worker_list]\n if not all(alive):\n raise RuntimeError('Some background workers are no longer alive')\n\n not_ready = [not i.ready() for i in results_list]\n if sum(not_ready) >= (len(export_pool._pool) + allowed_num_queued):\n return True\n return False" }, { "identifier": "recursive_find_python_class", "path": "nnunetv2/utilities/find_class_by_name.py", "snippet": "def recursive_find_python_class(folder: str, class_name: str, current_module: str):\n tr = None\n for importer, modname, ispkg in pkgutil.iter_modules([folder]):\n # print(modname, ispkg)\n if not ispkg:\n m = importlib.import_module(current_module + \".\" + modname)\n if hasattr(m, class_name):\n tr = getattr(m, class_name)\n break\n\n if tr is None:\n for importer, modname, ispkg in pkgutil.iter_modules([folder]):\n if ispkg:\n next_current_module = current_module + \".\" + modname\n tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module)\n if tr is not None:\n break\n return tr" }, { "identifier": "empty_cache", "path": "nnunetv2/utilities/helpers.py", "snippet": "def empty_cache(device: torch.device):\n if device.type == 'cuda':\n torch.cuda.empty_cache()\n elif device.type == 'mps':\n from torch import mps\n mps.empty_cache()\n else:\n pass" }, { "identifier": "dummy_context", "path": "nnunetv2/utilities/helpers.py", "snippet": "class dummy_context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "recursive_fix_for_json_export", "path": "nnunetv2/utilities/json_export.py", "snippet": "def recursive_fix_for_json_export(my_dict: dict):\n # json is stupid. 'cannot serialize object of type bool_/int64/float64'. 
Come on bro.\n keys = list(my_dict.keys()) # cannot iterate over keys() if we change keys....\n for k in keys:\n if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):\n tmp = my_dict[k]\n del my_dict[k]\n my_dict[int(k)] = tmp\n del tmp\n k = int(k)\n\n if isinstance(my_dict[k], dict):\n recursive_fix_for_json_export(my_dict[k])\n elif isinstance(my_dict[k], np.ndarray):\n assert my_dict[k].ndim == 1, 'only 1d arrays are supported'\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)\n elif isinstance(my_dict[k], (np.bool_,)):\n my_dict[k] = bool(my_dict[k])\n elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):\n my_dict[k] = int(my_dict[k])\n elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):\n my_dict[k] = float(my_dict[k])\n elif isinstance(my_dict[k], list):\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))\n elif isinstance(my_dict[k], tuple):\n my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)\n elif isinstance(my_dict[k], torch.device):\n my_dict[k] = str(my_dict[k])\n else:\n pass # pray it can be serialized" }, { "identifier": "determine_num_input_channels", "path": "nnunetv2/utilities/label_handling/label_handling.py", "snippet": "def determine_num_input_channels(plans_manager: PlansManager,\n configuration_or_config_manager: Union[str, ConfigurationManager],\n dataset_json: dict) -> int:\n if isinstance(configuration_or_config_manager, str):\n config_manager = plans_manager.get_configuration(configuration_or_config_manager)\n else:\n config_manager = configuration_or_config_manager\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])\n\n # cascade has different number of input channels\n if config_manager.previous_stage_name is not None:\n num_label_inputs = len(label_manager.foreground_labels)\n num_input_channels = num_modalities + num_label_inputs\n else:\n num_input_channels = num_modalities\n return num_input_channels" }, { "identifier": "PlansManager", "path": "nnunetv2/utilities/plans_handling/plans_handler.py", "snippet": "class PlansManager(object):\n def __init__(self, plans_file_or_dict: Union[str, dict]):\n \"\"\"\n Why do we need this?\n 1) resolve inheritance in configurations\n 2) expose otherwise annoying stuff like getting the label manager or IO class from a string\n 3) clearly expose the things that are in the plans instead of hiding them in a dict\n 4) cache shit\n\n This class does not prevent you from going wild. You can still use the plans directly if you prefer\n (PlansHandler.plans['key'])\n \"\"\"\n self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)\n\n def __repr__(self):\n return self.plans.__repr__()\n\n def _internal_resolve_configuration_inheritance(self, configuration_name: str,\n visited: Tuple[str, ...] = None) -> dict:\n if configuration_name not in self.plans['configurations'].keys():\n raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. 
Valid '\n f'configuration names are {list(self.plans[\"configurations\"].keys())}.')\n configuration = deepcopy(self.plans['configurations'][configuration_name])\n if 'inherits_from' in configuration:\n parent_config_name = configuration['inherits_from']\n\n if visited is None:\n visited = (configuration_name,)\n else:\n if parent_config_name in visited:\n raise RuntimeError(f\"Circular dependency detected. The following configurations were visited \"\n f\"while solving inheritance (in that order!): {visited}. \"\n f\"Current configuration: {configuration_name}. Its parent configuration \"\n f\"is {parent_config_name}.\")\n visited = (*visited, configuration_name)\n\n base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited)\n base_config.update(configuration)\n configuration = base_config\n return configuration\n\n @lru_cache(maxsize=10)\n def get_configuration(self, configuration_name: str):\n if configuration_name not in self.plans['configurations'].keys():\n raise RuntimeError(f\"Requested configuration {configuration_name} not found in plans. \"\n f\"Available configurations: {list(self.plans['configurations'].keys())}\")\n\n configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)\n return ConfigurationManager(configuration_dict)\n\n @property\n def dataset_name(self) -> str:\n return self.plans['dataset_name']\n\n @property\n def plans_name(self) -> str:\n return self.plans['plans_name']\n\n @property\n def original_median_spacing_after_transp(self) -> List[float]:\n return self.plans['original_median_spacing_after_transp']\n\n @property\n def original_median_shape_after_transp(self) -> List[float]:\n return self.plans['original_median_shape_after_transp']\n\n @property\n @lru_cache(maxsize=1)\n def image_reader_writer_class(self) -> Type[BaseReaderWriter]:\n return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])\n\n @property\n def transpose_forward(self) -> List[int]:\n return self.plans['transpose_forward']\n\n @property\n def transpose_backward(self) -> List[int]:\n return self.plans['transpose_backward']\n\n @property\n def available_configurations(self) -> List[str]:\n return list(self.plans['configurations'].keys())\n\n @property\n @lru_cache(maxsize=1)\n def experiment_planner_class(self) -> Type[ExperimentPlanner]:\n planner_name = self.experiment_planner_name\n experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], \"experiment_planning\"),\n planner_name,\n current_module=\"nnunetv2.experiment_planning\")\n return experiment_planner\n\n @property\n def experiment_planner_name(self) -> str:\n return self.plans['experiment_planner_used']\n\n @property\n @lru_cache(maxsize=1)\n def label_manager_class(self) -> Type[LabelManager]:\n return get_labelmanager_class_from_plans(self.plans)\n\n def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:\n return self.label_manager_class(label_dict=dataset_json['labels'],\n regions_class_order=dataset_json.get('regions_class_order'),\n **kwargs)\n\n @property\n def foreground_intensity_properties_per_channel(self) -> dict:\n if 'foreground_intensity_properties_per_channel' not in self.plans.keys():\n if 'foreground_intensity_properties_by_modality' in self.plans.keys():\n return self.plans['foreground_intensity_properties_by_modality']\n return self.plans['foreground_intensity_properties_per_channel']" }, { "identifier": "ConfigurationManager", "path": "nnunetv2/utilities/plans_handling/plans_handler.py", 
"snippet": "class ConfigurationManager(object):\n def __init__(self, configuration_dict: dict):\n self.configuration = configuration_dict\n\n def __repr__(self):\n return self.configuration.__repr__()\n\n @property\n def data_identifier(self) -> str:\n return self.configuration['data_identifier']\n\n @property\n def preprocessor_name(self) -> str:\n return self.configuration['preprocessor_name']\n\n @property\n @lru_cache(maxsize=1)\n def preprocessor_class(self) -> Type[DefaultPreprocessor]:\n preprocessor_class = recursive_find_python_class(join(nnunetv2.__path__[0], \"preprocessing\"),\n self.preprocessor_name,\n current_module=\"nnunetv2.preprocessing\")\n return preprocessor_class\n\n @property\n def batch_size(self) -> int:\n return self.configuration['batch_size']\n\n @property\n def patch_size(self) -> List[int]:\n return self.configuration['patch_size']\n\n @property\n def median_image_size_in_voxels(self) -> List[int]:\n return self.configuration['median_image_size_in_voxels']\n\n @property\n def spacing(self) -> List[float]:\n return self.configuration['spacing']\n\n @property\n def normalization_schemes(self) -> List[str]:\n return self.configuration['normalization_schemes']\n\n @property\n def use_mask_for_norm(self) -> List[bool]:\n return self.configuration['use_mask_for_norm']\n\n @property\n def UNet_class_name(self) -> str:\n return self.configuration['UNet_class_name']\n\n @property\n @lru_cache(maxsize=1)\n def UNet_class(self) -> Type[nn.Module]:\n unet_class = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], \"architectures\"),\n self.UNet_class_name,\n current_module=\"dynamic_network_architectures.architectures\")\n if unet_class is None:\n raise RuntimeError('The network architecture specified by the plans file '\n 'is non-standard (maybe your own?). 
Fix this by not using '\n 'ConfigurationManager.UNet_class to instantiate '\n 'it (probably just overwrite build_network_architecture of your trainer.')\n return unet_class\n\n @property\n def UNet_base_num_features(self) -> int:\n return self.configuration['UNet_base_num_features']\n\n @property\n def n_conv_per_stage_encoder(self) -> List[int]:\n return self.configuration['n_conv_per_stage_encoder']\n\n @property\n def n_conv_per_stage_decoder(self) -> List[int]:\n return self.configuration['n_conv_per_stage_decoder']\n\n @property\n def num_pool_per_axis(self) -> List[int]:\n return self.configuration['num_pool_per_axis']\n\n @property\n def pool_op_kernel_sizes(self) -> List[List[int]]:\n return self.configuration['pool_op_kernel_sizes']\n\n @property\n def conv_kernel_sizes(self) -> List[List[int]]:\n return self.configuration['conv_kernel_sizes']\n\n @property\n def unet_max_num_features(self) -> int:\n return self.configuration['unet_max_num_features']\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_data(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data'])\n fn = partial(fn, **self.configuration['resampling_fn_data_kwargs'])\n return fn\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_probabilities(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities'])\n fn = partial(fn, **self.configuration['resampling_fn_probabilities_kwargs'])\n return fn\n\n @property\n @lru_cache(maxsize=1)\n def resampling_fn_seg(self) -> Callable[\n [Union[torch.Tensor, np.ndarray],\n Union[Tuple[int, ...], List[int], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray],\n Union[Tuple[float, ...], List[float], np.ndarray]\n ],\n Union[torch.Tensor, np.ndarray]]:\n fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg'])\n fn = partial(fn, **self.configuration['resampling_fn_seg_kwargs'])\n return fn\n\n @property\n def batch_dice(self) -> bool:\n return self.configuration['batch_dice']\n\n @property\n def next_stage_names(self) -> Union[List[str], None]:\n ret = self.configuration.get('next_stage')\n if ret is not None:\n if isinstance(ret, str):\n ret = [ret]\n return ret\n\n @property\n def previous_stage_name(self) -> Union[str, None]:\n return self.configuration.get('previous_stage')" }, { "identifier": "create_lists_from_splitted_dataset_folder", "path": "nnunetv2/utilities/utils.py", "snippet": "def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[\n List[str]]:\n \"\"\"\n does not rely on dataset.json\n \"\"\"\n if identifiers is None:\n identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending)\n files = subfiles(folder, suffix=file_ending, join=False, sort=True)\n list_of_lists = []\n for f in identifiers:\n p = re.compile(re.escape(f) + r\"_\\d\\d\\d\\d\" + re.escape(file_ending))\n list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)])\n return list_of_lists" } ]
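The compute_steps_for_sliding_window snippet above works through an example in its own comments: image size 110, patch size 64, and step size 0.5 give three steps at 0, 23, and 46. The self-contained re-derivation below reproduces that arithmetic for a single dimension without importing nnunetv2, so it is a sketch of the same logic rather than the library function itself (in particular, the single-tile case is simplified to a step of 0 where the original uses a large dummy value).

import numpy as np


def sliding_window_steps_1d(image_size: int, tile_size: int, tile_step_size: float):
    # Mirrors the per-dimension logic of compute_steps_for_sliding_window:
    # the target stride is tile_size * tile_step_size, and the steps are then
    # spread evenly so the last tile ends exactly at the image border.
    target_step = tile_size * tile_step_size
    num_steps = int(np.ceil((image_size - tile_size) / target_step)) + 1
    max_step_value = image_size - tile_size
    if num_steps > 1:
        actual_step = max_step_value / (num_steps - 1)
    else:
        actual_step = 0  # only one tile fits; it starts at 0 (simplification of the original)
    return [int(np.round(actual_step * i)) for i in range(num_steps)]


# Reproduces the worked example from the snippet's comment:
assert sliding_window_steps_1d(110, 64, 0.5) == [0, 23, 46]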
import inspect
import multiprocessing
import os
import traceback
import numpy as np
import torch
import nnunetv2
import argparse
import multiprocessing
import argparse
import multiprocessing
from copy import deepcopy
from time import sleep
from typing import Tuple, Union, List, Optional
from acvl_utils.cropping_and_padding.padding import pad_nd_image
from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter
from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, save_json
from torch import nn
from torch._dynamo import OptimizedModule
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from nnunetv2.configuration import default_num_processes
from nnunetv2.inference.data_iterators import PreprocessAdapterFromNpy, preprocessing_iterator_fromfiles, preprocessing_iterator_fromnpy
from nnunetv2.inference.export_prediction import export_prediction_from_logits, convert_predicted_logits_to_segmentation_with_correct_shape
from nnunetv2.inference.sliding_window_prediction import compute_gaussian, compute_steps_for_sliding_window
from nnunetv2.utilities.file_path_utilities import get_output_folder, check_workers_alive_and_busy
from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
from nnunetv2.utilities.helpers import empty_cache, dummy_context
from nnunetv2.utilities.json_export import recursive_fix_for_json_export
from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder
from nnunetv2.paths import nnUNet_results, nnUNet_raw
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
11,229
# mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images)) pp = preprocessing_iterator_fromnpy( list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing ) return pp def predict_from_list_of_npy_arrays(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, num_processes) return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export) def predict_from_data_iterator(self, data_iterator, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): """ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! 
If 'ofile' is None, the result will be returned instead of written to a file """ with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool: worker_list = [i for i in export_pool._pool] r = [] for preprocessed in data_iterator: data = preprocessed['data'] if isinstance(data, str): delfile = data data = torch.from_numpy(np.load(data)) os.remove(delfile) ofile = preprocessed['ofile'] if ofile is not None: print(f'\nPredicting {os.path.basename(ofile)}:') else: print(f'\nPredicting image of shape {data.shape}:') print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}') properties = preprocessed['data_properties'] # let's not get into a runaway situation where the GPU predicts so fast that the disk has to b swamped with # npy files proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) while not proceed: # print('sleeping') sleep(0.1) proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) prediction = self.predict_logits_from_preprocessed_data(data).cpu() if ofile is not None: # this needs to go into background processes # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager, # dataset_json, ofile, save_probabilities) print('sending off prediction to background worker for resampling and export') r.append( export_pool.starmap_async( export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, ofile, save_probabilities),) ) ) else: # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager, # configuration_manager, label_manager, # properties, # save_probabilities) print('sending off prediction to background worker for resampling') r.append( export_pool.starmap_async(
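predict_from_data_iterator in the cropped_code above only requires that every element yielded by the iterator be a dict with 'data', 'ofile', and 'data_properties' keys, with ofile=None meaning the prediction is returned instead of written. The generator below is an illustrative sketch of the minimum such iterator for already-preprocessed inputs; the property dicts are placeholders, and a real run would use the preprocessing iterators from the context snippets instead.

def minimal_data_iterator(preprocessed_tensors, output_prefixes=None, properties=None):
    """Yield dicts in the shape predict_from_data_iterator expects.

    ``preprocessed_tensors`` must already be preprocessed the way nnU-Net's
    preprocessing iterators would do it; this sketch only demonstrates the
    required dictionary layout, not the preprocessing itself.
    """
    for i, data in enumerate(preprocessed_tensors):
        yield {
            "data": data,  # a torch.Tensor (or a path to an .npy file, per the code above)
            # With ofile=None the prediction is returned instead of exported to disk.
            "ofile": None if output_prefixes is None else output_prefixes[i],
            "data_properties": {} if properties is None else properties[i],
        }

# Usage sketch (shapes and the predictor object are placeholders):
# iterator = minimal_data_iterator([torch.zeros(1, 128, 128, 128)])
# predictions = predictor.predict_from_data_iterator(iterator)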
class nnUNetPredictor(object): def __init__(self, tile_step_size: float = 0.5, use_gaussian: bool = True, use_mirroring: bool = True, perform_everything_on_gpu: bool = True, device: torch.device = torch.device('cuda'), verbose: bool = False, verbose_preprocessing: bool = False, allow_tqdm: bool = True): self.verbose = verbose self.verbose_preprocessing = verbose_preprocessing self.allow_tqdm = allow_tqdm self.plans_manager, self.configuration_manager, self.list_of_parameters, self.network, self.dataset_json, \ self.trainer_name, self.allowed_mirroring_axes, self.label_manager = None, None, None, None, None, None, None, None self.tile_step_size = tile_step_size self.use_gaussian = use_gaussian self.use_mirroring = use_mirroring if device.type == 'cuda': # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES! # why would I ever want to do that. Stupid dobby. This kills DDP inference... pass if device.type != 'cuda': print(f'perform_everything_on_gpu=True is only supported for cuda devices! Setting this to False') perform_everything_on_gpu = False self.device = device self.perform_everything_on_gpu = perform_everything_on_gpu def initialize_from_trained_model_folder(self, model_training_output_dir: str, use_folds: Union[Tuple[Union[int, str]], None], checkpoint_name: str = 'checkpoint_final.pth'): """ This is used when making predictions with a trained model """ if use_folds is None: use_folds = nnUNetPredictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) dataset_json = load_json(join(model_training_output_dir, 'dataset.json')) plans = load_json(join(model_training_output_dir, 'plans.json')) plans_manager = PlansManager(plans) if isinstance(use_folds, str): use_folds = [use_folds] parameters = [] for i, f in enumerate(use_folds): f = int(f) if f != 'all' else f checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name), map_location=torch.device('cpu')) if i == 0: trainer_name = checkpoint['trainer_name'] configuration_name = checkpoint['init_args']['configuration'] inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \ 'inference_allowed_mirroring_axes' in checkpoint.keys() else None parameters.append(checkpoint['network_weights']) configuration_manager = plans_manager.get_configuration(configuration_name) # restore network num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, 'nnunetv2.training.nnUNetTrainer') network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager, num_input_channels, enable_deep_supervision=False) self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) if ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) \ and not isinstance(self.network, OptimizedModule): print('compiling network') self.network = torch.compile(self.network) def manual_initialization(self, network: nn.Module, plans_manager: PlansManager, configuration_manager: ConfigurationManager, parameters: Optional[List[dict]], dataset_json: dict, 
trainer_name: str, inference_allowed_mirroring_axes: Optional[Tuple[int, ...]]): """ This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation """ self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) allow_compile = True allow_compile = allow_compile and ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) allow_compile = allow_compile and not isinstance(self.network, OptimizedModule) if isinstance(self.network, DistributedDataParallel): allow_compile = allow_compile and isinstance(self.network.module, OptimizedModule) if allow_compile: print('compiling network') self.network = torch.compile(self.network) @staticmethod def auto_detect_available_folds(model_training_output_dir, checkpoint_name): print('use_folds is None, attempting to auto detect available folds') fold_folders = subdirs(model_training_output_dir, prefix='fold_', join=False) fold_folders = [i for i in fold_folders if i != 'fold_all'] fold_folders = [i for i in fold_folders if isfile(join(model_training_output_dir, i, checkpoint_name))] use_folds = [int(i.split('_')[-1]) for i in fold_folders] print(f'found the following folds: {use_folds}') return use_folds def _manage_input_and_output_lists(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[None, str, List[str]], folder_with_segs_from_prev_stage: str = None, overwrite: bool = True, part_id: int = 0, num_parts: int = 1, save_probabilities: bool = False): if isinstance(list_of_lists_or_source_folder, str): list_of_lists_or_source_folder = create_lists_from_splitted_dataset_folder(list_of_lists_or_source_folder, self.dataset_json['file_ending']) print(f'There are {len(list_of_lists_or_source_folder)} cases in the source folder') list_of_lists_or_source_folder = list_of_lists_or_source_folder[part_id::num_parts] caseids = [os.path.basename(i[0])[:-(len(self.dataset_json['file_ending']) + 5)] for i in list_of_lists_or_source_folder] print( f'I am process {part_id} out of {num_parts} (max process ID is {num_parts - 1}, we start counting with 0!)') print(f'There are {len(caseids)} cases that I would like to predict') if isinstance(output_folder_or_list_of_truncated_output_files, str): output_filename_truncated = [join(output_folder_or_list_of_truncated_output_files, i) for i in caseids] else: output_filename_truncated = output_folder_or_list_of_truncated_output_files seg_from_prev_stage_files = [join(folder_with_segs_from_prev_stage, i + self.dataset_json['file_ending']) if folder_with_segs_from_prev_stage is not None else None for i in caseids] # remove already predicted files form the lists if not overwrite and output_filename_truncated is not None: tmp = [isfile(i + self.dataset_json['file_ending']) for i in output_filename_truncated] if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files = 
[seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that its clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images)) pp = preprocessing_iterator_fromnpy( list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing ) return pp def predict_from_list_of_npy_arrays(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3, 
save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, num_processes) return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export) def predict_from_data_iterator(self, data_iterator, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): """ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! If 'ofile' is None, the result will be returned instead of written to a file """ with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool: worker_list = [i for i in export_pool._pool] r = [] for preprocessed in data_iterator: data = preprocessed['data'] if isinstance(data, str): delfile = data data = torch.from_numpy(np.load(data)) os.remove(delfile) ofile = preprocessed['ofile'] if ofile is not None: print(f'\nPredicting {os.path.basename(ofile)}:') else: print(f'\nPredicting image of shape {data.shape}:') print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}') properties = preprocessed['data_properties'] # let's not get into a runaway situation where the GPU predicts so fast that the disk has to b swamped with # npy files proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) while not proceed: # print('sleeping') sleep(0.1) proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) prediction = self.predict_logits_from_preprocessed_data(data).cpu() if ofile is not None: # this needs to go into background processes # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager, # dataset_json, ofile, save_probabilities) print('sending off prediction to background worker for resampling and export') r.append( export_pool.starmap_async( export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, ofile, save_probabilities),) ) ) else: # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager, # configuration_manager, label_manager, # properties, # save_probabilities) print('sending off prediction to background worker for resampling') r.append( export_pool.starmap_async(
convert_predicted_logits_to_segmentation_with_correct_shape, (
5
2023-12-04 19:43:14+00:00
16k
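Aside: the nnU-Net prediction loop shown in the record above throttles submissions to the background export pool so the GPU cannot race ahead and pile up unexported npy files on disk. The following is a minimal, hedged sketch of that backpressure pattern using only the Python standard library; the helper name too_many_pending and the allowed_num_queued limit are illustrative assumptions, not nnU-Net's actual API.

import multiprocessing
import time


def too_many_pending(async_results, allowed_num_queued=2):
    # Stand-in for nnU-Net's worker check: count submitted jobs that have
    # not finished yet and report whether the queue is already full enough.
    pending = sum(1 for r in async_results if not r.ready())
    return pending > allowed_num_queued


def export_result(item):
    # Placeholder for the slow resampling/export step done by the workers.
    time.sleep(0.5)
    return item


if __name__ == "__main__":
    with multiprocessing.get_context("spawn").Pool(2) as pool:
        results = []
        for item in range(10):  # stands in for the preprocessed-data iterator
            while too_many_pending(results):
                time.sleep(0.1)  # wait instead of queueing yet another export
            results.append(pool.apply_async(export_result, (item,)))
        for r in results:
            r.get()  # make sure every queued export actually finished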
Zuricho/chroma_pipeline
chroma/layers/structure/protein_graph.py
[ { "identifier": "Protein", "path": "chroma/data/protein.py", "snippet": "class Protein:\n \"\"\"\n Protein: A utility class for managing proteins within the Chroma ecosystem.\n\n The Protein class offers a suite of methods for loading, saving, transforming, and viewing protein structures\n and trajectories from a variety of input sources such as PDBID, CIF files, and XCS representations.\n\n Attributes:\n sys (System): A protein system object used for various molecular operations.\n device (str): Specifies the device on which tensors are managed. Defaults to `cpu`.\n \"\"\"\n\n sys: System\n device: str = \"cpu\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"Handles automatic loading of the protein based on the input.\n Specifically deals with XCS\n\n Args:\n protein_input (_type_): _description_\n \"\"\"\n\n if len(args) == 1 and isinstance(args[0], System):\n return cls.from_system(*args, **kwargs)\n\n elif len(args) == 3: # 3 Tensor Arguments\n X, C, S = args\n assert isinstance(\n C, torch.Tensor\n ), f\"arg[1] must be a chain (C) torch.Tensor, but get {type(C)}\"\n assert isinstance(\n S, torch.Tensor\n ), f\"arg[2] must be a sequence (S) torch.Tensor, but get {type(S)}\"\n if isinstance(X, list):\n assert all(\n isinstance(x, torch.Tensor) for x in X\n ), \"arg[0] must be an X torch.Tensor or a list of X torch.Tensors\"\n return cls.from_XCS_trajectory(X, C, S)\n elif isinstance(X, torch.Tensor):\n return cls.from_XCS(X, C, S)\n else:\n raise TypeError(\n f\"X must be a list of torch.Tensor that respects XCS format, but get {type(X), type(C), type(S)}\"\n )\n\n elif len(args) == 1 and isinstance(args[0], str):\n if args[0].lower().startswith(\"s3:\"):\n raise NotImplementedError(\n \"download of cifs or pdbs from s3 not supported.\"\n )\n\n if args[0].endswith(\".cif\"):\n return cls.from_CIF(*args, **kwargs)\n\n elif args[0].endswith(\".pdb\"):\n return cls.from_PDB(*args, **kwargs)\n\n else: # PDB or Sequence String\n # Check if it is a valid PDB\n import requests\n\n url = f\"https://data.rcsb.org/rest/v1/core/entry/{args[0]}\"\n VALID_PDBID = requests.get(url).status_code == 200\n VALID_SEQUENCE = all([s in PROTEIN_TOKENS for s in args[0]])\n\n if VALID_PDBID:\n # This only works if connected to the internet,\n # so maybe better status checking will help here\n if VALID_PDBID and VALID_SEQUENCE:\n raise Warning(\n \"Ambuguous input, this is both a valid Sequence string and\"\n \" a valid PDBID. Interpreting as a PDBID, if you wish to\"\n \" initialize as a sequence string please explicitly\"\n \" initialize as Protein.from_sequence(MY_SEQUENCE).\"\n )\n return cls.from_PDBID(*args, **kwargs)\n elif VALID_SEQUENCE:\n return cls.from_sequence(*args, **kwargs)\n else:\n raise NotImplementedError(\n \"Could Not Identify a valid input Type. See docstring for\"\n \" details.\"\n )\n else:\n raise NotImplementedError(\n \"Inputs must either be a 3-tuple of XCS tensors, or a single string\"\n )\n\n @classmethod\n def from_system(cls, system: System, device: str = \"cpu\") -> Protein:\n protein = super(Protein, cls).__new__(cls)\n protein.sys = system\n protein.device = device\n return protein\n\n @classmethod\n def from_XCS(cls, X: torch.Tensor, C: torch.Tensor, S: torch.Tensor) -> Protein:\n \"\"\"\n Create a Protein object from XCS representations.\n\n Args:\n X (torch.Tensor): A 4D tensor representing atomic coordinates of proteins.\n Dimensions are `(batch, residues, atoms (4 or 14), coordinates (3))`.\n C (torch.Tensor): A chain label tensor of shape `(batch, residues)`. 
Values are integers.\n Sign of the value indicates presence (+) or absence (-) of structural\n information for that residue. Magnitude indicates which chain the residue belongs to.\n S (torch.Tensor): A sequence information tensor of shape `(batch, residues)`. Contains\n non-negative integers representing residue types at each position.\n\n Returns:\n Protein: Initialized Protein object from the given XCS representation.\n \"\"\"\n protein = super(Protein, cls).__new__(cls)\n protein.sys = System.from_XCS(X, C, S)\n protein.device = X.device\n return protein\n\n @classmethod\n def from_XCS_trajectory(\n cls, X_traj: List[torch.Tensor], C: torch.Tensor, S: torch.Tensor\n ) -> Protein:\n \"\"\"\n Initialize a Protein object from a trajectory of XCS representations.\n\n Args:\n X_traj (List[torch.Tensor]): List of X tensor representations over time. Each tensor represents atomic\n coordinates of proteins with dimensions `(batch, residues, atoms (4 or 14), coordinates (3))`.\n C (torch.Tensor): A chain label tensor of shape `(batch, residues)`. Values are integers.\n Sign of the value indicates presence (+) or absence (-) of structural\n information for that residue. Magnitude indicates which chain the residue belongs to.\n S (torch.Tensor): A sequence information tensor of shape `(batch, residues)`. Contains\n non-negative integers representing residue types at each position.\n\n Returns:\n Protein: Protein object initialized from the XCS trajectory.\n \"\"\"\n protein = super(Protein, cls).__new__(cls)\n protein.sys = System.from_XCS(X_traj[0], C, S)\n protein.device = C.device\n for X in X_traj[1:]:\n protein.sys.add_model_from_X(X[C > 0])\n return protein\n\n @classmethod\n def from_PDB(cls, input_file: str, device: str = \"cpu\") -> Protein:\n \"\"\"\n Load a Protein object from a provided PDB file.\n\n Args:\n input_file (str): Path to the PDB file to be loaded.\n device (str, optional): The device for tensor operations. Defaults to 'cpu'.\n\n Returns:\n Protein: Initialized Protein object from the provided PDB file.\n \"\"\"\n protein = super(Protein, cls).__new__(cls)\n protein.sys = System.from_PDB(input_file)\n protein.device = device\n return protein\n\n @classmethod\n def from_CIF(\n cls, input_file: str, canonicalize: bool = True, device: str = \"cpu\"\n ) -> Protein:\n \"\"\"\n Load a Protein object from a provided CIF format.\n\n Args:\n input_file (str): Path to the CIF file to be loaded.\n device (str, optional): The device for tensor operations. Defaults to 'cpu'.\n\n Returns:\n Protein: Initialized Protein object from the provided CIF file.\n \"\"\"\n protein = super(Protein, cls).__new__(cls)\n protein.sys = System.from_CIF(input_file)\n protein.device = device\n if canonicalize:\n protein.canonicalize()\n return protein\n\n @classmethod\n def from_PDBID(\n cls, pdb_id: str, canonicalize: bool = True, device: str = \"cpu\"\n ) -> Protein:\n \"\"\"\n Load a Protein object using its PDBID by fetching the corresponding CIF file from the Protein Data Bank.\n\n This method downloads the CIF file for the specified PDBID, processes it to create a Protein object,\n and then deletes the temporary CIF file.\n\n Args:\n pdb_id (str): The PDBID of the protein to fetch.\n canonicalize (bool, optional): If set to True, the protein will be canonicalized post-loading. Defaults to True.\n device (str, optional): The device for tensor operations. 
Defaults to 'cpu'.\n\n Returns:\n Protein: An instance of the Protein class initialized from the fetched CIF file corresponding to the PDBID.\n \"\"\"\n from os import unlink\n\n from chroma.utility.fetchdb import RCSB_file_download\n\n file_cif = f\"/tmp/{pdb_id}.cif\"\n RCSB_file_download(pdb_id, \".cif\", file_cif)\n protein = cls.from_CIF(file_cif, canonicalize=canonicalize, device=device)\n unlink(file_cif)\n return protein\n\n @classmethod\n def from_sequence(\n cls, chains: Union[List[str], str], device: str = \"cpu\"\n ) -> Protein:\n \"\"\"\n Load a protein object purely from Sequence with no structural content.\n\n Args:\n chains (Union[List[str],str]): a list of sequence strings, or a sequence string to create the protein.\n device (str, optional): which device for torch outputs should be used. Defaults to \"cpu\".\n\n Returns:\n Protein: An instance of the Protein class initialized a sequence or list of sequences.\n \"\"\"\n\n if isinstance(chains, str):\n chains = [chains]\n\n system = System(\"system\")\n for c_ix, seq in enumerate(chains):\n chain_id = CHAIN_ALPHABET[c_ix + 1]\n chain = system.add_chain(chain_id)\n\n # Populate the Chain\n three_letter_sequence = []\n for s_ix, s in enumerate(seq):\n resname = polyseq.to_triple(s)\n three_letter_sequence.append(resname)\n chain.add_residue(resname, s_ix + 1, \"\")\n\n # Add Entity\n sys_entity = SystemEntity(\n \"polymer\",\n f\"Sequence Chain {chain_id}\",\n \"polypeptide(L)\",\n three_letter_sequence,\n [False] * len(three_letter_sequence),\n )\n system.add_new_entity(sys_entity, [c_ix])\n\n protein = super(Protein, cls).__new__(cls)\n protein.sys = system\n protein.device = device\n return protein\n\n def to_CIF(self, output_file: str, force: bool = False) -> None:\n \"\"\"\n Save the current Protein object to a file in CIF format.\n\n Args:\n output_file (str): The path where the CIF file should be saved.\n\n \"\"\"\n if output_file.lower().startswith(\"s3:\"):\n raise NotImplementedError(\"cif output to an s3 bucket not supported.\")\n else:\n self.sys.to_CIF(output_file)\n\n def to_PDB(self, output_file: str, force: bool = False) -> None:\n \"\"\"\n Save the current Protein object to a file in PDB format.\n\n Args:\n output_file (str): The path where the PDB file should be saved.\n \"\"\"\n if output_file.lower().startswith(\"s3:\"):\n raise NotImplementedError(\"pdb output to an s3 bucket not supported.\")\n\n else:\n self.sys.to_PDB(output_file)\n\n def to_XCS(\n self, all_atom: bool = False, device: Optional[str] = None\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Convert the current Protein object to its XCS tensor representations.\n\n Args:\n all_atom (bool, optional): Indicates if all atoms should be considered in the conversion. Defaults to False.\n device (str, optional): the device to export XCS tensors to. If not specified uses the device property\n set in the class. Default None.\n\n Returns:\n X (torch.Tensor): A 4D tensor representing atomic coordinates of proteins with dimensions\n `(batch, residues, atoms (4 or 14), coordinates (3))`.\n C (torch.Tensor): A chain label tensor of shape `(batch, residues)`. Values are integers. Sign of\n the value indicates presence (+) or absence (-) of structural information for that residue.\n Magnitude indicates which chain the residue belongs to.\n S (torch.Tensor): A sequence information tensor of shape `(batch, residues)`. 
Contains non-negative\n integers representing residue types at each position.\n \"\"\"\n\n if device is None:\n device = self.device\n\n X, C, S = [tensor.to(device) for tensor in self.sys.to_XCS(all_atom=all_atom)]\n\n return X, C, S\n\n def to_XCS_trajectory(\n self, device: Optional[str] = None,\n ) -> Tuple[List[torch.Tensor], torch.Tensor, torch.Tensor]:\n \"\"\"\n Convert the current Protein object to its XCS tensor representations over a trajectory.\n\n Args:\n device (str, optional): the device to export XCS tensors to. If not specified uses the device property\n set in the class. Default None.\n\n Returns:\n X_traj (List[torch.Tensor]): List of X tensor representations over time. Each tensor represents atomic\n coordinates of proteins with dimensions `(batch, residues, atoms (4 or 14), coordinates (3))`.\n C (torch.Tensor): A chain label tensor of shape `(batch, residues)`. Values are integers. Sign of\n the value indicates presence (+) or absence (-) of structural information for that residue.\n Magnitude indicates which chain the residue belongs to.\n S (torch.Tensor): A sequence information tensor of shape `(batch, residues)`. Contains non-negative\n integers representing residue types at each position.\n \"\"\"\n X, C, S = [], None, None\n for i in range(self.sys.num_models()):\n self.sys.swap_model(i)\n if i == 0:\n X_frame, C, S, loc_indices = self.sys.to_XCS(get_indices=True)\n else:\n X_frame.flatten(0, 2)[:] = torch.from_numpy(\n self.sys._locations[\"coor\"][loc_indices, 0:3]\n )\n X.append(X_frame.clone())\n self.sys.swap_model(i)\n X = torch.cat(X)\n\n if device is None:\n device = self.device\n\n Xtraj, C, S = [tensor.to(device) for tensor in [X, C, S]]\n return [each.unsqueeze(0) for each in Xtraj], C, S\n\n def to(self, file_path: str, force: bool = False) -> None:\n \"\"\"\n General Export for the Protein Class\n\n This method allows for export in pdf or cif based on the file extension.\n explicit saving is still available with the respective export methods.\n\n Args:\n device (str): The desired device for tensor operations, e.g., 'cpu' or 'cuda'.\n \"\"\"\n if file_path.lower().endswith(\".pdb\"):\n self.to_PDB(file_path, force=force)\n elif file_path.lower().endswith(\".cif\"):\n self.to_CIF(file_path, force=force)\n else:\n raise NotImplementedError(\n \"file path must end with either *.cif or *.pdb for export.\"\n )\n\n def length(self, structured: bool = False) -> None:\n \"\"\"\n Retrieve the length of the protein.\n\n Args:\n structured (bool, optional): If set to True, returns the residue size of the structured part of the protein.\n Otherwise, returns the length of the entire protein. Defaults to False.\n\n Returns:\n int: Length of the protein or its structured part based on the 'structured' argument.\n \"\"\"\n if structured:\n return self.sys.num_structured_residues()\n return self.sys.num_residues()\n\n __len__ = length\n\n def canonicalize(self) -> None:\n \"\"\"\n Canonicalize the protein's backbone geometry.\n\n This method processes the protein to ensure it conforms to a canonical form.\n \"\"\"\n self.sys.canonicalize_protein(\n level=2, drop_coors_unknowns=True, drop_coors_missing_backbone=True,\n )\n\n def sequence(self, format: str = \"one-letter-string\") -> Union[List[str], str]:\n \"\"\"\n Retrieve the sequence of the protein in the specified format.\n\n Args:\n format (str, optional): The desired format for the sequence. 
Can be 'three-letter-list' or 'one-letter-string'.\n Defaults to 'one-letter-string'.\n\n Returns:\n Union[List[str], str]: The protein sequence in the desired format.\n\n Raises:\n Exception: If an unknown sequence format is provided.\n \"\"\"\n if format == \"three-letter-list\":\n return list(self.sys.sequence())\n elif format == \"one-letter-string\":\n return self.sys.sequence(\"one-letter-string\")\n else:\n raise Exception(f\"unknown sequence format {format}\")\n\n def display(self, representations: list = []) -> None:\n \"\"\"\n Display the protein using the provided representations in NGL view.\n\n Args:\n representations (list, optional): List of visual representations to use in the display. Defaults to an empty list.\n\n Returns:\n viewer: A viewer object for interactive visualization.\n \"\"\"\n from chroma.utility.ngl import SystemTrajectory, view_gsystem\n\n if self.sys.num_models() == 1:\n viewer = view_gsystem(self.sys)\n for rep in representations:\n viewer.add_representation(rep)\n\n else:\n t = SystemTrajectory(self)\n viewer = nv.NGLWidget(t)\n return viewer\n\n def _ipython_display_(self):\n display(self.display())\n\n def __str__(self):\n \"\"\"Define Print Behavior\n Return Protein Sequence Along with some useful statistics.\n \"\"\"\n protein_string = f\"Protein: {self.sys.name}\\n\"\n for chain in self.sys.chains():\n if chain.sequence is not None:\n protein_string += (\n f\"> Chain {chain.cid} ({len(chain.sequence())} residues)\\n\"\n )\n protein_string += \"\".join(\n [polyseq.to_single(s) for s in chain.sequence()]\n )\n protein_string += \"\\n\\n\"\n\n return protein_string\n\n def get_mask(self, selection: str) -> torch.Tensor:\n \"\"\"\n Generate a mask tensor based on the provided residue selection.\n\n Args:\n selection (str): A selection string to specify which residues should be included in the mask.\n\n Returns:\n torch.Tensor: A mask tensor of shape `(1, protein length)`, where positions corresponding to selected residues have a value of 1.\n \"\"\"\n residue_gtis = self.sys.select_residues(selection, gti=True)\n D = torch.zeros(1, self.sys.num_residues(), device=self.device)\n for gti in residue_gtis:\n D[0, gti] = 1\n return D\n\n def __copy__(self):\n new_system = copy.copy(self.sys)\n device = self.device\n return Protein(new_system, device=device)\n\n def __deepcopy__(self, memo):\n new_system = copy.deepcopy(self.sys)\n device = self.device\n return Protein(new_system, device=device)" }, { "identifier": "graph", "path": "chroma/layers/graph.py", "snippet": "class GraphNN(nn.Module):\nclass GraphLayer(nn.Module):\nclass MLP(nn.Module):\nclass MaskedNorm(nn.Module):\n def __init__(\n self,\n num_layers: int,\n dim_nodes: int,\n dim_edges: int,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n mlp_activation: str = \"relu\",\n dropout: float = 0.0,\n norm: str = \"transformer\",\n scale: float = 1.0,\n skip_connect_input: bool = False,\n attentional: bool = False,\n num_attention_heads: int = 4,\n checkpoint_gradients: bool = False,\n ):\n def forward(\n self,\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n def checkpoint(self, layer, *args):\n def sequential(\n self,\n tensors: dict,\n pre_step_function: Callable = None,\n post_step_function: Callable = None,\n ) -> dict:\n def 
init_steps(\n self, node_h: torch.Tensor, edge_h: torch.Tensor\n ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n def step(\n self,\n t: int,\n node_h_cache: List[torch.Tensor],\n edge_h_cache: List[torch.Tensor],\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n def __init__(\n self,\n dim_nodes: int,\n dim_edges: int,\n node_mlp_layers: int = 1,\n node_mlp_dim: Optional[int] = None,\n edge_update: bool = True,\n edge_mlp_layers: int = 1,\n edge_mlp_dim: Optional[int] = None,\n mlp_activation: str = \"relu\",\n dropout: float = 0.0,\n norm: str = \"transformer\",\n scale: float = 1.0,\n attentional: bool = False,\n num_attention_heads: int = 4,\n ):\n def attend(\n self, node_h: torch.Tensor, messages: torch.Tensor, mask_ij: torch.Tensor\n ) -> torch.Tensor:\n def _normalize(self, node_h, edge_h, mask_i=None, mask_ij=None):\n def _normalize_t(\n self, edge_node_stack_t, mask_ij_t, include_nodes=True, include_edges=True\n ):\n def _update_nodes(\n self, node_h, node_h_norm, edge_h_norm, edge_idx, mask_i=None, mask_ij=None\n ):\n def _update_nodes_t(\n self,\n t,\n node_h,\n node_h_norm_t,\n edge_h_norm_t,\n edge_idx_t,\n mask_i_t=None,\n mask_ij_t=None,\n ):\n def _update_edges(self, edge_h, node_h_out, edge_h_norm, edge_idx, mask_ij):\n def _update_edges_t(\n self, t, edge_h_t, node_h_out, edge_h_t_norm, edge_idx_t, mask_ij_t\n ):\n def forward(\n self,\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ):\n def step(\n self,\n t: int,\n node_h: torch.Tensor,\n node_h_out: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: Optional[torch.Tensor] = None,\n mask_ij: Optional[torch.Tensor] = None,\n ):\n def __init__(\n self,\n dim_in: int,\n dim_hidden: Optional[int] = None,\n dim_out: Optional[int] = None,\n num_layers_hidden: int = 1,\n activation: str = \"relu\",\n dropout: float = 0.0,\n ):\n def forward(self, h: torch.Tensor) -> torch.Tensor:\ndef collect_neighbors(node_h: torch.Tensor, edge_idx: torch.Tensor) -> torch.Tensor:\ndef collect_edges(\n edge_h_dense: torch.Tensor, edge_idx: torch.LongTensor\n) -> torch.Tensor:\ndef collect_edges_transpose(\n edge_h: torch.Tensor, edge_idx: torch.LongTensor, mask_ij: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef scatter_edges(edge_h: torch.Tensor, edge_idx: torch.LongTensor) -> torch.Tensor:\ndef pack_edges(\n node_h: torch.Tensor, edge_h: torch.Tensor, edge_idx: torch.LongTensor\n) -> torch.Tensor:\ndef pack_edges_step(\n t: int, node_h: torch.Tensor, edge_h_t: torch.Tensor, edge_idx_t: torch.LongTensor\n) -> torch.Tensor:\ndef transpose_edge_idx(\n edge_idx: torch.LongTensor, mask_ij: torch.Tensor\n) -> Tuple[torch.LongTensor, torch.Tensor]:\ndef permute_tensor(\n tensor: torch.Tensor, dim: int, permute_idx: torch.LongTensor\n) -> torch.Tensor:\ndef permute_graph_embeddings(\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: torch.Tensor,\n mask_ij: torch.Tensor,\n permute_idx: torch.LongTensor,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor]:\ndef edge_mask_causal(edge_idx: torch.LongTensor, mask_ij: torch.Tensor) -> torch.Tensor:\n def __init__(\n self,\n dim: int,\n num_features: int = -1,\n affine: bool = False,\n norm: str = \"instance\",\n eps: float = 1e-5,\n ):\n def forward(\n self, 
data: torch.Tensor, mask: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n B, L, K, D = messages.size()" }, { "identifier": "FourierFeaturization", "path": "chroma/layers/basic.py", "snippet": "class FourierFeaturization(nn.Module):\n \"\"\"Applies fourier featurization of low-dimensional (usually spatial) input data as described in [https://arxiv.org/abs/2006.10739] ,\n optionally trainable as described in [https://arxiv.org/abs/2106.02795].\n\n Args:\n d_input (int): dimension of inputs\n d_model (int): dimension of outputs\n trainable (bool): whether to learn the frequency of fourier features\n scale (float): if not trainable, controls the scale of fourier feature periods (see reference for description, this parameter matters and should be tuned!)\n\n Inputs:\n input (torch.tensor): of size (batch_size, *, d_input)\n\n Outputs:\n output (torch.tensor): of size (batch_size, *, d_output)\n \"\"\"\n\n def __init__(self, d_input, d_model, trainable=False, scale=1.0):\n super().__init__()\n self.scale = scale\n\n if d_model % 2 != 0:\n raise ValueError(\n \"d_model needs to be even for this featurization, try again!\"\n )\n\n B = 2 * math.pi * scale * torch.randn(d_input, d_model // 2)\n self.trainable = trainable\n if not trainable:\n self.register_buffer(\"B\", B)\n else:\n self.register_parameter(\"B\", torch.nn.Parameter(B))\n\n def forward(self, inputs):\n h = inputs @ self.B\n return torch.cat([h.cos(), h.sin()], -1)" }, { "identifier": "PositionalEncoding", "path": "chroma/layers/basic.py", "snippet": "class PositionalEncoding(nn.Module):\n \"\"\"Axis-aligned positional encodings with log-linear spacing.\n\n Args:\n d_input (int): dimension of inputs\n d_model (int): dimension of outputs\n period_range (tuple of floats): Min and maximum periods for the\n frequency components. 
Fourier features will be log-linearly spaced\n between these values (inclusive).\n\n Inputs:\n input (torch.tensor): of size (..., d_input)\n\n Outputs:\n output (torch.tensor): of size (..., d_model)\n \"\"\"\n\n def __init__(self, d_model, d_input=1, period_range=(1.0, 1000.0)):\n super().__init__()\n\n if d_model % (2 * d_input) != 0:\n raise ValueError(\n \"d_model needs to be divisible by 2*d_input for this featurization, \"\n f\"but got {d_model} versus {d_input}\"\n )\n\n num_frequencies = d_model // (2 * d_input)\n log_bounds = np.log10(period_range)\n p = torch.logspace(log_bounds[0], log_bounds[1], num_frequencies, base=10.0)\n w = 2 * math.pi / p\n self.register_buffer(\"w\", w)\n\n def forward(self, inputs):\n batch_dims = list(inputs.shape)[:-1]\n # (..., 1, num_out) * (..., num_in, 1)\n w = self.w.reshape(len(batch_dims) * [1] + [1, -1])\n h = w * inputs[..., None]\n h = torch.cat([h.cos(), h.sin()], -1).reshape(batch_dims + [-1])\n return h" }, { "identifier": "backbone", "path": "chroma/layers/structure/backbone.py", "snippet": "class ProteinBackbone(nn.Module):\nclass RigidTransform(nn.Module):\nclass RigidTransformer(nn.Module):\nclass BackboneBuilder(nn.Module):\nclass FrameBuilder(nn.Module):\nclass GraphBackboneUpdate(nn.Module):\nclass LossBackboneResidueDistance(nn.Module):\n def __init__(\n self,\n num_residues: int,\n num_batch: int = 1,\n init_state: str = \"alpha\",\n use_internal_coords: bool = True,\n X_init: Optional[torch.Tensor] = None,\n ):\n def forward(self) -> torch.Tensor:\n def __init__(\n self,\n num_batch: int = 1,\n keep_centered: bool = False,\n scale_dX: float = 1.0,\n scale_q: float = 1.0,\n ):\n def forward(self, X: torch.Tensor) -> torch.Tensor:\n def __init__(self, center_rotation: bool = True, keep_centered: bool = False):\n def _rotation_matrix(self, q_unc: torch.Tensor) -> torch.Tensor:\n def forward(\n self,\n X: torch.Tensor,\n dX: torch.Tensor,\n q: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n def __init__(self):\n def forward(\n self,\n phi: torch.Tensor,\n psi: torch.Tensor,\n omega: Optional[torch.Tensor] = None,\n angles: Optional[torch.Tensor] = None,\n lengths: Optional[torch.Tensor] = None,\n add_O: bool = True,\n ) -> torch.Tensor:\n def _build_x_i(v_i, l_i, x, u_minus_1, u_minus_2):\n def __init__(self, distance_eps: float = 1e-3):\n def _build_O(self, X_chain: torch.Tensor, C: torch.LongTensor):\n def forward(\n self,\n R: torch.Tensor,\n t: torch.Tensor,\n C: torch.LongTensor,\n q: Optional[torch.Tensor] = None,\n ):\n def inverse(\n self, X: torch.Tensor, C: torch.LongTensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n def __init__(\n self,\n dim_nodes: int,\n dim_edges: int,\n distance_scale: float = 10.0,\n distance_eps: float = 1e-3,\n method: str = \"neighbor\",\n iterations: int = 1,\n unconstrained: bool = True,\n num_transform_weights: int = 1,\n black_hole_init: bool = False,\n ):\n def _init_black_hole(self, X):\n def _update_local_transform(self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij):\n def _update_neighbor_transform(\n self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij\n ):\n def _update_neighbor_global_transform(\n self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij\n ):\n def _update_neighbor_global_affine_transform(\n self, X, C, node_h, edge_h, edge_idx, mask_i, mask_ij\n ):\n def _inner_transforms(self, X, C, edge_idx):\n def _transform_loss(self, R_ij_predict, t_ij_predict, X, C, edge_idx, mask_ij):\n def forward(\n self,\n X: torch.Tensor,\n C: 
torch.LongTensor,\n node_h: torch.Tensor,\n edge_h: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_i: torch.Tensor,\n mask_ij: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n def __init__(self, dist_eps: float = 1e-3):\n def _D(self, X):\n def forward(\n self, X_mobile: torch.Tensor, X_target: torch.Tensor, C: torch.LongTensor\n ) -> torch.Tensor:\ndef center_X(X: torch.Tensor, C: torch.LongTensor) -> torch.Tensor:\ndef atomic_mean(\n X_flat: torch.Tensor, mask: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef scale_around_mean(\n X: torch.Tensor, C: torch.LongTensor, scale: float\n) -> torch.Tensor:\ndef impute_masked_X(X: torch.Tensor, C: torch.LongTensor) -> torch.Tensor:\ndef expand_chain_map(C: torch.LongTensor) -> torch.Tensor:\n X = self.backbone_geometry(self.phi, self.psi)\n X = self.X\n X = self.transform(X)\n R = torch.stack([\n a2 + b2 - c2 - d2, 2*b*c - 2*a*d, 2*b*d + 2*a*c,\n 2*b*c + 2*a*d, a2 - b2 + c2 - d2, 2*c*d - 2*a*b,\n 2*b*d - 2*a*c, 2*c*d + 2*a*b, a2 - b2 - c2 + d2\n ], dim=-1)\n R = R.view([num_batch, 3, 3])\n R = self._rotation_matrix(q)\n R = torch.stack([u_minus_1, n_b, n_a], 2)\n X = []\n X = torch.stack(X, 1)\n X = X.view([N_batch, -1, 3, 3])\n X = X[:, :-1, :, :]\n X_O = X[:, :, 2, :] + u\n X = torch.cat([X, X_O.unsqueeze(2)], 2)\n X = X - X.mean([1, 2, 3], keepdim=True)\n R = torch.eye(3).reshape([1, 1, 1, 3, 3])\n X_N, X_CA, X_C = X_chain.unbind(-2)\n X_O = geometry.extend_atoms(\n X_N_next,\n X_CA,\n X_C,\n self._length_C_O * ones,\n self._angle_CA_C_O * ones,\n self._dihedral_Np_CA_C_O * ones,\n degrees=True,\n )\n X = mask * torch.stack([X_N, X_CA, X_C, X_O], dim=-2)\n R = geometry.rotations_from_quaternions(\n q, normalize=True, eps=self.distance_eps\n )\n R = R.unsqueeze(-3)\n X = self._build_O(X_chain, C)\n R = mask.unsqueeze(-1) * R\n R = (\n torch.eye(3, device=X.device, dtype=X.dtype)\n .reshape(1, 1, 3, 3)\n .repeat(X.size(0), X.size(1), 1, 1)\n )\n R = geometry.rotations_from_quaternions(\n self.W_q(node_h), normalize=True, eps=self.distance_eps\n )\n D = (\n (X_mean[:, :, None, :] - X_mean[:, None, :, :])\n .square()\n .sum(-1)\n .add(self.dist_eps)\n .sqrt()\n )" }, { "identifier": "geometry", "path": "chroma/layers/structure/geometry.py", "snippet": "def geometry(self):\n bond, angle, dihedral = self.virtual_geometries[self.virtual_type]\n return bond, angle, dihedral" }, { "identifier": "transforms", "path": "chroma/layers/structure/transforms.py", "snippet": "def compose_transforms(\n R_a: torch.Tensor, t_a: torch.Tensor, R_b: torch.Tensor, t_b: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef compose_translation(\n R_a: torch.Tensor, t_a: torch.Tensor, t_b: torch.Tensor\n) -> torch.Tensor:\ndef compose_inner_transforms(\n R_a: torch.Tensor, t_a: torch.Tensor, R_b: torch.Tensor, t_b: torch.Tensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef fuse_gaussians_isometric_plus_radial(\n x: torch.Tensor,\n p_iso: torch.Tensor,\n p_rad: torch.Tensor,\n direction: torch.Tensor,\n dim: int,\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef collect_neighbor_transforms(\n R_i: torch.Tensor, t_i: torch.Tensor, edge_idx: torch.LongTensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef collect_neighbor_inner_transforms(\n R_i: torch.Tensor, t_i: torch.Tensor, edge_idx: torch.LongTensor\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef equilibrate_transforms(\n R_i: torch.Tensor,\n t_i: torch.Tensor,\n R_ji: torch.Tensor,\n t_ji: torch.Tensor,\n logit_ij: torch.Tensor,\n mask_ij: torch.Tensor,\n edge_idx: 
torch.LongTensor,\n iterations: int = 1,\n R_global: Optional[torch.Tensor] = None,\n t_global: Optional[torch.Tensor] = None,\n R_global_i: Optional[torch.Tensor] = None,\n t_global_i: Optional[torch.Tensor] = None,\n logit_global_i: Optional[torch.Tensor] = None,\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef average_transforms(\n R: torch.Tensor,\n t: torch.Tensor,\n w: torch.Tensor,\n mask: torch.Tensor,\n dim: int,\n t_edge: Optional[torch.Tensor] = None,\n dither: Optional[bool] = True,\n dither_eps: float = 1e-4,\n) -> Tuple[torch.Tensor, torch.Tensor]:\ndef _debug_plot_transforms(\n R_ij: torch.Tensor,\n t_ij: torch.Tensor,\n logits_ij: torch.Tensor,\n edge_idx: torch.LongTensor,\n mask_ij: torch.Tensor,\n dist_eps: float = 1e-3,\n):\n def _format(T):\n P = P_iso + P_rad\n D = torch.sqrt(t_ij.square().sum(-1))\n U = t_ij / (D[..., None] + dist_eps)\n T = T.cpu().data.numpy()\n T = (T + 1) / 2" } ]
import json
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple

from chroma.data.protein import Protein
from chroma.layers import graph
from chroma.layers.basic import FourierFeaturization, PositionalEncoding
from chroma.layers.structure import backbone, geometry, transforms
12,246
for i, layer in enumerate(self.edge_layers): key = json.dumps(self.edge_features[i]) tensor = torch.tensor(param_dictionary[key], dtype=torch.float32) tensor = tensor.view(1, 1, -1) self.register_buffer(f"edge_means_{i}", tensor) return def _reference_stats(self, reference_pdb): X, C, _ = Protein.from_PDBID(reference_pdb).to_XCS() stats_dict = self._feature_stats(X, C) return stats_dict def _feature_stats(self, X, C, verbose=False, center=False): mask_i = chain_map_to_mask(C) edge_idx, mask_ij = self.graph_builder(X, C) def _masked_stats(feature, mask, dims, verbose=False): mask = mask.unsqueeze(-1) feature = mask * feature sum_mask = mask.sum() mean = feature.sum(dims, keepdim=True) / sum_mask var = torch.sum(mask * (feature - mean) ** 2, dims) / sum_mask std = torch.sqrt(var) mean = mean.view(-1) std = std.view(-1) if verbose: frac = (100.0 * std ** 2 / (mean ** 2 + std ** 2)).type(torch.int32) print(f"Fraction of raw variance: {frac}") return mean, std # Collect statistics stats_dict = {} # Aggregate node layers for i, layer in enumerate(self.node_layers): node_h = layer(X, edge_idx, C) if center: node_h = node_h - self.__getattr__(f"node_means_{i}") mean, std = _masked_stats(node_h, mask_i, dims=[0, 1]) # Store in dictionary key = json.dumps(self.node_features[i]) stats_dict[key] = mean.tolist() # Aggregate node layers for i, layer in enumerate(self.edge_layers): edge_h = layer(X, edge_idx, C) if center: edge_h = edge_h - self.__getattr__(f"edge_means_{i}") mean, std = _masked_stats(edge_h, mask_ij, dims=[0, 1, 2]) # Store in dictionary key = json.dumps(self.edge_features[i]) stats_dict[key] = mean.tolist() # Round to small number of decimal places stats_dict = {k: [round(f, 3) for f in v] for k, v in stats_dict.items()} return stats_dict class ProteinGraph(nn.Module): """Build a graph topology given a protein backbone. Args: num_neighbors (int): Maximum number of neighbors in the graph. distance_atom_type (int): Atom type for computing residue-residue distances for graph construction. Negative values will specify centroid across atom types. Default is `-1` (centroid). cutoff (float): Cutoff distance for graph construction. If not None, mask any edges further than this cutoff. Default is `None`. mask_interfaces (Boolean): Restrict connections only to within chains, excluding-between chain interactions. Default is `False`. criterion (string, optional): Method used for building graph from distances. Currently supported methods are `{knn, random_log, random_linear}`. Default is `knn`. random_alpha (float, optional): Length scale parameter for random graph generation. Default is 3. random_temperature (float, optional): Temperature parameter for random graph sampling. Between 0 and 1 this value will interpolate between a normal k-NN graph and sampling from the graph generation process. Default is 1.0. Inputs: X (torch.Tensor): Backbone coordinates with shape `(num_batch, num_residues, 4, 3)`. C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`. custom_D (torch.Tensor, optional): Optional external distance map, for example based on other distance metrics, with shape `(num_batch, num_residues, num_residues)`. custom_mask_2D (torch.Tensor, optional): Optional mask to apply to distances before computing dissimilarities with shape `(num_batch, num_residues, num_residues)`. Outputs: edge_idx (torch.LongTensor): Edge indices for neighbors with shape `(num_batch, num_residues, num_neighbors)`. mask_ij (torch.Tensor): Edge mask with shape `(num_batch, num_nodes, num_neighbors)`. 
""" def __init__( self, num_neighbors: int = 30, distance_atom_type: int = -1, cutoff: Optional[float] = None, mask_interfaces: bool = False, criterion: str = "knn", random_alpha: float = 3.0, random_temperature: float = 1.0, random_min_local: float = 20, deterministic: bool = False, deterministic_seed: int = 10, ): super(ProteinGraph, self).__init__() self.num_neighbors = num_neighbors self.distance_atom_type = distance_atom_type self.cutoff = cutoff self.mask_interfaces = mask_interfaces
# Copyright Generate Biomedicines, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Layers for building graph representations of protein structure. This module contains pytorch layers for representing protein structure as a graph with node and edge features based on geometric information. The graph features are differentiable with respect to input coordinates and can be used for building protein scoring functions and optimizing protein geometries natively in pytorch. """ class ProteinFeatureGraph(nn.Module): """Graph featurizer for protein chains and complexes. This module builds graph representations of protein structures that are differentiable with respect to input coordinates and invariant with respect to global rotations and translations. It takes as input a batch of protein backbones (single chains or complexes), constructs a sparse graph with residues as nodes, and featurizes the backbones in terms of node and edge feature tensors. The graph representation has 5 components: 1. Node features `node_h` representing residues in the protein. 2. Edge features `edge_h` representing relationships between residues. 3. Index map `edge_idx` representing graph topology. 4. Node mask `mask_i` that specifies which nodes are present. 5. Edge mask `mask_ij` that specifies which edges are present. Criteria for constructing the graph currently include k-Nearest Neighbors or distance-weighted edge sampling. Node and edge features are specified as tuples to make it simpler to add additional features and options while retaining backwards compatibility. Specifically, each node or edge feature type can be added to the list either in default configuration by a `'feature_name'` keyword, or in modified form with a `('feature_name', feature_kwargs)` tuple. Example usage: graph = ProteinFeatureGraph( graph_type='knn', node_features=('dihedrals',), edge_features=[ 'chain_distance', ('dmat_6mer', {'D_function': 'log'}) ] ) node_h, edge_h, edge_idx, mask_i, mask_ij = graph(X, C) This builds a kNN graph with dihedral angles as node features and 6mer interatomic distance matrices (process) 6mers, where the options for post-processing the 6mers are passed as a kwargs dict. Args: dim_nodes (int): Hidden dimension of node features. dim_edges (int): Hidden dimension of edge features. num_neighbors (int): Maximum degree of the graph. graph_kwargs (dict): Arguments for graph construction. Default is None. node_features (list): List of node feature strings and optional args. Valid feature strings are `{internal_coords}`. edge_features (list): List of node feature strings and optional args. Valid feature strings are `{'distances_6mer','distances_chain'}`. centered (boolean): Flag for enabling feature centering. If `True`, the features will be will centered by subtracting an empirical mean that was computed on the reference PDB `centered_pdb`. The statistics are per-dimension of every node and edge feature. 
If they have not previously been computed, the PDB will be downloaded, featurized, and aggregated into local statistics that are cached in the repo. centered_pdb (str): PDB code for the reference PDB to compute some empirical feature statistics from. Inputs: X (torch.Tensor): Backbone coordinates with shape `(num_batch, num_residues, 4, 3)`. The standard atom indices for for the the third dimension are PDB order (`[N, CA, C, O]`). C (LongTensor, optional): Chain map with shape `(num_batch, num_residues)`. The chain map codes positions as `0` when masked, poitive integers for chain indices, and negative integers to represent missing residues of the corresponding positive integers. custom_D (Tensor, optional): Pre-computed custom distance map for graph construction `(numb_batch,num_residues,num_residues)`. If present, this will override the behavior of `graph_type` and used as the distances for k-nearest neighbor graph construction. custom_mask_2D (Tensor, optional): Custom 2D mask to apply to `custom_D` with shape `(numb_batch,num_residues,num_residues)`. Outputs: node_h (torch.Tensor): Node features with shape `(num_batch, num_residues, dim_nodes)`. edge_h (torch.Tensor): Edge features with shape `(num_batch, num_residues, num_neighbors, dim_edges)`. edge_idx (torch.LongTensor): Edge indices for neighbors with shape `(num_batch, num_residues, num_neighbors)`. mask_i (torch.Tensor): Node mask with shape `(num_batch, num_residues)`. mask_ij (torch.Tensor): Edge mask with shape `(num_batch, num_nodes, num_neighbors)`. """ def __init__( self, dim_nodes: int, dim_edges: int, num_neighbors: int = 30, graph_kwargs: dict = None, node_features: tuple = ("internal_coords",), edge_features: tuple = ("distances_6mer", "distances_chain"), centered: bool = True, centered_pdb: str = "2g3n", ): super(ProteinFeatureGraph, self).__init__() self.dim_nodes = dim_nodes self.dim_edges = dim_edges self.num_neighbors = num_neighbors graph_kwargs = graph_kwargs if graph_kwargs is not None else {} self.graph_builder = ProteinGraph(num_neighbors, **graph_kwargs) self.node_features = node_features self.edge_features = edge_features def _init_layer(layer_dict, features): # Parse option string custom_args = not isinstance(features, str) key = features[0] if custom_args else features kwargs = features[1] if custom_args else {} return layer_dict[key](**kwargs) # Node feature compilation node_dict = { "internal_coords": NodeInternalCoords, "cartesian_coords": NodeCartesianCoords, "radii": NodeRadii, } self.node_layers = nn.ModuleList( [_init_layer(node_dict, option) for option in self.node_features] ) # Edge feature compilation edge_dict = { "distances_6mer": EdgeDistance6mer, "distances_2mer": EdgeDistance2mer, "orientations_2mer": EdgeOrientation2mer, "position_2mer": EdgePositionalEncodings, "distances_chain": EdgeDistanceChain, "orientations_chain": EdgeOrientationChain, "cartesian_coords": EdgeCartesianCoords, "random_fourier_2mer": EdgeRandomFourierFeatures2mer, } self.edge_layers = nn.ModuleList( [_init_layer(edge_dict, option) for option in self.edge_features] ) # Load feature centering params as buffers self.centered = centered self.centered_pdb = centered_pdb.lower() if self.centered: self._load_centering_params(self.centered_pdb) """ Storing separate linear transformations for each layer, rather than concat + one large linear, provides a more even weighting of the different input features when used with standard weight initialization. 
It has the specific effect actually re-weighting the weight variance based on the number of input features for each feature type. Otherwise, the relative importance of each feature goes with the number of feature dimensions. """ self.node_linears = nn.ModuleList( [nn.Linear(l.dim_out, self.dim_nodes) for l in self.node_layers] ) self.edge_linears = nn.ModuleList( [nn.Linear(l.dim_out, self.dim_edges) for l in self.edge_layers] ) return def forward( self, X: torch.Tensor, C: torch.Tensor, edge_idx: Optional[torch.LongTensor] = None, mask_ij: torch.Tensor = None, custom_D: Optional[torch.Tensor] = None, custom_mask_2D: Optional[torch.Tensor] = None, ) -> Tuple[ torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor ]: mask_i = chain_map_to_mask(C) if mask_ij is None or edge_idx is None: edge_idx, mask_ij = self.graph_builder( X, C, custom_D=custom_D, custom_mask_2D=custom_mask_2D ) # Aggregate node layers node_h = None for i, layer in enumerate(self.node_layers): node_h_l = layer(X, edge_idx, C) if self.centered: node_h_l = node_h_l - self.__getattr__(f"node_means_{i}") node_h_l = self.node_linears[i](node_h_l) node_h = node_h_l if node_h is None else node_h + node_h_l if node_h is None: node_h = torch.zeros(list(X.shape[:2]) + [self.dim_nodes], device=X.device) # Aggregate edge layers edge_h = None for i, layer in enumerate(self.edge_layers): edge_h_l = layer(X, edge_idx, C) if self.centered: edge_h_l = edge_h_l - self.__getattr__(f"edge_means_{i}") edge_h_l = self.edge_linears[i](edge_h_l) edge_h = edge_h_l if edge_h is None else edge_h + edge_h_l if edge_h is None: edge_h = torch.zeros(list(X.shape[:2]) + [self.dim_nodes], device=X.device) # Apply masks node_h = mask_i.unsqueeze(-1) * node_h edge_h = mask_ij.unsqueeze(-1) * edge_h return node_h, edge_h, edge_idx, mask_i, mask_ij def _load_centering_params(self, reference_pdb: str): basepath = os.path.dirname(os.path.abspath(__file__)) + "/params/" if not os.path.exists(basepath): os.makedirs(basepath) filename = f"centering_{reference_pdb}.params" self.centering_file = basepath + filename key = ( reference_pdb + ";" + json.dumps(self.node_features) + ";" + json.dumps(self.edge_features) ) # Attempt to load saved centering params, otherwise compute and cache json_line = None with open(self.centering_file, "a+") as f: prefix = key + "\t" f.seek(0) for line in f: if line.startswith(prefix): json_line = line.split(prefix)[1] break if json_line is not None: print("Loaded from cache") param_dictionary = json.loads(json_line) else: print(f"Computing reference stats for {reference_pdb}") param_dictionary = self._reference_stats(reference_pdb) json_line = json.dumps(param_dictionary) f.write(prefix + "\t" + json_line + "\n") for i, layer in enumerate(self.node_layers): key = json.dumps(self.node_features[i]) tensor = torch.tensor(param_dictionary[key], dtype=torch.float32) tensor = tensor.view(1, 1, -1) self.register_buffer(f"node_means_{i}", tensor) for i, layer in enumerate(self.edge_layers): key = json.dumps(self.edge_features[i]) tensor = torch.tensor(param_dictionary[key], dtype=torch.float32) tensor = tensor.view(1, 1, -1) self.register_buffer(f"edge_means_{i}", tensor) return def _reference_stats(self, reference_pdb): X, C, _ = Protein.from_PDBID(reference_pdb).to_XCS() stats_dict = self._feature_stats(X, C) return stats_dict def _feature_stats(self, X, C, verbose=False, center=False): mask_i = chain_map_to_mask(C) edge_idx, mask_ij = self.graph_builder(X, C) def _masked_stats(feature, mask, dims, verbose=False): mask = 
mask.unsqueeze(-1) feature = mask * feature sum_mask = mask.sum() mean = feature.sum(dims, keepdim=True) / sum_mask var = torch.sum(mask * (feature - mean) ** 2, dims) / sum_mask std = torch.sqrt(var) mean = mean.view(-1) std = std.view(-1) if verbose: frac = (100.0 * std ** 2 / (mean ** 2 + std ** 2)).type(torch.int32) print(f"Fraction of raw variance: {frac}") return mean, std # Collect statistics stats_dict = {} # Aggregate node layers for i, layer in enumerate(self.node_layers): node_h = layer(X, edge_idx, C) if center: node_h = node_h - self.__getattr__(f"node_means_{i}") mean, std = _masked_stats(node_h, mask_i, dims=[0, 1]) # Store in dictionary key = json.dumps(self.node_features[i]) stats_dict[key] = mean.tolist() # Aggregate node layers for i, layer in enumerate(self.edge_layers): edge_h = layer(X, edge_idx, C) if center: edge_h = edge_h - self.__getattr__(f"edge_means_{i}") mean, std = _masked_stats(edge_h, mask_ij, dims=[0, 1, 2]) # Store in dictionary key = json.dumps(self.edge_features[i]) stats_dict[key] = mean.tolist() # Round to small number of decimal places stats_dict = {k: [round(f, 3) for f in v] for k, v in stats_dict.items()} return stats_dict class ProteinGraph(nn.Module): """Build a graph topology given a protein backbone. Args: num_neighbors (int): Maximum number of neighbors in the graph. distance_atom_type (int): Atom type for computing residue-residue distances for graph construction. Negative values will specify centroid across atom types. Default is `-1` (centroid). cutoff (float): Cutoff distance for graph construction. If not None, mask any edges further than this cutoff. Default is `None`. mask_interfaces (Boolean): Restrict connections only to within chains, excluding-between chain interactions. Default is `False`. criterion (string, optional): Method used for building graph from distances. Currently supported methods are `{knn, random_log, random_linear}`. Default is `knn`. random_alpha (float, optional): Length scale parameter for random graph generation. Default is 3. random_temperature (float, optional): Temperature parameter for random graph sampling. Between 0 and 1 this value will interpolate between a normal k-NN graph and sampling from the graph generation process. Default is 1.0. Inputs: X (torch.Tensor): Backbone coordinates with shape `(num_batch, num_residues, 4, 3)`. C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`. custom_D (torch.Tensor, optional): Optional external distance map, for example based on other distance metrics, with shape `(num_batch, num_residues, num_residues)`. custom_mask_2D (torch.Tensor, optional): Optional mask to apply to distances before computing dissimilarities with shape `(num_batch, num_residues, num_residues)`. Outputs: edge_idx (torch.LongTensor): Edge indices for neighbors with shape `(num_batch, num_residues, num_neighbors)`. mask_ij (torch.Tensor): Edge mask with shape `(num_batch, num_nodes, num_neighbors)`. """ def __init__( self, num_neighbors: int = 30, distance_atom_type: int = -1, cutoff: Optional[float] = None, mask_interfaces: bool = False, criterion: str = "knn", random_alpha: float = 3.0, random_temperature: float = 1.0, random_min_local: float = 20, deterministic: bool = False, deterministic_seed: int = 10, ): super(ProteinGraph, self).__init__() self.num_neighbors = num_neighbors self.distance_atom_type = distance_atom_type self.cutoff = cutoff self.mask_interfaces = mask_interfaces
self.distances = geometry.Distances()
5
2023-11-28 00:09:40+00:00
16k
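Aside: the ProteinFeatureGraph code in the record above projects each node or edge feature type through its own small linear layer and sums the projections, rather than concatenating all features and applying one large linear, so that every feature block contributes with comparable weight regardless of its raw dimensionality. Below is a minimal sketch of that aggregation pattern in plain PyTorch; the class name, feature dimensions, and shapes are illustrative assumptions rather than Chroma's actual modules.

import torch
import torch.nn as nn


class SummedFeatureProjection(nn.Module):
    # One linear projection per feature type; the projected tensors are
    # summed, so each feature block lands on a comparable scale no matter
    # how many raw dimensions it has.
    def __init__(self, feature_dims, dim_out):
        super().__init__()
        self.linears = nn.ModuleList(nn.Linear(d, dim_out) for d in feature_dims)

    def forward(self, feature_list):
        out = None
        for linear, h in zip(self.linears, feature_list):
            h_proj = linear(h)
            out = h_proj if out is None else out + h_proj
        return out


# Example with two hypothetical node feature types of different widths.
proj = SummedFeatureProjection(feature_dims=[12, 48], dim_out=128)
node_h = proj([torch.randn(4, 100, 12), torch.randn(4, 100, 48)])
print(node_h.shape)  # torch.Size([4, 100, 128])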
BiQiWHU/CMFormer
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = 
T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert 
self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "mask2former/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "mask2former/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
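The SemanticSegmentorWithTTA snippet above wraps an already-built segmentor and consumes detectron2-style dataset dicts. A minimal usage sketch in the document's Python conventions; the model construction, checkpoint path, and image path are illustrative assumptions, not part of this record:

import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model
from mask2former import SemanticSegmentorWithTTA

# Assumed: `cfg` was built with add_maskformer2_config (see the config sketch below)
# and describes a semantic-segmentation model.
model = build_model(cfg)
DetectionCheckpointer(model).load("output/model_final.pth")  # hypothetical checkpoint path
model.eval()

# tta_mapper defaults to DatasetMapperTTA(cfg) inside the wrapper.
tta_model = SemanticSegmentorWithTTA(cfg, model, batch_size=1)

# Each input is a detectron2 dataset dict; "file_name" alone is enough because the
# wrapper reads the image itself when "image" is missing.
with torch.no_grad():
    results = tta_model([{"file_name": "demo.jpg"}])  # -> list of {"sem_seg": Tensor}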
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
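These imports, together with the add_maskformer2_config keys shown in the first context snippet, are normally wired up once before training. A minimal configuration sketch under those assumptions; the YAML path is hypothetical and not taken from this record:

from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
from mask2former import add_maskformer2_config

cfg = get_cfg()
add_deeplab_config(cfg)       # DeepLab defaults (LR scheduler, etc.)
add_maskformer2_config(cfg)   # adds the MASK_FORMER / SEM_SEG_HEAD / SWIN keys listed above
cfg.merge_from_file("configs/coco/panoptic-segmentation/maskformer2_R50_bs16_50ep.yaml")  # hypothetical path
cfg.freeze()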
11,304
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
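The cropped code stops at the `mask_former_instance` branch of build_train_loader, and this next_line field gives its gold continuation. Completed in the style of the two branches above it, the branch would read roughly as follows (the trailing return is an inference from the surrounding branches, not part of the record):

        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)       # <- gold next_line
            return build_detection_train_loader(cfg, mapper=mapper)   # assumed, mirroring the branches above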
3
2023-11-29 15:26:53+00:00
16k
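Taken together, the fields of this record form one next-line-prediction sample: cropped_code is the file prefix ending just before the prediction point, next_line is the gold continuation, and gold_snippet_index appears to select the supporting context snippet. A minimal sketch of reassembling such a sample, assuming the record has been loaded into a plain dict named record (the dict name and loading step are assumptions, and indentation handling is ignored):

prefix = record["cropped_code"]            # code up to the prediction point
gold = record["next_line"]                 # the line a model is asked to produce
support = record["context"][record["gold_snippet_index"]]  # e.g., index 3 for this record

# Prefix plus gold continuation (leading whitespace of the gold line is not preserved here).
completed_prefix = prefix + "\n" + gold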
PopicLab/insilicoSV
test/test_processing.py
[ { "identifier": "SV_Simulator", "path": "insilicosv/simulate.py", "snippet": "class SV_Simulator:\n def __init__(self, par_file, log_file=None):\n \"\"\"\n par_file: file location to configuration file (.yaml)\n log_file: location to store log file with diagnostic information if config parameters indicate so\n \"\"\"\n global time_start\n print(\"Setting up Simulator...\")\n\n self.formatter = FormatterIO(par_file)\n self.formatter.yaml_to_var_list()\n config = self.formatter.config\n self.ref_file = config['sim_settings']['reference']\n self.ref_fasta = FastaFile(self.ref_file)\n self.svs_config = config['variant_sets']\n\n self.sim_settings = config['sim_settings']\n if log_file and \"generate_log_file\" in self.sim_settings.keys():\n logging.basicConfig(filename=log_file, filemode=\"w\", level=logging.DEBUG,\n format='[%(name)s: %(levelname)s - %(asctime)s] %(message)s')\n self.log_to_file(\"YAML Configuration: {}\".format(config))\n\n # get all chromosome ids\n self.order_ids = self.ref_fasta.references\n self.len_dict = dict() # stores mapping with key = chromosome, value = chromosome length\n for id in self.order_ids:\n chrom_len = self.ref_fasta.get_reference_length(id)\n if 'filter_small_chr' in self.sim_settings and chrom_len < self.sim_settings['filter_small_chr']:\n print(\"Filtering chromosome {}: Length of {} below threshold of {}\".format(id, chrom_len, self.sim_settings['filter_small_chr']))\n else:\n self.len_dict[id] = chrom_len\n print(\"Length of chromosome {}: {}\".format(id, self.len_dict[id]))\n\n # initialize stats file to be generated after all edits and exporting are finished\n self.stats = StatsCollection(self.order_ids, self.len_dict)\n\n self.mode = \"randomized\"\n self.vcf_path = None\n if \"vcf_path\" in self.svs_config[0]:\n self.mode = \"fixed\"\n self.vcf_path = self.svs_config[0][\"vcf_path\"]\n\n self.svs = []\n self.event_ranges = defaultdict(list)\n\n if \"avoid_intervals\" in config:\n # extract {chrom: [(start, end)]} intervals from vcf, add intervals from vcf to event range\n self.extract_vcf_event_intervals(config[\"avoid_intervals\"])\n\n self.overlap_events = None if \"overlap_events\" not in config.keys() \\\n else utils.OverlapEvents(config, allow_chroms=self.order_ids)\n\n self.initialize_svs()\n\n print(\"Finished Setting up Simulator in {} seconds\\n\".format(time.time() - time_start))\n time_start = time.time()\n\n def __repr__(self):\n return \"All structural variants entered into simulator: {}\".format(self.svs)\n\n def log_to_file(self, info, key=\"DEBUG\"):\n # only logs to file if config setting indicates so\n key_to_func = {\"DEBUG\": logging.debug, \"WARNING\": logging.warning}\n if \"generate_log_file\" in self.sim_settings and self.sim_settings[\"generate_log_file\"]:\n key_to_func[key](info)\n\n def get_rand_chr(self, check_size=None, fixed_chrom=None):\n # random assignment of SV to a chromosome (unless we have a predetermined chromosome for this event)\n valid_chrs = self.order_ids\n if check_size is not None:\n valid_chrs = [chrom for chrom, chr_size in self.len_dict.items() if chr_size >= check_size]\n if len(valid_chrs) == 0:\n raise Exception(\"SVs are too big for the reference!\")\n rand_id = valid_chrs[random.randint(0, len(valid_chrs) - 1)] if fixed_chrom is None else fixed_chrom\n chr_len = self.len_dict[rand_id]\n chr_event_ranges = self.event_ranges[rand_id]\n assert rand_id is not None\n return rand_id, chr_len, chr_event_ranges\n\n def extract_vcf_event_intervals(self, vcf_path):\n vcf = VariantFile(vcf_path)\n for 
rec in vcf.fetch():\n self.event_ranges[rec.chrom].append((rec.start, rec.stop))\n\n def process_vcf(self, vcf_path):\n # process vcf containing SVs to be added (deterministically) to reference\n active_svs_total = 0\n time_start_local = 0\n vcf = VariantFile(vcf_path)\n for rec in vcf.fetch():\n svtype = Variant_Type(rec.info['SVTYPE']) if 'SVTYPE' in rec.info else Variant_Type(rec.id)\n self.event_ranges[rec.chrom].append((rec.start, rec.stop))\n sv = Structural_Variant(sv_type=svtype, mode='fixed', vcf_rec=rec, ref_fasta=self.ref_fasta)\n self.svs.append(sv)\n active_svs_total += 1\n self.log_to_file(\"Intervals {} added to Chromosome \\\"{}\\\"\".format(self.event_ranges[rec.chrom], rec.chrom))\n time_dif = time.time() - time_start_local\n print(\"{} SVs successfully placed ========== {} seconds\".format(active_svs_total, time_dif), end=\"\\r\")\n time_start_local = time.time()\n\n def initialize_svs(self):\n \"\"\"\n Creates Structural_Variant objects for every SV to simulate and decides zygosity\n self.mode: flag indicating whether SVs are to be randomly generated or read in from VCF\n self.vcf_path: optional path that will be used if mode==\"fixed\"\n \"\"\"\n if self.mode == \"randomized\":\n for sv_config in self.svs_config:\n for num in range(sv_config[\"number\"]):\n # logic for placing events at intervals given in overlap bed file:\n # for the first (sv_config[\"num_overlap\"]) events, instantiate the SV at the next valid repeat elt interval\n repeat_elt = None\n elt_type = None\n if self.overlap_events is not None:\n sv_config_identifier = utils.get_sv_config_identifier(sv_config)\n if sv_config_identifier in self.overlap_events.svtype_overlap_counts.keys():\n repeat_elt, retrieved_type, elt_type = self.overlap_events.get_single_element_interval(\n sv_config_identifier, sv_config, partial_overlap=False)\n elif sv_config_identifier in self.overlap_events.svtype_partial_overlap_counts.keys():\n repeat_elt, retrieved_type, elt_type = self.overlap_events.get_single_element_interval(\n sv_config_identifier, sv_config, partial_overlap=True)\n elif sv_config_identifier in self.overlap_events.svtype_alu_mediated_counts.keys():\n repeat_elt, retrieved_type = self.overlap_events.get_alu_mediated_interval(sv_config_identifier)\n if sv_config['type'] == Variant_Type.SNP:\n sv = Structural_Variant(sv_type=sv_config[\"type\"], mode=self.mode, length_ranges=[(1, 1)])\n else:\n sv = Structural_Variant(sv_type=sv_config[\"type\"], mode=self.mode,\n length_ranges=sv_config[\"length_ranges\"], source=sv_config[\"source\"],\n target=sv_config[\"target\"],\n overlap_event=(repeat_elt + (retrieved_type if elt_type in ['ALL', None] else elt_type,) if repeat_elt is not None else None),\n div_prob=(None if 'divergence_prob' not in sv_config.keys() else sv_config['divergence_prob']))\n\n # For divergent repeat simulation, need div_dDUP to be homozygous\n if self.sim_settings.get(\"homozygous_only\", False) or random.randint(0, 1):\n sv.ishomozygous = Zygosity.HOMOZYGOUS\n sv.hap = [True, True]\n else:\n sv.ishomozygous = Zygosity.HETEROZYGOUS\n sv.hap = random.choice([[True, False], [False, True]])\n\n self.svs.append(sv)\n if not self.sim_settings[\"prioritize_top\"]:\n random.shuffle(self.svs)\n else: # mode == \"fixed\"\n self.process_vcf(self.vcf_path)\n\n def produce_variant_genome(self, fasta1_out, fasta2_out, ins_fasta, bedfile, stats_file=None, initial_reset=True,\n verbose=False, export_to_file=True):\n \"\"\"\n initial_reset: boolean to indicate if output file should be overwritten (True) or 
appended to (False)\n stats_file: whether a stats file summarizing SVs simulated will be generated in same directory the reference genome is located in\n \"\"\"\n global time_start\n if initial_reset:\n utils.reset_file(fasta1_out)\n utils.reset_file(fasta2_out)\n ref_fasta = self.ref_fasta\n self.apply_transformations(ref_fasta)\n print(\"Finished SV placements and transformations in {} seconds\".format(time.time() - time_start))\n time_start = time.time()\n active_svs = [sv for sv in self.svs if sv.active]\n print(\"Starting Export Process...\")\n for x in range(2):\n edits_dict = dict()\n for id in self.order_ids:\n edits_dict[id] = []\n if x == 0:\n fasta_out = fasta1_out\n elif x == 1:\n fasta_out = fasta2_out\n for sv in active_svs:\n if sv.hap[x]:\n for frag in sv.changed_fragments:\n edits_dict[frag[0]].append(frag[1:])\n for id in edits_dict:\n edits_dict[id].sort()\n self.event_ranges[id].sort()\n self.log_to_file(\"Event Ranges: {}\".format(self.event_ranges))\n self.log_to_file(\"Intervals for hap {}: {}\".format(x, edits_dict))\n for id in self.order_ids:\n edits_x = edits_dict[id]\n utils.fail_if_any_overlapping(edits_x)\n self.formatter.export_variants_to_fasta(id, edits_x, fasta_out, ref_fasta, verbose=verbose)\n print(\"ID {} exported to fasta file {} in {} seconds\".format(id, fasta_out, time.time() - time_start))\n time_start = time.time()\n if export_to_file:\n self.formatter.export_to_bedpe(active_svs, bedfile, ins_fasta=ins_fasta, reset_file=initial_reset)\n self.formatter.export_to_vcf(active_svs, self.stats, vcffile=bedfile[:-4]+'.vcf')\n if stats_file:\n self.stats.get_info(self.svs)\n self.stats.export_data(stats_file)\n\n def choose_rand_pos(self, svs, ref_fasta, verbose=False):\n \"\"\"\n randomly positions SVs and stores reference fragments in SV events\n\n svs: list of Structural Variant objects\n ref_fasta: FastaFile with access to reference file\n \"\"\"\n active_svs_total = 0\n inactive_svs_total = 0\n time_start_local = time.time()\n for sv in svs:\n tries = 0\n valid = False\n while not valid:\n tries += 1\n valid = True\n if tries > self.sim_settings[\"max_tries\"]:\n if self.sim_settings[\"fail_if_placement_issues\"]:\n raise Exception(\n \"Failed to simulate {}, {} / {} SVs successfully simulated (set fail_if_placement_issues \"\n \"to False to override placement failures)\".format(\n sv, active_svs_total, len(svs)))\n valid = False\n break\n rand_id, chr_len, chr_event_ranges = self.get_rand_chr(check_size=sv.req_space,\n fixed_chrom=(None if sv.overlap_event is None\n else sv.overlap_event[0]))\n if not (sv.dispersion_flip and sv.overlap_event is not None):\n # if an overlap event is given, need to find the SV start position based on which fragment has been\n # set to the overlap event interval\n if sv.overlap_event is not None:\n start_pos = 0\n for frag in sv.source_events[::-1]:\n if frag.start is not None:\n start_pos = frag.start\n else:\n start_pos -= frag.length\n else:\n start_pos = random.randint(0, chr_len - sv.req_space)\n # define the space in which SV operates\n new_intervals = [] # tracks new ranges of blocks\n sv.start, sv.start_chr = start_pos, rand_id\n sv.end = sv.start + sv.req_space\n block_start = sv.start\n else:\n # to assign event \"A\" to a repeat interval in a flipped dispersion event, need to\n # anchor the sv to the end of \"A\" and get the start position by subtracting off the total size\n end_pos = int(sv.overlap_event[2])\n start_pos = end_pos - sv.req_space\n new_intervals = []\n sv.start, sv.start_chr = start_pos, 
rand_id\n sv.end = end_pos\n block_start = sv.start\n\n for sv_event in sv.source_events:\n sv_event.start, sv_event.end = start_pos, start_pos + sv_event.length\n sv_event.source_chr = rand_id\n frag = ref_fasta.fetch(rand_id, sv_event.start, sv_event.end)\n sv_event.source_frag = frag\n start_pos += sv_event.length\n\n if sv_event.symbol.startswith(Symbols.DIS.value):\n if utils.is_overlapping(chr_event_ranges, (block_start, sv_event.start)):\n valid = False\n break\n new_intervals.append((block_start, sv_event.start))\n block_start = sv_event.end\n elif utils.percent_N(frag) > 0.05:\n valid = False\n break\n # catches the last (and perhaps only) block in sequence\n if utils.is_overlapping(chr_event_ranges, (block_start, sv.end)):\n valid = False\n continue\n else:\n new_intervals.append((block_start, sv.end))\n\n # adds new SV to simulate only if chosen positions were valid\n if valid:\n active_svs_total += 1\n sv.active = True\n self.log_to_file(\"Intervals {} added to Chromosome \\\"{}\\\" for SV {}\".format(new_intervals, rand_id, sv))\n chr_event_ranges.extend(new_intervals)\n # populates insertions with random sequence - these event symbols only show up in target transformation\n for event in sv.events_dict.values():\n if event.source_frag is None and event.length > 0:\n event.source_frag = utils.generate_seq(event.length)\n sv.assign_locations(sv.start)\n else:\n inactive_svs_total += 1\n if tries != self.sim_settings[\"max_tries\"] + 1:\n self.log_to_file(\"{} only got {} tries instead of the max {}\".format(sv, tries, self.sim_settings[\n \"max_tries\"] + 1), key=\"WARNING\")\n\n time_dif = time.time() - time_start_local\n print(\n \"{} / {} SVs successfully placed ========== {} / {} SVs unsuccessfully placed, {} tries, {} seconds\".format(\n active_svs_total, len(svs), inactive_svs_total, len(svs), tries, time_dif), end=\"\\r\")\n time_start_local = time.time()\n\n def apply_transformations(self, ref_fasta):\n \"\"\"\n Randomly chooses positions for all SVs and carries out all edits\n Populates event classes within SVs with reference fragments and start & end positions\n Stores list of changes, which each have an interval and a sequence to substitute the reference frag with, in SV\n\n ref_fasta: FastaFile with access to reference\n mode: flag indicating whether we're adding SVs to the reference in a randomized or deterministic way\n \"\"\"\n if self.mode == \"randomized\":\n # select random positions for SVs\n self.choose_rand_pos(self.svs, ref_fasta)\n print()\n\n total = 0\n for sv in self.svs:\n if sv.active:\n sv.change_fragment()\n total += 1\n self.log_to_file(\"Events Dict after all edits: {} \".format(sv.events_dict))\n\n def close(self):\n self.ref_fasta.close()" }, { "identifier": "FormatterIO", "path": "insilicosv/processing.py", "snippet": "class FormatterIO:\n def __init__(self, par_file):\n self.bedpe_counter = 1\n self.par_file = par_file\n self.config = None\n\n @staticmethod\n def run_checks_randomized(config):\n \"\"\"\n check method for yaml given with SVs given for randomized placement on reference\n \"\"\"\n config_svs = config['variant_sets']\n for config_sv in config_svs:\n if \"avoid_intervals\" in config_sv:\n continue\n elif \"type\" not in config_sv:\n raise Exception(\"\\\"Type\\\" attribute must be specified! 
For custom transformations, enter in \\\"Custom\\\"\")\n elif config_sv[\"type\"] == \"SNP\": # SNP events are only specified by count (size is deterministic)\n if \"number\" in config_sv and isinstance(config_sv[\"number\"], int) and config_sv[\"number\"] > 0:\n continue\n else:\n raise Exception(\"Number (of type int > 0) is a required parameter for all SVs\")\n if \"min_length\" not in config_sv:\n raise Exception(\"Min length must be specified on all SVs!\")\n if \"max_length\" not in config_sv:\n raise Exception(\"Max length must be specified on all SVs!\")\n if \"number\" not in config_sv:\n raise Exception(\"Number is a required parameter for all SVs\")\n\n elif \"type\" in config_sv and not isinstance(config_sv[\"type\"], str):\n raise Exception(\"Invalid {} type for SV \\'type\\' attribute, str expected\".format(type(config_sv[\"type\"])))\n valid_optional_par = [\"fail_if_placement_issues\", \"max_tries\", \"generate_log_file\", \"filter_small_chr\",\n \"prioritize_top\", \"homozygous_only\", \"reference\"] # valid arguments within sim_settings\n for parameter in config['sim_settings']:\n if parameter not in valid_optional_par:\n raise Exception(\"\\\"{}\\\" is an invalid argument under sim_settings\".format(parameter))\n valid_keys = [\"sim_settings\", \"variant_sets\", \"overlap_events\", \"avoid_intervals\"] # valid arguments at the top level\n for key in config:\n if key not in valid_keys:\n raise Exception(\"Unknown argument \\\"{}\\\"\".format(key))\n\n def postproc_config_dict(self):\n if 'sim_settings' not in self.config.keys():\n raise Exception(\"Must include \\'sim_settings\\' sections specifying at least \\'reference\\' path\")\n if \"filter_small_chr\" in self.config.keys() and not isinstance(self.config[\"filter_small_chr\"], int):\n raise Exception(\"Must provide value of type int to \\'filter_small_chr\\'\")\n if \"reference\" not in self.config[\"sim_settings\"]:\n raise Exception(\"Must include reference FASTA file in \\'reference\\' field of \\'sim_settings\\'\")\n elif self.config[\"sim_settings\"][\"reference\"].split(\".\")[-1] not in [\"fa\", \"fna\", \"fasta\"]:\n raise Exception(\"Input reference must be of type .fa, .fna, or .fasta\")\n if \"vcf_path\" not in self.config[\"variant_sets\"][0]:\n self.run_checks_randomized(self.config)\n for config_sv in self.config['variant_sets']:\n if \"vcf_path\" in config_sv:\n continue\n # SV event length specification - not applicable for SNPs\n if config_sv[\"type\"] != \"SNP\":\n if not isinstance(config_sv[\"min_length\"], list) or not isinstance(config_sv[\"max_length\"], list):\n raise Exception(\"Must provide entries of type list to \\'min_length\\' and \\'max_length\\'\")\n else:\n config_sv[\"length_ranges\"] = list(zip(config_sv[\"min_length\"], config_sv[\"max_length\"]))\n assert all(max_len >= min_len >= 0 for (min_len, max_len) in config_sv[\"length_ranges\"]), \"Max length must be >= min length for all SVs! 
Also ensure that all length values are >= 0.\"\n if \"divergence_prob\" in config_sv:\n if config_sv[\"type\"] != \"DIVERGENCE\":\n raise Exception(\"divergence_prob can only be given for event type DIVERGENCE\")\n else:\n assert isinstance(config_sv[\"divergence_prob\"], int) or isinstance(config_sv[\"divergence_prob\"], float), \\\n \"Must give \\'divergence_prob\\'\"\n assert 1 >= config_sv[\"divergence_prob\"] > 0, \"divergence_prob must be in (0,1]\"\n\n config_sv[\"type\"] = Variant_Type(config_sv[\"type\"])\n if config_sv[\"type\"] != Variant_Type.Custom:\n config_sv[\"source\"] = None\n config_sv[\"target\"] = None\n\n # setting default values for sim_settings fields\n if 'max_tries' not in self.config['sim_settings']:\n self.config['sim_settings']['max_tries'] = 50\n if 'fail_if_placement_issues' not in self.config['sim_settings']:\n self.config['sim_settings']['fail_if_placement_issues'] = False\n\n def yaml_to_var_list(self):\n try:\n with open(self.par_file) as yaml_file:\n self.config = yaml.full_load(yaml_file)\n except:\n raise Exception(\"YAML File {} failed to be open\".format(self.par_file))\n self.postproc_config_dict()\n\n def write_to_file(self, sv, bedfile, source_s, source_e, target_s, target_e, transform, event, sv_id):\n assert (not event.symbol.startswith(Symbols.DIS.value))\n if transform == Operations.INS.value:\n transform_length = event.length\n else:\n transform_length = source_e - source_s\n if event.length > 0:\n with open(bedfile, \"a\") as fout:\n row = [str(event.source_chr),\n str(source_s),\n str(source_e),\n str(event.source_chr),\n str(target_s),\n str(target_e),\n transform,\n str(transform_length),\n '%d/%d' % (int(sv.hap[0]), int(sv.hap[1])),\n sv.name,\n str(sv_id)]\n fout.write(\"\\t\".join(row) + \"\\n\")\n\n @staticmethod\n def symbol_is_inversion(symbol):\n return any(c.islower() for c in symbol)\n\n @staticmethod\n def export_insertions(chr, start_pos, seq, ins_fasta):\n \"\"\"\n Exports foreign insertion sequences to separate fasta file, append only\n \"\"\"\n with open(ins_fasta, \"a\") as fout_ins:\n fout_ins.write(\">{}_{}\\n\".format(chr, start_pos))\n fout_ins.write(\"{}\\n\".format(seq))\n\n @staticmethod\n def get_event_target_operation(ev, target_events_dict, source_events_dict):\n \"\"\"\n determines target interval and operation for multi-source events\n \"\"\"\n # A -> A'\n if ev + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), \\\n Operations.DUP.value if ev in target_events_dict.keys() else Operations.TRA.value\n # A -> a'\n elif ev.lower() + Symbols.DUP.value in target_events_dict.keys():\n trg_sym = ev.lower() + Symbols.DUP.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INVDUP.value\n # A -> a\n elif ev.lower() in target_events_dict.keys():\n trg_sym = ev.lower()\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.INV.value\n # A -> A* (in the case of a custom event in which an event is divergently duplicated)\n elif ev + Symbols.DIV.value in target_events_dict.keys():\n trg_sym = ev + Symbols.DIV.value\n return (target_events_dict[trg_sym].start, target_events_dict[trg_sym].end), Operations.DIV.value\n # A -> A (insertion if source A is undefined, identity otherwise)\n elif ev in target_events_dict.keys():\n return (target_events_dict[ev].start, target_events_dict[ev].end), \\\n Operations.INS.value if 
source_events_dict[ev].start is None else Operations.IDENTITY.value\n # A -> [none]\n elif ev not in [sym[0] for sym in target_events_dict.keys()]:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.DEL.value\n # otherwise unknown mapping\n else:\n return (source_events_dict[ev].start, source_events_dict[ev].end), Operations.UNDEFINED.value\n\n @staticmethod\n def postprocess_record_params(sv, sv_record_info):\n \"\"\"\n arrange the bed_record parameter dictionaries in order of ascending source interval start position\n and assign order values to the relevant entries\n \"\"\"\n # for TRA/INS/DUP events with the same target position, 'order' describes the order in which they\n # are compiled (i.e., the order in which they appear in the target sequence)\n order = 0\n ins_pos = None\n for block in sv.target_symbol_blocks:\n for target_event in block:\n if target_event.symbol.startswith(Symbols.DIS.value) or \\\n target_event.symbol in sv_record_info.keys(): # <- prevent collision with A' and A if both in target\n continue\n src_sym = target_event.symbol[0].upper()\n if sv_record_info[src_sym]['transform'] in NONZERO_ORDER_OPERATIONS:\n if ins_pos is None:\n ins_pos = sv_record_info[src_sym]['target_s']\n order += 1\n elif sv_record_info[src_sym]['target_s'] == ins_pos:\n order += 1\n else:\n ins_pos = None\n order = 0\n # sv_record_info[src_sym]['order'] = order\n return sorted([params for params in sv_record_info.values()], key=lambda params: params['source_s'])\n\n def export_to_bedpe(self, svs, bedfile, ins_fasta=None, reset_file=True):\n if reset_file:\n utils.reset_file(bedfile)\n if ins_fasta:\n utils.reset_file(ins_fasta)\n for sv_id, sv in enumerate(svs):\n # SVs with multiple source events will be split into multiple bed records (one for each)\n if len(sv.events_dict) == 1:\n ev = list(sv.sv_blocks.target_events_dict.values())[0] if sv.type == Variant_Type.INS\\\n else list(sv.events_dict.values())[0]\n op = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)[1]\n record_info = {'source_s': ev.start, 'source_e': ev.end, 'target_s': ev.start, 'target_e': ev.end,\n 'transform': op, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n self.write_to_file(**record_info)\n if op == Operations.INS.value:\n self.export_insertions(sv.start_chr, ev.start, ev.source_frag, ins_fasta)\n else:\n # multiple source events: source intervals taken from the source events\n # and target intervals taken from corresponding target events (if no match, then deletion)\n sv_record_info = {}\n for ev in sv.events_dict.values():\n if ev.symbol.startswith(Symbols.DIS.value):\n continue\n sv_record_info[ev.symbol] = {'source_s': ev.start, 'source_e': ev.end, 'sv': sv, 'event': ev, 'bedfile': bedfile, 'sv_id': sv_id + 1}\n (target_s, target_e), operation = self.get_event_target_operation(ev.symbol, sv.sv_blocks.target_events_dict, sv.events_dict)\n sv_record_info[ev.symbol]['target_s'] = target_s\n sv_record_info[ev.symbol]['target_e'] = target_e\n sv_record_info[ev.symbol]['transform'] = operation\n for param_dict in self.postprocess_record_params(sv, sv_record_info):\n self.write_to_file(**param_dict)\n\n def export_to_vcf(self, svs, stats, vcffile):\n with open(vcffile, \"w\") as vcf:\n vcf.write(\"##fileformat=VCFv4.2\\n\")\n for chrm, chrm_len in stats.chr_lengths.items():\n vcf.write(\"##contig=<ID=%s,length=%d>\\n\" % (chrm, chrm_len))\n vcf.write(\"#%s\\n\" % \"\\t\".join([\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", 
\"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\",\n \"SAMPLE\"]))\n # *** This will throw an error with pysam version 0.18, need 0.16.0.1\n vcf_file = pysam.VariantFile(vcffile)\n vcf_file.header.info.add('END', number=1, type='Integer', description=\"End position of the variant \"\n \"described in this record\")\n vcf_file.header.info.add('CIPOS', number=2, type='Integer', description=\"Confidence interval around POS for \"\n \"imprecise variants\")\n vcf_file.header.info.add('CIEND', number=2, type='Integer', description=\"Confidence interval around END for \"\n \"imprecise variants\")\n vcf_file.header.info.add('SVTYPE', number=1, type='String', description=\"Type of structural variant\")\n vcf_file.header.info.add('SVLEN', number=1, type='Integer', description=\"Length of structural variant\")\n vcf_file.header.info.add('SVMETHOD', number=1, type='String', description=\"SV detection method\")\n vcf_file.header.info.add('TARGET', number=1, type='Integer', description=\"Target location for divergent repeat\")\n vcf_file.header.info.add('OVERLAP_EV', number=1, type='String', description=\"Bool. indicator for the event being\"\n \"placed at an overlap_events interval\")\n vcf_file.header.formats.add('GT', number=1, type='String', description=\"Genotype\")\n\n vcf_out_file = pysam.VariantFile(vcffile, 'w', header=vcf_file.header)\n\n for sv in svs:\n zyg = (int(sv.hap[0]), int(sv.hap[1]))\n dispersion_target = None\n if sv.type in DISPERSION_TYPES:\n source_event = sv.events_dict[Symbols.REQUIRED_SOURCE.value]\n disp_event = sv.events_dict['_1']\n rec_start = source_event.start\n rec_end = source_event.end\n if disp_event.start == source_event.end:\n dispersion_target = disp_event.end\n else:\n dispersion_target = disp_event.start\n else:\n rec_start = min([frag[1] for frag in sv.changed_fragments])\n rec_end = max(frag[2] for frag in sv.changed_fragments)\n if dispersion_target is not None:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start, 'TARGET': dispersion_target}\n else:\n if sv.type == Variant_Type.INS:\n # special case of simple INS: sv length \\neq (sv end - sv start)\n # **pysam will delete END fields that are equal to POS, therefore INS records won't have an END\n rec_end += 1\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': sv.events_dict[Symbols.REQUIRED_SOURCE.value].length}\n else:\n info_field = {'SVTYPE': sv.type.value, 'SVLEN': rec_end - rec_start}\n if sv.overlap_event is not None:\n info_field['OVERLAP_EV'] = sv.overlap_event[3]\n\n vcf_record = vcf_out_file.header.new_record(contig=sv.start_chr, start=rec_start, stop=rec_end,\n alleles=['N', '<%s>' % sv.type.value], id=sv.type.value,\n info=info_field,\n qual=100, filter='PASS',\n samples=[{'GT': zyg}])\n vcf_out_file.write(vcf_record)\n\n vcf_out_file.close()\n\n def export_variants_to_fasta(self, id, edits, fasta_out, fasta_file, verbose=False):\n \"\"\"\n Exports list of changes from simulator to fasta file\n\n id: chr_id to apply edits to\n edits: list with elements of the form (start, end, new_frag)\n fasta_out: Fasta file to export changes to\n fasta_file: FastaFile with access to reference\n \"\"\"\n with open(fasta_out, \"a\") as fout_export:\n if id not in fasta_file.references:\n raise KeyError(\"ID {} not found in inputted fasta file\".format(id))\n if verbose:\n print(\"New ID: \", id)\n fout_export.write(\">\" + str(id) + \"\\n\")\n chr_variants = list(edits)\n chr_variants.sort()\n chr_variants.append([fasta_file.get_reference_length(id), fasta_file.get_reference_length(id), \"\"])\n pos = 
0\n for variant in chr_variants:\n var_start, var_end = variant[0], variant[1]\n while pos < var_start:\n appropriate_buffer = MAX_BUFFER_SIZE if var_start - pos > MAX_BUFFER_SIZE else var_start - pos\n c = fasta_file.fetch(id, pos, pos + appropriate_buffer)\n fout_export.write(c)\n pos += appropriate_buffer\n assert (pos == var_start), \"Replacement fragment about to be inserted at position {} instead of var_start {}\".format(pos, var_start)\n fout_export.write(variant[2])\n pos = var_end\n fout_export.write(\"\\n\")\n\n def close(self):\n self.fin_export1.close()\n self.fin_export2.close()" }, { "identifier": "NestedDict", "path": "insilicosv/utils.py", "snippet": "class NestedDict(defaultdict):\n def __call__(self):\n return NestedDict(self.default_factory)" }, { "identifier": "utils", "path": "insilicosv/utils.py", "snippet": "class NestedDict(defaultdict):\nclass OverlapEvents:\n def __call__(self):\ndef is_overlapping(event_ranges, addition, called_from_helper=False, strictly_partial=False):\ndef fail_if_any_overlapping(arr):\ndef validate_symbols(source, target):\ndef remove_file(file):\ndef reset_file(filename):\ndef generate_seq(length):\ndef percent_N(seq):\ndef complement(seq):\ndef divergence(seq, divergence_prob=None):\ndef get_sv_config_identifier(sv_config):\n def __init__(self, config, allow_chroms=None):\n def get_num_overlap_counts(self, config):\n def parse_bed_file(self, bed_fname, allow_chroms=None, allow_types=None):\n def get_single_element_interval(self, sv_config_id, sv_config, partial_overlap):\n def populate_alu_pairs(self, svs_config):\n def get_alu_mediated_interval(self, sv_config_id):\n def remove_alu_from_overlap_dict(self, chrom, start, end):\n def midpoint(start, end):\n def get_intrvl_len(chr, st, end):\n def elt_type_is_allowed(self, elt_type):\n def get_partially_overlapping_interval(elt_chrom, elt_start, elt_stop, sv_min, sv_max):\n def draw_from_unif(a, b):\n def decrement_counts(self, sv_config_id, input_elt_type, partial_overlap):\n def __getitem__(self, sv_config_id, minsize, maxsize, elt_type=None, partial_overlap=False):" }, { "identifier": "constants", "path": "insilicosv/constants.py", "snippet": "MAX_BUFFER_SIZE: int = 1000000 # max number of bases that can be read at one time to export to fasta file\n INS = \"INS\"\n DEL = \"DEL\"\n INV = \"INV\"\n DUP = \"DUP\"\n SNP = \"SNP\"\n TRA = \"TRA\"\n DIVERGENCE = \"DIVERGENCE\"\nDISPERSION_TYPES = [Variant_Type.dDUP, Variant_Type.INV_dDUP,\n Variant_Type.TRA, Variant_Type.div_dDUP,\n Variant_Type.dDUP_iDEL, Variant_Type.INS_iDEL]\n INS = \"INS\"\n DUP = \"DUP\"\n INV = \"INV\"\n DEL = \"DEL\"\n TRA = \"TRA\"\n INVDUP = \"INVDUP\"\n INVTRA = \"INVTRA\"\n IDENTITY = \"IDENTITY\"\n UNDEFINED = \"UNDEFINED\"\n DIV = \"DIV\"\nNONZERO_ORDER_OPERATIONS = [Operations.TRA.value, Operations.INS.value, Operations.DUP.value, Operations.INVDUP.value,\n Operations.INVTRA.value, Operations.DIV.value]\n UNDEFINED = -1\n HOMOZYGOUS = 1\n HETEROZYGOUS = 0\n DIS = \"_\" # dispersion event\n DUP = \"'\" # attached to symbols that are not the original one from source sequence\n DIV = \"*\" # divergent interval, attached to symbols that vary from the original by low-probability base error\n REQUIRED_SOURCE = \"A\" # event symbol of the required source/main event all SVs must have\nSV_KEY = {Variant_Type.INS: [(), (\"A\")],\n Variant_Type.SNP: [(\"A\",), (\"A*\",)],\n Variant_Type.DEL: [(\"A\",), ()],\n Variant_Type.INV: [(\"A\",), (\"a\",)],\n Variant_Type.DUP: [(\"A\",), (\"A\", \"A'\")],\n Variant_Type.TRA: [(\"A\", 
\"_\"), (\"_\", \"A'\")],\n Variant_Type.dupINVdup: [(\"A\", \"B\", \"C\"), (\"A\", \"c'\", \"b\", \"a'\", \"C\")],\n Variant_Type.delINVdel: [(\"A\", \"B\", \"C\"), (\"b\",)],\n Variant_Type.delINVdup: [(\"A\", \"B\", \"C\"), (\"c'\", \"b\", \"C\")],\n Variant_Type.dupINVdel: [(\"A\", \"B\", \"C\"), (\"A\", \"b\", \"a'\")],\n Variant_Type.delINV: [(\"A\", \"B\"), (\"b\",)],\n Variant_Type.INVdel: [(\"A\", \"B\"), (\"a\",)],\n Variant_Type.dDUP_iDEL: [(\"A\", \"_\", \"B\"), (\"A\", \"_\", \"A'\")],\n Variant_Type.INS_iDEL: [(\"A\", \"_\", \"B\"), (\"_\", \"A'\")],\n Variant_Type.INVdup: [(\"A\",), (\"a\", \"a'\")],\n Variant_Type.dup_INV: [(\"A\", \"B\"), (\"A\", \"b\", \"a'\")],\n Variant_Type.INV_dup: [(\"A\", \"B\"), (\"b'\", \"a\", \"B\")],\n Variant_Type.dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"A'\")],\n Variant_Type.INV_dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"a'\")],\n Variant_Type.div_dDUP: [(\"A\", \"_\"), (\"A\", \"_\", \"A*\")],\n Variant_Type.DIVERGENCE: [(\"A\",), (\"A*\",)]}\nDEFAULT_CONFIG = {\"sim_settings\": {\"max_tries\": 100,\n \"fail_if_placement_issues\": False,\n \"generate_log_file\": False,\n \"prioritize_top\": False},\n \"variant_sets\": {}}\nclass Variant_Type(Enum):\nclass Operations(Enum):\nclass Zygosity(Enum):\nclass Symbols(Enum):" } ]
from insilicosv.simulate import SV_Simulator
from insilicosv.processing import FormatterIO
from test_simulate import TestObject
from pysam import VariantFile, FastaFile
from collections import defaultdict, Counter
from insilicosv.utils import NestedDict
from insilicosv import utils
from insilicosv import constants
import unittest
import sys
import os
14,137
"max_length": [4], "min_length": [2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], 
"num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)}
class TestProcObject(TestObject): def __init__(self, ref, par, hap1, hap2, bed, vcf): self.vcf = vcf super().__init__(ref, par, hap1, hap2, bed) def extract_bed_records(self): # parse bed record into dict for easy comparison # --> example split bed record: ['chr19', '0', '3', 'chr19', '0', '3', 'DEL', '3', '1/1', 'DEL', '1'] bed_records = [] with open(self.bed) as f: for line in f: ln = line.split() bed_record = {'source_chr': ln[0], 'source_s': ln[1], 'source_e': ln[2], 'target_chr': ln[3], 'target_s': ln[4], 'target_e': ln[5], 'ev_type': ln[6], 'len': ln[7], 'zyg': ln[8], 'parent_type': ln[9], 'sv_id': ln[10]} bed_records.append(bed_record) return bed_records def extract_vcf_records(self): vcf_records = [] vcf = VariantFile(self.vcf) for rec in vcf.fetch(): ln = str(rec).split() # separately parse info field of the form: 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738' info = {field.split('=')[0]: field.split('=')[1] for field in ln[7].split(';')} vcf_record = {'CHROM': ln[0], 'POS': ln[1], 'ID': ln[2], 'REF': ln[3], 'ALT': ln[4], 'QUAL': ln[5], 'FILTER': ln[6], 'INFO': info, 'FORMAT': ln[8], 'SAMPLE': ln[9]} vcf_records.append(vcf_record) return vcf_records class TestProcessing(unittest.TestCase): def setUp(self): # runs before every test self.ref_file = "test/inputs/test.fa" self.par = "test/inputs/par.yaml" self.hap1 = "test/inputs/test1.fa" self.hap2 = "test/inputs/test2.fa" self.bed = "test/inputs/out.bed" self.vcf = "test/inputs/out.vcf" self.ins_fasta = "test/inputs/ins_fasta.fa" self.test_overlap_bed = "test/inputs/example_overlap_events.bed" self.test_overlap_bed_2 = "test/inputs/example_overlap_events_2.bed" # test_overlap_bed_3: events with differing chromosome self.test_overlap_bed_3 = "test/inputs/example_overlap_events_3.bed" self.test_overlap_bed_4 = "test/inputs/example_overlap_events_4.bed" self.test_overlap_bed_11 = "test/inputs/example_overlap_events_11.bed" self.test_objects_simple_events = {'DEL': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DEL", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'DUP': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DUP", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INV", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS': TestProcObject([self.ref_file, {"chr19": "C"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INS", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_flanked_inversions = {'dupINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": 
self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'dupINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dispersions = {'dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'TRA': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "TRA", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_del_inv = {'delINV': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINV", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdel", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_idel = {'dDUP_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INS_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dup_inv = {'dup_INV': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dup_INV", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dup': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dup", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_INVdup = {'INVdup': 
TestProcObject([self.ref_file, {"chr19": "ACTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 1, "max_length": [4], "min_length": [4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_multievent = {'INVdup': TestProcObject([self.ref_file, {"chr19": "ACTGCTAATGCGTTCACTGCTAATGCGTTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 200, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 3, "max_length": [4], "min_length": [2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 
1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)}
self.formatter = FormatterIO(self.par)
1
2023-12-01 14:39:20+00:00
16k
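The extract_vcf_records helper in the record above splits each VCF INFO string of the form 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738' into a dict with a one-line comprehension. A minimal standalone sketch of that parsing step (the parse_info name and the example record are illustrative, not taken from the repository):

def parse_info(info_str):
    # Split a semicolon-delimited VCF INFO string into a key/value dict,
    # mirroring the comprehension used in extract_vcf_records above.
    return {field.split('=')[0]: field.split('=')[1] for field in info_str.split(';')}

if __name__ == "__main__":
    rec = 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738'
    print(parse_info(rec))
    # {'END': '45590417', 'SVTYPE': 'dDUP', 'SVLEN': '539', 'TARGET': '45581738'}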
BiQiWHU/BWG
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = \"MultiScaleMaskedTransformerDecoder\"\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75" }, { "identifier": "COCOInstanceNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_instance_new_baseline_dataset_mapper.py", "snippet": "class COCOInstanceNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(str(self.tfm_gens))\n )\n\n self.img_format = image_format\n self.is_train = is_train\n \n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # TODO: get padding mask\n # by feeding a \"segmentation mask\" to the same transforms\n padding_mask = np.ones(image.shape[:2])\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n # the crop transformation has default padding value 0 for segmentation\n padding_mask = transforms.apply_segmentation(padding_mask)\n padding_mask = ~ padding_mask.astype(bool)\n\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n dataset_dict[\"padding_mask\"] = torch.as_tensor(np.ascontiguousarray(padding_mask))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"annotations\" in dataset_dict:\n # USER: Modify this if you want to keep them for some reason.\n for anno in dataset_dict[\"annotations\"]:\n # Let's always keep mask\n # if not self.mask_on:\n # anno.pop(\"segmentation\", None)\n anno.pop(\"keypoints\", None)\n\n # USER: Implement additional transformations if you have other types of data\n annos = [\n utils.transform_instance_annotations(obj, transforms, image_shape)\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n # NOTE: does not support BitMask due to augmentation\n # Current BitMask cannot handle empty objects\n instances = utils.annotations_to_instances(annos, image_shape)\n # After transforms such as cropping are applied, the bounding box may no longer\n # tightly bound the object. As an example, imagine a triangle object\n # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). 
The tight\n # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to\n # the intersection of original bounding box and the cropping box.\n instances.gt_boxes = instances.gt_masks.get_bounding_boxes()\n # Need to filter empty instances first (due to augmentation)\n instances = utils.filter_empty_instances(instances)\n # Generate masks from polygon\n h, w = instances.image_size\n # image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float)\n if hasattr(instances, 'gt_masks'):\n gt_masks = instances.gt_masks\n gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)\n instances.gt_masks = gt_masks\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "COCOPanopticNewBaselineDatasetMapper", "path": "mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py", "snippet": "class COCOPanopticNewBaselineDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer.\n\n This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n tfm_gens,\n image_format,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n crop_gen: crop augmentation\n tfm_gens: data augmentation\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n self.tfm_gens = tfm_gens\n logging.getLogger(__name__).info(\n \"[COCOPanopticNewBaselineDatasetMapper] Full TransformGens used in training: {}\".format(\n str(self.tfm_gens)\n )\n )\n\n self.img_format = image_format\n self.is_train = is_train\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n tfm_gens = build_transform_gen(cfg, is_train)\n\n ret = {\n \"is_train\": is_train,\n \"tfm_gens\": tfm_gens,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n image, transforms = T.apply_transform_gens(self.tfm_gens, image)\n image_shape = image.shape[:2] # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"annotations\", None)\n return dataset_dict\n\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n\n # apply the same transformation to panoptic 
segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n instances.gt_boxes = Boxes(torch.zeros((0, 4)))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n instances.gt_boxes = masks.get_bounding_boxes()\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerInstanceDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_instance_dataset_mapper.py", "snippet": "class MaskFormerInstanceDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for instance segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n aug_input = 
T.AugInput(image)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n\n # transform instnace masks\n assert \"annotations\" in dataset_dict\n for anno in dataset_dict[\"annotations\"]:\n anno.pop(\"keypoints\", None)\n\n annos = [\n utils.transform_instance_annotations(obj, transforms, image.shape[:2])\n for obj in dataset_dict.pop(\"annotations\")\n if obj.get(\"iscrowd\", 0) == 0\n ]\n\n if len(annos):\n assert \"segmentation\" in annos[0]\n segms = [obj[\"segmentation\"] for obj in annos]\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image.shape[:2]))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n masks = [torch.from_numpy(np.ascontiguousarray(x)) for x in masks]\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n # pad image\n image = F.pad(image, padding_size, value=128).contiguous()\n # pad mask\n masks = [F.pad(x, padding_size, value=0).contiguous() for x in masks]\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n # Prepare per-category binary masks\n instances = Instances(image_shape)\n instances.gt_classes = classes\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, image.shape[-2], image.shape[-1]))\n else:\n masks = BitMasks(torch.stack(masks))\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerPanopticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py", "snippet": "class MaskFormerPanopticDatasetMapper(MaskFormerSemanticDatasetMapper):\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for panoptic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n super().__init__(\n is_train,\n augmentations=augmentations,\n image_format=image_format,\n ignore_label=ignore_label,\n size_divisibility=size_divisibility,\n )\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert self.is_train, \"MaskFormerPanopticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n # semantic segmentation\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n # panoptic segmentation\n if \"pan_seg_file_name\" in dataset_dict:\n pan_seg_gt = utils.read_image(dataset_dict.pop(\"pan_seg_file_name\"), \"RGB\")\n segments_info = dataset_dict[\"segments_info\"]\n else:\n pan_seg_gt = None\n segments_info = None\n\n if pan_seg_gt is None:\n raise ValueError(\n \"Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n if sem_seg_gt is not None:\n sem_seg_gt = aug_input.sem_seg\n\n # apply the same transformation to panoptic segmentation\n pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)\n\n from panopticapi.utils import rgb2id\n\n pan_seg_gt = rgb2id(pan_seg_gt)\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n pan_seg_gt = torch.as_tensor(pan_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n pan_seg_gt = F.pad(\n pan_seg_gt, padding_size, value=0\n ).contiguous() # 0 is the VOID panoptic label\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n 
raise ValueError(\"Pemantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n pan_seg_gt = pan_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = []\n masks = []\n for segment_info in segments_info:\n class_id = segment_info[\"category_id\"]\n if not segment_info[\"iscrowd\"]:\n classes.append(class_id)\n masks.append(pan_seg_gt == segment_info[\"id\"])\n\n classes = np.array(classes)\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MaskFormerSemanticDatasetMapper", "path": "mask2former/data/dataset_mappers/mask_former_semantic_dataset_mapper.py", "snippet": "class MaskFormerSemanticDatasetMapper:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens = augmentations\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n # Build augmentation\n augs = [\n T.ResizeShortestEdge(\n cfg.INPUT.MIN_SIZE_TRAIN,\n cfg.INPUT.MAX_SIZE_TRAIN,\n cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,\n )\n ]\n if cfg.INPUT.CROP.ENABLED:\n augs.append(\n T.RandomCrop_CategoryAreaConstraint(\n cfg.INPUT.CROP.TYPE,\n cfg.INPUT.CROP.SIZE,\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,\n cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert 
self.is_train, \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\"double\")\n else:\n sem_seg_gt = None\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\"Semantic segmentation dataset should not have 'annotations'.\")\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros((0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1]))\n else:\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "SemanticSegmentorWithTTA", "path": "mask2former/test_time_augmentation.py", "snippet": "class SemanticSegmentorWithTTA(nn.Module):\n \"\"\"\n A SemanticSegmentor with test-time augmentation enabled.\n Its :meth:`__call__` method has the same interface as :meth:`SemanticSegmentor.forward`.\n \"\"\"\n\n def __init__(self, cfg, model, tta_mapper=None, batch_size=1):\n \"\"\"\n Args:\n cfg (CfgNode):\n model (SemanticSegmentor): a SemanticSegmentor to apply TTA on.\n tta_mapper (callable): takes a dataset dict and returns a list of\n augmented versions of the dataset dict. 
Defaults to\n `DatasetMapperTTA(cfg)`.\n batch_size (int): batch the augmented images into this batch size for inference.\n \"\"\"\n super().__init__()\n if isinstance(model, DistributedDataParallel):\n model = model.module\n self.cfg = cfg.clone()\n\n self.model = model\n\n if tta_mapper is None:\n tta_mapper = DatasetMapperTTA(cfg)\n self.tta_mapper = tta_mapper\n self.batch_size = batch_size\n\n def __call__(self, batched_inputs):\n \"\"\"\n Same input/output format as :meth:`SemanticSegmentor.forward`\n \"\"\"\n\n def _maybe_read_image(dataset_dict):\n ret = copy.copy(dataset_dict)\n if \"image\" not in ret:\n image = read_image(ret.pop(\"file_name\"), self.model.input_format)\n image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW\n ret[\"image\"] = image\n if \"height\" not in ret and \"width\" not in ret:\n ret[\"height\"] = image.shape[1]\n ret[\"width\"] = image.shape[2]\n return ret\n\n processed_results = []\n for x in batched_inputs:\n result = self._inference_one_image(_maybe_read_image(x))\n processed_results.append(result)\n return processed_results\n\n def _inference_one_image(self, input):\n \"\"\"\n Args:\n input (dict): one dataset dict with \"image\" field being a CHW tensor\n Returns:\n dict: one output dict\n \"\"\"\n orig_shape = (input[\"height\"], input[\"width\"])\n augmented_inputs, tfms = self._get_augmented_inputs(input)\n\n final_predictions = None\n count_predictions = 0\n for input, tfm in zip(augmented_inputs, tfms):\n count_predictions += 1\n with torch.no_grad():\n if final_predictions is None:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions = self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions = self.model([input])[0].pop(\"sem_seg\")\n else:\n if any(isinstance(t, HFlipTransform) for t in tfm.transforms):\n final_predictions += self.model([input])[0].pop(\"sem_seg\").flip(dims=[2])\n else:\n final_predictions += self.model([input])[0].pop(\"sem_seg\")\n\n final_predictions = final_predictions / count_predictions\n return {\"sem_seg\": final_predictions}\n\n def _get_augmented_inputs(self, input):\n augmented_inputs = self.tta_mapper(input)\n tfms = [x.pop(\"transforms\") for x in augmented_inputs]\n return augmented_inputs, tfms" }, { "identifier": "InstanceSegEvaluator", "path": "mask2former/evaluation/instance_evaluation.py", "snippet": "class InstanceSegEvaluator(COCOEvaluator):\n \"\"\"\n Evaluate AR for object proposals, AP for instance detection/segmentation, AP\n for keypoint detection outputs using COCO's metrics.\n See http://cocodataset.org/#detection-eval and\n http://cocodataset.org/#keypoints-eval to understand its metrics.\n The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means\n the metric cannot be computed (e.g. due to no predictions made).\n\n In addition to COCO, this evaluator is able to support any bounding box detection,\n instance segmentation, or keypoint detection dataset.\n \"\"\"\n\n def _eval_predictions(self, predictions, img_ids=None):\n \"\"\"\n Evaluate predictions. 
Fill self._results with the metrics of the tasks.\n \"\"\"\n self._logger.info(\"Preparing results for COCO format ...\")\n coco_results = list(itertools.chain(*[x[\"instances\"] for x in predictions]))\n tasks = self._tasks or self._tasks_from_predictions(coco_results)\n\n # unmap the category ids for COCO\n if hasattr(self._metadata, \"thing_dataset_id_to_contiguous_id\"):\n dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id\n # all_contiguous_ids = list(dataset_id_to_contiguous_id.values())\n # num_classes = len(all_contiguous_ids)\n # assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1\n\n reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}\n for result in coco_results:\n category_id = result[\"category_id\"]\n # assert category_id < num_classes, (\n # f\"A prediction has class={category_id}, \"\n # f\"but the dataset only has {num_classes} classes and \"\n # f\"predicted class id should be in [0, {num_classes - 1}].\"\n # )\n assert category_id in reverse_id_mapping, (\n f\"A prediction has class={category_id}, \"\n f\"but the dataset only has class ids in {dataset_id_to_contiguous_id}.\"\n )\n result[\"category_id\"] = reverse_id_mapping[category_id]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"coco_instances_results.json\")\n self._logger.info(\"Saving results to {}\".format(file_path))\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(coco_results))\n f.flush()\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\n \"Evaluating predictions with {} COCO API...\".format(\n \"unofficial\" if self._use_fast_impl else \"official\"\n )\n )\n for task in sorted(tasks):\n assert task in {\"bbox\", \"segm\", \"keypoints\"}, f\"Got unknown task: {task}!\"\n coco_eval = (\n _evaluate_predictions_on_coco(\n self._coco_api,\n coco_results,\n task,\n kpt_oks_sigmas=self._kpt_oks_sigmas,\n use_fast_impl=self._use_fast_impl,\n img_ids=img_ids,\n max_dets_per_image=self._max_dets_per_image,\n )\n if len(coco_results) > 0\n else None # cocoapi does not handle empty results very well\n )\n\n res = self._derive_coco_results(\n coco_eval, task, class_names=self._metadata.get(\"thing_classes\")\n )\n self._results[task] = res" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
12296
@classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, 
cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...")
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...")
model = SemanticSegmentorWithTTA(cfg, model)
6
2023-11-29 17:15:46+00:00
16k
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/bark.py
[ { "identifier": "codec_decode", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def codec_decode(fine_tokens, model):\n \"\"\"Turn quantized audio codes into audio array using encodec.\"\"\"\n arr = torch.from_numpy(fine_tokens)[None]\n arr = arr.to(model.device)\n arr = arr.transpose(0, 1)\n emb = model.encodec.quantizer.decode(arr)\n out = model.encodec.decoder(emb)\n audio_arr = out.detach().cpu().numpy().squeeze()\n return audio_arr" }, { "identifier": "generate_coarse", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def generate_coarse(\n x_semantic,\n model,\n history_prompt=None,\n temp=0.7,\n top_k=None,\n top_p=None,\n silent=False,\n max_coarse_history=630, # min 60 (faster), max 630 (more context)\n sliding_window_len=60,\n base=None,\n use_kv_caching=True,\n):\n \"\"\"Generate coarse audio codes from semantic tokens.\n\n Args:\n x_semantic (np.ndarray): The semantic tokens to generate coarse audio codes from.\n model (BarkModel): The BarkModel to use for generating the coarse audio codes.\n history_prompt (tuple): A tuple of (semantic_history, coarse_history, fine_history) to use as a prompt for the generation.\n temp (float): The temperature to use for the generation.\n top_k (int): The number of top tokens to consider for the generation.\n top_p (float): The cumulative probability to consider for the generation.\n silent (bool): Whether to silence the tqdm progress bar.\n max_coarse_history (int): The maximum number of coarse audio codes to use as history.\n sliding_window_len (int): The length of the sliding window to use for the generation.\n base (tuple): A tuple of (semantic_history, coarse_history, fine_history) to use as a base for the generation.\n use_kv_caching (bool): Whether to use key-value caching for the generation.\n\n Returns:\n np.ndarray: The generated coarse audio codes.\n \"\"\"\n assert (\n isinstance(x_semantic, np.ndarray)\n and len(x_semantic.shape) == 1\n and len(x_semantic) > 0\n and x_semantic.min() >= 0\n and x_semantic.max() <= model.config.SEMANTIC_VOCAB_SIZE - 1\n )\n assert 60 <= max_coarse_history <= 630\n assert max_coarse_history + sliding_window_len <= 1024 - 256\n semantic_to_coarse_ratio = (\n model.config.COARSE_RATE_HZ / model.config.SEMANTIC_RATE_HZ * model.config.N_COARSE_CODEBOOKS\n )\n max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))\n if all(v is not None for v in history_prompt) or base is not None:\n if history_prompt is not None:\n x_history = history_prompt\n x_semantic_history = x_history[0]\n x_coarse_history = x_history[1]\n if base is not None:\n x_semantic_history = base[0]\n x_coarse_history = base[1]\n assert (\n isinstance(x_semantic_history, np.ndarray)\n and len(x_semantic_history.shape) == 1\n and len(x_semantic_history) > 0\n and x_semantic_history.min() >= 0\n and x_semantic_history.max() <= model.config.SEMANTIC_VOCAB_SIZE - 1\n and isinstance(x_coarse_history, np.ndarray)\n and len(x_coarse_history.shape) == 2\n and x_coarse_history.shape[0] == model.config.N_COARSE_CODEBOOKS\n and x_coarse_history.shape[-1] >= 0\n and x_coarse_history.min() >= 0\n and x_coarse_history.max() <= model.config.CODEBOOK_SIZE - 1\n and (\n round(x_coarse_history.shape[-1] / len(x_semantic_history), 1)\n == round(semantic_to_coarse_ratio / model.config.N_COARSE_CODEBOOKS, 1)\n )\n )\n x_coarse_history = (\n _flatten_codebooks(x_coarse_history, model.config.CODEBOOK_SIZE) + model.config.SEMANTIC_VOCAB_SIZE\n )\n # trim histories correctly\n n_semantic_hist_provided = 
np.min(\n [\n max_semantic_history,\n len(x_semantic_history) - len(x_semantic_history) % 2,\n int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)),\n ]\n )\n n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))\n x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32)\n x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32)\n # TODO: bit of a hack for time alignment (sounds better)\n x_coarse_history = x_coarse_history[:-2]\n else:\n x_semantic_history = np.array([], dtype=np.int32)\n x_coarse_history = np.array([], dtype=np.int32)\n # start loop\n n_steps = int(\n round(\n np.floor(len(x_semantic) * semantic_to_coarse_ratio / model.config.N_COARSE_CODEBOOKS)\n * model.config.N_COARSE_CODEBOOKS\n )\n )\n assert n_steps > 0 and n_steps % model.config.N_COARSE_CODEBOOKS == 0\n x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32)\n x_coarse = x_coarse_history.astype(np.int32)\n base_semantic_idx = len(x_semantic_history)\n with inference_mode():\n x_semantic_in = torch.from_numpy(x_semantic)[None].to(model.device)\n x_coarse_in = torch.from_numpy(x_coarse)[None].to(model.device)\n n_window_steps = int(np.ceil(n_steps / sliding_window_len))\n n_step = 0\n for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent):\n semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio))\n # pad from right side\n x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :]\n x_in = x_in[:, :256]\n x_in = F.pad(\n x_in,\n (0, 256 - x_in.shape[-1]),\n \"constant\",\n model.config.COARSE_SEMANTIC_PAD_TOKEN,\n )\n x_in = torch.hstack(\n [\n x_in,\n torch.tensor([model.config.COARSE_INFER_TOKEN])[None].to(model.device),\n x_coarse_in[:, -max_coarse_history:],\n ]\n )\n kv_cache = None\n for _ in range(sliding_window_len):\n if n_step >= n_steps:\n continue\n is_major_step = n_step % model.config.N_COARSE_CODEBOOKS == 0\n\n if use_kv_caching and kv_cache is not None:\n x_input = x_in[:, [-1]]\n else:\n x_input = x_in\n\n logits, kv_cache = model.coarse_model(x_input, use_cache=use_kv_caching, past_kv=kv_cache)\n logit_start_idx = (\n model.config.SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * model.config.CODEBOOK_SIZE\n )\n logit_end_idx = model.config.SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * model.config.CODEBOOK_SIZE\n relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx]\n if top_p is not None:\n # faster to convert to numpy\n logits_device = relevant_logits.device\n logits_dtype = relevant_logits.type()\n relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()\n sorted_indices = np.argsort(relevant_logits)[::-1]\n sorted_logits = relevant_logits[sorted_indices]\n cumulative_probs = np.cumsum(torch.nn.functional.softmax(sorted_logits))\n sorted_indices_to_remove = cumulative_probs > top_p\n sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()\n sorted_indices_to_remove[0] = False\n relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf\n relevant_logits = torch.from_numpy(relevant_logits)\n relevant_logits = relevant_logits.to(logits_device).type(logits_dtype)\n if top_k is not None:\n v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))\n relevant_logits[relevant_logits < v[-1]] = -float(\"Inf\")\n probs = torch.nn.functional.softmax(relevant_logits / temp, dim=-1)\n item_next = torch.multinomial(probs, num_samples=1)\n item_next += 
logit_start_idx\n x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1)\n x_in = torch.cat((x_in, item_next[None]), dim=1)\n del logits, relevant_logits, probs, item_next\n n_step += 1\n del x_in\n del x_semantic_in\n gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :]\n del x_coarse_in\n assert len(gen_coarse_arr) == n_steps\n gen_coarse_audio_arr = (\n gen_coarse_arr.reshape(-1, model.config.N_COARSE_CODEBOOKS).T - model.config.SEMANTIC_VOCAB_SIZE\n )\n for n in range(1, model.config.N_COARSE_CODEBOOKS):\n gen_coarse_audio_arr[n, :] -= n * model.config.CODEBOOK_SIZE\n clear_cuda_cache()\n return gen_coarse_audio_arr" }, { "identifier": "generate_fine", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def generate_fine(\n x_coarse_gen,\n model,\n history_prompt=None,\n temp=0.5,\n silent=True,\n base=None,\n):\n \"\"\"Generate full audio codes from coarse audio codes.\n\n Args:\n x_coarse_gen (np.ndarray): The coarse audio codes to generate full audio codes from.\n model (BarkModel): The BarkModel to use for generating the full audio codes.\n history_prompt (tuple): A tuple of (semantic_history, coarse_history, fine_history) to use as a prompt for the generation.\n temp (float): The temperature to use for the generation.\n silent (bool): Whether to silence the tqdm progress bar.\n base (tuple): A tuple of (semantic_history, coarse_history, fine_history) to use as a base for the generation.\n\n Returns:\n np.ndarray: The generated full audio codes.\n \"\"\"\n assert (\n isinstance(x_coarse_gen, np.ndarray)\n and len(x_coarse_gen.shape) == 2\n and 1 <= x_coarse_gen.shape[0] <= model.config.N_FINE_CODEBOOKS - 1\n and x_coarse_gen.shape[1] > 0\n and x_coarse_gen.min() >= 0\n and x_coarse_gen.max() <= model.config.CODEBOOK_SIZE - 1\n )\n if all(v is not None for v in history_prompt) or base is not None:\n if history_prompt is not None:\n x_fine_history = history_prompt[2]\n if base is not None:\n x_fine_history = base[2]\n assert (\n isinstance(x_fine_history, np.ndarray)\n and len(x_fine_history.shape) == 2\n and x_fine_history.shape[0] == model.config.N_FINE_CODEBOOKS\n and x_fine_history.shape[1] >= 0\n and x_fine_history.min() >= 0\n and x_fine_history.max() <= model.config.CODEBOOK_SIZE - 1\n )\n else:\n x_fine_history = None\n n_coarse = x_coarse_gen.shape[0]\n # make input arr\n in_arr = np.vstack(\n [\n x_coarse_gen,\n np.zeros((model.config.N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1]))\n + model.config.CODEBOOK_SIZE, # padding\n ]\n ).astype(np.int32)\n # prepend history if available (max 512)\n if x_fine_history is not None:\n x_fine_history = x_fine_history.astype(np.int32)\n in_arr = np.hstack(\n [\n x_fine_history[:, -512:].astype(np.int32),\n in_arr,\n ]\n )\n n_history = x_fine_history[:, -512:].shape[1]\n else:\n n_history = 0\n n_remove_from_end = 0\n # need to pad if too short (since non-causal model)\n if in_arr.shape[1] < 1024:\n n_remove_from_end = 1024 - in_arr.shape[1]\n in_arr = np.hstack(\n [\n in_arr,\n np.zeros((model.config.N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32)\n + model.config.CODEBOOK_SIZE,\n ]\n )\n # we can be lazy about fractional loop and just keep overwriting codebooks\n n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1\n with inference_mode():\n in_arr = torch.tensor(in_arr.T).to(model.device)\n for n in tqdm.tqdm(range(n_loops), disable=silent):\n start_idx = np.min([n * 512, in_arr.shape[0] - 1024])\n start_fill_idx = np.min([n_history 
+ n * 512, in_arr.shape[0] - 512])\n rel_start_fill_idx = start_fill_idx - start_idx\n in_buffer = in_arr[start_idx : start_idx + 1024, :][None]\n for nn in range(n_coarse, model.config.N_FINE_CODEBOOKS):\n logits = model.fine_model(nn, in_buffer)\n if temp is None:\n relevant_logits = logits[0, rel_start_fill_idx:, : model.config.CODEBOOK_SIZE]\n codebook_preds = torch.argmax(relevant_logits, -1)\n else:\n relevant_logits = logits[0, :, : model.config.CODEBOOK_SIZE] / temp\n probs = F.softmax(relevant_logits, dim=-1)\n codebook_preds = torch.hstack(\n [torch.multinomial(probs[n], num_samples=1) for n in range(rel_start_fill_idx, 1024)]\n )\n in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds\n del logits, codebook_preds\n # transfer over info into model_in and convert to numpy\n for nn in range(n_coarse, model.config.N_FINE_CODEBOOKS):\n in_arr[start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn] = in_buffer[\n 0, rel_start_fill_idx:, nn\n ]\n del in_buffer\n gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T\n del in_arr\n gen_fine_arr = gen_fine_arr[:, n_history:]\n if n_remove_from_end > 0:\n gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end]\n assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1]\n clear_cuda_cache()\n return gen_fine_arr" }, { "identifier": "generate_text_semantic", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def generate_text_semantic(\n text,\n model,\n history_prompt=None,\n temp=0.7,\n top_k=None,\n top_p=None,\n silent=False,\n min_eos_p=0.2,\n max_gen_duration_s=None,\n allow_early_stop=True,\n base=None,\n use_kv_caching=True,\n **kwargs, # pylint: disable=unused-argument\n):\n \"\"\"Generate semantic tokens from text.\n\n Args:\n text (str): The text to generate semantic tokens from.\n model (BarkModel): The BarkModel to use for generating the semantic tokens.\n history_prompt (tuple): A tuple of (semantic_history, coarse_history, fine_history) to use as a prompt for the generation.\n temp (float): The temperature to use for the generation.\n top_k (int): The number of top tokens to consider for the generation.\n top_p (float): The cumulative probability to consider for the generation.\n silent (bool): Whether to silence the tqdm progress bar.\n min_eos_p (float): The minimum probability to consider for the end of sentence token.\n max_gen_duration_s (float): The maximum duration in seconds to generate for.\n allow_early_stop (bool): Whether to allow the generation to stop early.\n base (tuple): A tuple of (semantic_history, coarse_history, fine_history) to use as a base for the generation.\n use_kv_caching (bool): Whether to use key-value caching for the generation.\n **kwargs: Additional keyword arguments. 
They are ignored.\n\n Returns:\n np.ndarray: The generated semantic tokens.\n \"\"\"\n assert isinstance(text, str)\n text = _normalize_whitespace(text)\n assert len(text.strip()) > 0\n if all(v is not None for v in history_prompt) or base is not None:\n if history_prompt is not None:\n semantic_history = history_prompt[0]\n if base is not None:\n semantic_history = base[0]\n assert (\n isinstance(semantic_history, np.ndarray)\n and len(semantic_history.shape) == 1\n and len(semantic_history) > 0\n and semantic_history.min() >= 0\n and semantic_history.max() <= model.config.SEMANTIC_VOCAB_SIZE - 1\n )\n else:\n semantic_history = None\n encoded_text = np.array(_tokenize(model.tokenizer, text)) + model.config.TEXT_ENCODING_OFFSET\n if len(encoded_text) > 256:\n p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1)\n logger.warning(f\"warning, text too long, lopping of last {p}%\")\n encoded_text = encoded_text[:256]\n encoded_text = np.pad(\n encoded_text,\n (0, 256 - len(encoded_text)),\n constant_values=model.config.TEXT_PAD_TOKEN,\n mode=\"constant\",\n )\n if semantic_history is not None:\n semantic_history = semantic_history.astype(np.int64)\n # lop off if history is too long, pad if needed\n semantic_history = semantic_history[-256:]\n semantic_history = np.pad(\n semantic_history,\n (0, 256 - len(semantic_history)),\n constant_values=model.config.SEMANTIC_PAD_TOKEN,\n mode=\"constant\",\n )\n else:\n semantic_history = np.array([model.config.SEMANTIC_PAD_TOKEN] * 256)\n x = torch.from_numpy(\n np.hstack([encoded_text, semantic_history, np.array([model.config.SEMANTIC_INFER_TOKEN])]).astype(np.int64)\n )[None]\n assert x.shape[1] == 256 + 256 + 1\n with inference_mode():\n x = x.to(model.device)\n n_tot_steps = 768\n # custom tqdm updates since we don't know when eos will occur\n pbar = tqdm.tqdm(disable=silent, total=100)\n pbar_state = 0\n tot_generated_duration_s = 0\n kv_cache = None\n for n in range(n_tot_steps):\n if use_kv_caching and kv_cache is not None:\n x_input = x[:, [-1]]\n else:\n x_input = x\n logits, kv_cache = model.semantic_model(\n x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache\n )\n relevant_logits = logits[0, 0, : model.config.SEMANTIC_VOCAB_SIZE]\n if allow_early_stop:\n relevant_logits = torch.hstack(\n (relevant_logits, logits[0, 0, [model.config.SEMANTIC_PAD_TOKEN]])\n ) # eos\n if top_p is not None:\n # faster to convert to numpy\n logits_device = relevant_logits.device\n logits_dtype = relevant_logits.type()\n relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()\n sorted_indices = np.argsort(relevant_logits)[::-1]\n sorted_logits = relevant_logits[sorted_indices]\n cumulative_probs = np.cumsum(softmax(sorted_logits))\n sorted_indices_to_remove = cumulative_probs > top_p\n sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()\n sorted_indices_to_remove[0] = False\n relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf\n relevant_logits = torch.from_numpy(relevant_logits)\n relevant_logits = relevant_logits.to(logits_device).type(logits_dtype)\n if top_k is not None:\n v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))\n relevant_logits[relevant_logits < v[-1]] = -float(\"Inf\")\n probs = torch.softmax(relevant_logits / temp, dim=-1)\n item_next = torch.multinomial(probs, num_samples=1)\n if allow_early_stop and (\n item_next == model.config.SEMANTIC_VOCAB_SIZE or (min_eos_p is not None and probs[-1] >= min_eos_p)\n ):\n # eos found, so break\n 
pbar.update(100 - pbar_state)\n break\n x = torch.cat((x, item_next[None]), dim=1)\n tot_generated_duration_s += 1 / model.config.SEMANTIC_RATE_HZ\n if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s:\n pbar.update(100 - pbar_state)\n break\n if n == n_tot_steps - 1:\n pbar.update(100 - pbar_state)\n break\n del logits, relevant_logits, probs, item_next\n req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))])\n if req_pbar_state > pbar_state:\n pbar.update(req_pbar_state - pbar_state)\n pbar_state = req_pbar_state\n pbar.close()\n out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :]\n assert all(out >= 0) and all(out < model.config.SEMANTIC_VOCAB_SIZE)\n clear_cuda_cache()\n return out" }, { "identifier": "generate_voice", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def generate_voice(\n audio,\n model,\n output_path,\n):\n \"\"\"Generate a new voice from a given audio and text prompt.\n\n Args:\n audio (np.ndarray): The audio to use as a base for the new voice.\n text (str): Transcription of the audio you are clonning.\n model (BarkModel): The BarkModel to use for generating the new voice.\n output_path (str): The path to save the generated voice to.\n \"\"\"\n if isinstance(audio, str):\n audio, sr = torchaudio.load(audio)\n audio = convert_audio(audio, sr, model.config.sample_rate, model.encodec.channels)\n audio = audio.unsqueeze(0).to(model.device)\n\n with torch.no_grad():\n encoded_frames = model.encodec.encode(audio)\n codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T]\n\n # move codes to cpu\n codes = codes.cpu().numpy()\n\n # generate semantic tokens\n # Load the HuBERT model\n hubert_manager = HubertManager()\n # hubert_manager.make_sure_hubert_installed(model_path=model.config.LOCAL_MODEL_PATHS[\"hubert\"])\n hubert_manager.make_sure_tokenizer_installed(model_path=model.config.LOCAL_MODEL_PATHS[\"hubert_tokenizer\"])\n\n hubert_model = CustomHubert(checkpoint_path=model.config.LOCAL_MODEL_PATHS[\"hubert\"]).to(model.device)\n\n # Load the CustomTokenizer model\n tokenizer = HubertTokenizer.load_from_checkpoint(\n model.config.LOCAL_MODEL_PATHS[\"hubert_tokenizer\"], map_location=model.device\n )\n # semantic_tokens = model.text_to_semantic(\n # text, max_gen_duration_s=seconds, top_k=50, top_p=0.95, temp=0.7\n # ) # not 100%\n semantic_vectors = hubert_model.forward(audio[0], input_sample_hz=model.config.sample_rate)\n semantic_tokens = tokenizer.get_token(semantic_vectors)\n semantic_tokens = semantic_tokens.cpu().numpy()\n\n np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens)" }, { "identifier": "load_voice", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def load_voice(model, voice: str, extra_voice_dirs: List[str] = []): # pylint: disable=dangerous-default-value\n if voice == \"random\":\n return None, None, None\n\n voices = get_voices(extra_voice_dirs)\n paths = voices[voice]\n\n # bark only uses a single sample for cloning\n if len(paths) > 1:\n raise ValueError(f\"Voice {voice} has multiple paths: {paths}\")\n\n try:\n path = voices[voice]\n except KeyError as e:\n raise KeyError(f\"Voice {voice} not found in {extra_voice_dirs}\") from e\n\n if len(paths) == 1 and paths[0].endswith(\".npz\"):\n return load_npz(path[0])\n\n audio_path = paths[0]\n # replace the file extension with .npz\n output_path = os.path.splitext(audio_path)[0] + \".npz\"\n generate_voice(audio=audio_path, model=model, 
output_path=output_path)\n return load_voice(model, voice, extra_voice_dirs)" }, { "identifier": "load_model", "path": "TTS/tts/layers/bark/load_model.py", "snippet": "def load_model(ckpt_path, device, config, model_type=\"text\"):\n logger.info(f\"loading {model_type} model from {ckpt_path}...\")\n\n if device == \"cpu\":\n logger.warning(\"No GPU being used. Careful, Inference might be extremely slow!\")\n if model_type == \"text\":\n ConfigClass = GPTConfig\n ModelClass = GPT\n elif model_type == \"coarse\":\n ConfigClass = GPTConfig\n ModelClass = GPT\n elif model_type == \"fine\":\n ConfigClass = FineGPTConfig\n ModelClass = FineGPT\n else:\n raise NotImplementedError()\n if (\n not config.USE_SMALLER_MODELS\n and os.path.exists(ckpt_path)\n and _md5(ckpt_path) != config.REMOTE_MODEL_PATHS[model_type][\"checksum\"]\n ):\n logger.warning(f\"found outdated {model_type} model, removing...\")\n os.remove(ckpt_path)\n if not os.path.exists(ckpt_path):\n logger.info(f\"{model_type} model not found, downloading...\")\n _download(config.REMOTE_MODEL_PATHS[model_type][\"path\"], ckpt_path, config.CACHE_DIR)\n\n checkpoint = torch.load(ckpt_path, map_location=device)\n # this is a hack\n model_args = checkpoint[\"model_args\"]\n if \"input_vocab_size\" not in model_args:\n model_args[\"input_vocab_size\"] = model_args[\"vocab_size\"]\n model_args[\"output_vocab_size\"] = model_args[\"vocab_size\"]\n del model_args[\"vocab_size\"]\n\n gptconf = ConfigClass(**checkpoint[\"model_args\"])\n if model_type == \"text\":\n config.semantic_config = gptconf\n elif model_type == \"coarse\":\n config.coarse_config = gptconf\n elif model_type == \"fine\":\n config.fine_config = gptconf\n\n model = ModelClass(gptconf)\n state_dict = checkpoint[\"model\"]\n # fixup checkpoint\n unwanted_prefix = \"_orig_mod.\"\n for k, _ in list(state_dict.items()):\n if k.startswith(unwanted_prefix):\n state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)\n extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())\n extra_keys = set(k for k in extra_keys if not k.endswith(\".attn.bias\"))\n missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())\n missing_keys = set(k for k in missing_keys if not k.endswith(\".attn.bias\"))\n if len(extra_keys) != 0:\n raise ValueError(f\"extra keys found: {extra_keys}\")\n if len(missing_keys) != 0:\n raise ValueError(f\"missing keys: {missing_keys}\")\n model.load_state_dict(state_dict, strict=False)\n n_params = model.get_num_params()\n val_loss = checkpoint[\"best_val_loss\"].item()\n logger.info(f\"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss\")\n model.eval()\n model.to(device)\n del checkpoint, state_dict\n clear_cuda_cache()\n return model, config" }, { "identifier": "GPT", "path": "TTS/tts/layers/bark/model.py", "snippet": "class GPT(nn.Module):\n def __init__(self, config):\n super().__init__()\n assert config.input_vocab_size is not None\n assert config.output_vocab_size is not None\n assert config.block_size is not None\n self.config = config\n\n self.transformer = nn.ModuleDict(\n dict(\n wte=nn.Embedding(config.input_vocab_size, config.n_embd),\n wpe=nn.Embedding(config.block_size, config.n_embd),\n drop=nn.Dropout(config.dropout),\n h=nn.ModuleList([Block(config, idx) for idx in range(config.n_layer)]),\n ln_f=LayerNorm(config.n_embd, bias=config.bias),\n )\n )\n self.lm_head = nn.Linear(config.n_embd, config.output_vocab_size, bias=False)\n\n def get_num_params(self, non_embedding=True):\n \"\"\"\n Return the number of 
parameters in the model.\n For non-embedding count (default), the position embeddings get subtracted.\n The token embeddings would too, except due to the parameter sharing these\n params are actually used as weights in the final layer, so we include them.\n \"\"\"\n n_params = sum(p.numel() for p in self.parameters())\n if non_embedding:\n n_params -= self.transformer.wte.weight.numel()\n n_params -= self.transformer.wpe.weight.numel()\n return n_params\n\n def forward(self, idx, merge_context=False, past_kv=None, position_ids=None, use_cache=False):\n device = idx.device\n _, t = idx.size()\n if past_kv is not None:\n assert t == 1\n tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)\n else:\n if merge_context:\n assert idx.shape[1] >= 256 + 256 + 1\n t = idx.shape[1] - 256\n else:\n assert (\n t <= self.config.block_size\n ), f\"Cannot forward sequence of length {t}, block size is only {self.config.block_size}\"\n\n # forward the GPT model itself\n if merge_context:\n tok_emb = torch.cat(\n [\n self.transformer.wte(idx[:, :256]) + self.transformer.wte(idx[:, 256 : 256 + 256]),\n self.transformer.wte(idx[:, 256 + 256 :]),\n ],\n dim=1,\n )\n else:\n tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)\n\n if past_kv is None:\n past_length = 0\n past_kv = tuple([None] * len(self.transformer.h))\n else:\n past_length = past_kv[0][0].size(-2)\n\n if position_ids is None:\n position_ids = torch.arange(past_length, t + past_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0) # shape (1, t)\n assert position_ids.shape == (1, t)\n\n pos_emb = self.transformer.wpe(position_ids) # position embeddings of shape (1, t, n_embd)\n\n x = self.transformer.drop(tok_emb + pos_emb)\n\n new_kv = () if use_cache else None\n\n for _, (block, past_layer_kv) in enumerate(zip(self.transformer.h, past_kv)):\n x, kv = block(x, past_kv=past_layer_kv, use_cache=use_cache)\n\n if use_cache:\n new_kv = new_kv + (kv,)\n\n x = self.transformer.ln_f(x)\n\n # inference-time mini-optimization: only forward the lm_head on the very last position\n logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim\n\n return (logits, new_kv)" }, { "identifier": "FineGPT", "path": "TTS/tts/layers/bark/model_fine.py", "snippet": "class FineGPT(GPT):\n def __init__(self, config):\n super().__init__(config)\n del self.lm_head\n self.config = config\n self.n_codes_total = config.n_codes_total\n self.transformer = nn.ModuleDict(\n dict(\n wtes=nn.ModuleList(\n [nn.Embedding(config.input_vocab_size, config.n_embd) for _ in range(config.n_codes_total)]\n ),\n wpe=nn.Embedding(config.block_size, config.n_embd),\n drop=nn.Dropout(config.dropout),\n h=nn.ModuleList([FineBlock(config) for _ in range(config.n_layer)]),\n ln_f=nn.LayerNorm(config.n_embd),\n )\n )\n self.lm_heads = nn.ModuleList(\n [\n nn.Linear(config.n_embd, config.output_vocab_size, bias=False)\n for _ in range(config.n_codes_given, self.n_codes_total)\n ]\n )\n for i in range(self.n_codes_total - config.n_codes_given):\n self.transformer.wtes[i + 1].weight = self.lm_heads[i].weight\n\n def forward(self, pred_idx, idx):\n device = idx.device\n b, t, codes = idx.size()\n assert (\n t <= self.config.block_size\n ), f\"Cannot forward sequence of length {t}, block size is only {self.config.block_size}\"\n assert pred_idx > 0, \"cannot predict 0th codebook\"\n assert codes == self.n_codes_total, (b, t, codes)\n pos = torch.arange(0, t, dtype=torch.long, 
device=device).unsqueeze(0) # shape (1, t)\n\n # forward the GPT model itself\n tok_embs = [\n wte(idx[:, :, i]).unsqueeze(-1) for i, wte in enumerate(self.transformer.wtes)\n ] # token embeddings of shape (b, t, n_embd)\n tok_emb = torch.cat(tok_embs, dim=-1)\n pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)\n x = tok_emb[:, :, :, : pred_idx + 1].sum(dim=-1)\n x = self.transformer.drop(x + pos_emb)\n for block in self.transformer.h:\n x = block(x)\n x = self.transformer.ln_f(x)\n logits = self.lm_heads[pred_idx - self.config.n_codes_given](x)\n return logits\n\n def get_num_params(self, non_embedding=True):\n \"\"\"\n Return the number of parameters in the model.\n For non-embedding count (default), the position embeddings get subtracted.\n The token embeddings would too, except due to the parameter sharing these\n params are actually used as weights in the final layer, so we include them.\n \"\"\"\n n_params = sum(p.numel() for p in self.parameters())\n if non_embedding:\n for wte in self.transformer.wtes:\n n_params -= wte.weight.numel()\n n_params -= self.transformer.wpe.weight.numel()\n return n_params" }, { "identifier": "BaseTTS", "path": "TTS/tts/models/base_tts.py", "snippet": "class BaseTTS(BaseTrainerModel):\n \"\"\"Base `tts` class. Every new `tts` model must inherit this.\n\n It defines common `tts` specific functions on top of `Model` implementation.\n \"\"\"\n\n MODEL_TYPE = \"tts\"\n\n def __init__(\n self,\n config: Coqpit,\n ap: \"AudioProcessor\",\n tokenizer: \"TTSTokenizer\",\n speaker_manager: SpeakerManager = None,\n language_manager: LanguageManager = None,\n ):\n super().__init__()\n self.config = config\n self.ap = ap\n self.tokenizer = tokenizer\n self.speaker_manager = speaker_manager\n self.language_manager = language_manager\n self._set_model_args(config)\n\n def _set_model_args(self, config: Coqpit):\n \"\"\"Setup model args based on the config type (`ModelConfig` or `ModelArgs`).\n\n `ModelArgs` has all the fields reuqired to initialize the model architecture.\n\n `ModelConfig` has all the fields required for training, inference and containes `ModelArgs`.\n\n If the config is for training with a name like \"*Config\", then the model args are embeded in the\n config.model_args\n\n If the config is for the model with a name like \"*Args\", then we assign the directly.\n \"\"\"\n # don't use isintance not to import recursively\n if \"Config\" in config.__class__.__name__:\n config_num_chars = (\n self.config.model_args.num_chars if hasattr(self.config, \"model_args\") else self.config.num_chars\n )\n num_chars = config_num_chars if self.tokenizer is None else self.tokenizer.characters.num_chars\n if \"characters\" in config:\n self.config.num_chars = num_chars\n if hasattr(self.config, \"model_args\"):\n config.model_args.num_chars = num_chars\n self.args = self.config.model_args\n else:\n self.config = config\n self.args = config.model_args\n elif \"Args\" in config.__class__.__name__:\n self.args = config\n else:\n raise ValueError(\"config must be either a *Config or *Args\")\n\n def init_multispeaker(self, config: Coqpit, data: List = None):\n \"\"\"Initialize a speaker embedding layer if needen and define expected embedding channel size for defining\n `in_channels` size of the connected layers.\n\n This implementation yields 3 possible outcomes:\n\n 1. If `config.use_speaker_embedding` and `config.use_d_vector_file are False, do nothing.\n 2. 
If `config.use_d_vector_file` is True, set expected embedding channel size to `config.d_vector_dim` or 512.\n 3. If `config.use_speaker_embedding`, initialize a speaker embedding layer with channel size of\n `config.d_vector_dim` or 512.\n\n You can override this function for new models.\n\n Args:\n config (Coqpit): Model configuration.\n \"\"\"\n # set number of speakers\n if self.speaker_manager is not None:\n self.num_speakers = self.speaker_manager.num_speakers\n elif hasattr(config, \"num_speakers\"):\n self.num_speakers = config.num_speakers\n\n # set ultimate speaker embedding size\n if config.use_speaker_embedding or config.use_d_vector_file:\n self.embedded_speaker_dim = (\n config.d_vector_dim if \"d_vector_dim\" in config and config.d_vector_dim is not None else 512\n )\n # init speaker embedding layer\n if config.use_speaker_embedding and not config.use_d_vector_file:\n print(\" > Init speaker_embedding layer.\")\n self.speaker_embedding = nn.Embedding(self.num_speakers, self.embedded_speaker_dim)\n self.speaker_embedding.weight.data.normal_(0, 0.3)\n\n def get_aux_input(self, **kwargs) -> Dict:\n \"\"\"Prepare and return `aux_input` used by `forward()`\"\"\"\n return {\"speaker_id\": None, \"style_wav\": None, \"d_vector\": None, \"language_id\": None}\n\n def get_aux_input_from_test_sentences(self, sentence_info):\n if hasattr(self.config, \"model_args\"):\n config = self.config.model_args\n else:\n config = self.config\n\n # extract speaker and language info\n text, speaker_name, style_wav, language_name = None, None, None, None\n\n if isinstance(sentence_info, list):\n if len(sentence_info) == 1:\n text = sentence_info[0]\n elif len(sentence_info) == 2:\n text, speaker_name = sentence_info\n elif len(sentence_info) == 3:\n text, speaker_name, style_wav = sentence_info\n elif len(sentence_info) == 4:\n text, speaker_name, style_wav, language_name = sentence_info\n else:\n text = sentence_info\n\n # get speaker id/d_vector\n speaker_id, d_vector, language_id = None, None, None\n if self.speaker_manager is not None:\n if config.use_d_vector_file:\n if speaker_name is None:\n d_vector = self.speaker_manager.get_random_embedding()\n else:\n d_vector = self.speaker_manager.get_d_vector_by_name(speaker_name)\n elif config.use_speaker_embedding:\n if speaker_name is None:\n speaker_id = self.speaker_manager.get_random_id()\n else:\n speaker_id = self.speaker_manager.name_to_id[speaker_name]\n\n # get language id\n if self.language_manager is not None and config.use_language_embedding and language_name is not None:\n language_id = self.language_manager.name_to_id[language_name]\n\n return {\n \"text\": text,\n \"speaker_id\": speaker_id,\n \"style_wav\": style_wav,\n \"d_vector\": d_vector,\n \"language_id\": language_id,\n }\n\n def format_batch(self, batch: Dict) -> Dict:\n \"\"\"Generic batch formatting for `TTSDataset`.\n\n You must override this if you use a custom dataset.\n\n Args:\n batch (Dict): [description]\n\n Returns:\n Dict: [description]\n \"\"\"\n # setup input batch\n text_input = batch[\"token_id\"]\n text_lengths = batch[\"token_id_lengths\"]\n speaker_names = batch[\"speaker_names\"]\n linear_input = batch[\"linear\"]\n mel_input = batch[\"mel\"]\n mel_lengths = batch[\"mel_lengths\"]\n stop_targets = batch[\"stop_targets\"]\n item_idx = batch[\"item_idxs\"]\n d_vectors = batch[\"d_vectors\"]\n speaker_ids = batch[\"speaker_ids\"]\n attn_mask = batch[\"attns\"]\n waveform = batch[\"waveform\"]\n pitch = batch[\"pitch\"]\n energy = batch[\"energy\"]\n language_ids 
= batch[\"language_ids\"]\n max_text_length = torch.max(text_lengths.float())\n max_spec_length = torch.max(mel_lengths.float())\n\n # compute durations from attention masks\n durations = None\n if attn_mask is not None:\n durations = torch.zeros(attn_mask.shape[0], attn_mask.shape[2])\n for idx, am in enumerate(attn_mask):\n # compute raw durations\n c_idxs = am[:, : text_lengths[idx], : mel_lengths[idx]].max(1)[1]\n # c_idxs, counts = torch.unique_consecutive(c_idxs, return_counts=True)\n c_idxs, counts = torch.unique(c_idxs, return_counts=True)\n dur = torch.ones([text_lengths[idx]]).to(counts.dtype)\n dur[c_idxs] = counts\n # smooth the durations and set any 0 duration to 1\n # by cutting off from the largest duration indeces.\n extra_frames = dur.sum() - mel_lengths[idx]\n largest_idxs = torch.argsort(-dur)[:extra_frames]\n dur[largest_idxs] -= 1\n assert (\n dur.sum() == mel_lengths[idx]\n ), f\" [!] total duration {dur.sum()} vs spectrogram length {mel_lengths[idx]}\"\n durations[idx, : text_lengths[idx]] = dur\n\n # set stop targets wrt reduction factor\n stop_targets = stop_targets.view(text_input.shape[0], stop_targets.size(1) // self.config.r, -1)\n stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze(2)\n stop_target_lengths = torch.divide(mel_lengths, self.config.r).ceil_()\n\n return {\n \"text_input\": text_input,\n \"text_lengths\": text_lengths,\n \"speaker_names\": speaker_names,\n \"mel_input\": mel_input,\n \"mel_lengths\": mel_lengths,\n \"linear_input\": linear_input,\n \"stop_targets\": stop_targets,\n \"stop_target_lengths\": stop_target_lengths,\n \"attn_mask\": attn_mask,\n \"durations\": durations,\n \"speaker_ids\": speaker_ids,\n \"d_vectors\": d_vectors,\n \"max_text_length\": float(max_text_length),\n \"max_spec_length\": float(max_spec_length),\n \"item_idx\": item_idx,\n \"waveform\": waveform,\n \"pitch\": pitch,\n \"energy\": energy,\n \"language_ids\": language_ids,\n \"audio_unique_names\": batch[\"audio_unique_names\"],\n }\n\n def get_sampler(self, config: Coqpit, dataset: TTSDataset, num_gpus=1):\n weights = None\n data_items = dataset.samples\n\n if getattr(config, \"use_language_weighted_sampler\", False):\n alpha = getattr(config, \"language_weighted_sampler_alpha\", 1.0)\n print(\" > Using Language weighted sampler with alpha:\", alpha)\n weights = get_language_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_speaker_weighted_sampler\", False):\n alpha = getattr(config, \"speaker_weighted_sampler_alpha\", 1.0)\n print(\" > Using Speaker weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_speaker_balancer_weights(data_items) * alpha\n else:\n weights = get_speaker_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_length_weighted_sampler\", False):\n alpha = getattr(config, \"length_weighted_sampler_alpha\", 1.0)\n print(\" > Using Length weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_length_balancer_weights(data_items) * alpha\n else:\n weights = get_length_balancer_weights(data_items) * alpha\n\n if weights is not None:\n sampler = WeightedRandomSampler(weights, len(weights))\n else:\n sampler = None\n\n # sampler for DDP\n if sampler is None:\n sampler = DistributedSampler(dataset) if num_gpus > 1 else None\n else: # If a sampler is already defined use this sampler and DDP sampler together\n sampler = DistributedSamplerWrapper(sampler) if num_gpus > 1 else sampler\n\n return sampler\n\n def get_data_loader(\n self,\n config: 
Coqpit,\n assets: Dict,\n is_eval: bool,\n samples: Union[List[Dict], List[List]],\n verbose: bool,\n num_gpus: int,\n rank: int = None,\n ) -> \"DataLoader\":\n if is_eval and not config.run_eval:\n loader = None\n else:\n # setup multi-speaker attributes\n if self.speaker_manager is not None:\n if hasattr(config, \"model_args\"):\n speaker_id_mapping = (\n self.speaker_manager.name_to_id if config.model_args.use_speaker_embedding else None\n )\n d_vector_mapping = self.speaker_manager.embeddings if config.model_args.use_d_vector_file else None\n config.use_d_vector_file = config.model_args.use_d_vector_file\n else:\n speaker_id_mapping = self.speaker_manager.name_to_id if config.use_speaker_embedding else None\n d_vector_mapping = self.speaker_manager.embeddings if config.use_d_vector_file else None\n else:\n speaker_id_mapping = None\n d_vector_mapping = None\n\n # setup multi-lingual attributes\n if self.language_manager is not None:\n language_id_mapping = self.language_manager.name_to_id if self.args.use_language_embedding else None\n else:\n language_id_mapping = None\n\n # init dataloader\n dataset = TTSDataset(\n outputs_per_step=config.r if \"r\" in config else 1,\n compute_linear_spec=config.model.lower() == \"tacotron\" or config.compute_linear_spec,\n compute_f0=config.get(\"compute_f0\", False),\n f0_cache_path=config.get(\"f0_cache_path\", None),\n compute_energy=config.get(\"compute_energy\", False),\n energy_cache_path=config.get(\"energy_cache_path\", None),\n samples=samples,\n ap=self.ap,\n return_wav=config.return_wav if \"return_wav\" in config else False,\n batch_group_size=0 if is_eval else config.batch_group_size * config.batch_size,\n min_text_len=config.min_text_len,\n max_text_len=config.max_text_len,\n min_audio_len=config.min_audio_len,\n max_audio_len=config.max_audio_len,\n phoneme_cache_path=config.phoneme_cache_path,\n precompute_num_workers=config.precompute_num_workers,\n use_noise_augment=False if is_eval else config.use_noise_augment,\n verbose=verbose,\n speaker_id_mapping=speaker_id_mapping,\n d_vector_mapping=d_vector_mapping if config.use_d_vector_file else None,\n tokenizer=self.tokenizer,\n start_by_longest=config.start_by_longest,\n language_id_mapping=language_id_mapping,\n )\n\n # wait all the DDP process to be ready\n if num_gpus > 1:\n dist.barrier()\n\n # sort input sequences from short to long\n dataset.preprocess_samples()\n\n # get samplers\n sampler = self.get_sampler(config, dataset, num_gpus)\n\n loader = DataLoader(\n dataset,\n batch_size=config.eval_batch_size if is_eval else config.batch_size,\n shuffle=config.shuffle if sampler is None else False, # if there is no other sampler\n collate_fn=dataset.collate_fn,\n drop_last=config.drop_last, # setting this False might cause issues in AMP training.\n sampler=sampler,\n num_workers=config.num_eval_loader_workers if is_eval else config.num_loader_workers,\n pin_memory=False,\n )\n return loader\n\n def _get_test_aux_input(\n self,\n ) -> Dict:\n d_vector = None\n if self.config.use_d_vector_file:\n d_vector = [self.speaker_manager.embeddings[name][\"embedding\"] for name in self.speaker_manager.embeddings]\n d_vector = (random.sample(sorted(d_vector), 1),)\n\n aux_inputs = {\n \"speaker_id\": None\n if not self.config.use_speaker_embedding\n else random.sample(sorted(self.speaker_manager.name_to_id.values()), 1),\n \"d_vector\": d_vector,\n \"style_wav\": None, # TODO: handle GST style input\n }\n return aux_inputs\n\n def test_run(self, assets: Dict) -> Tuple[Dict, Dict]:\n 
\"\"\"Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different behaviour.\n\n Args:\n assets (dict): A dict of training assets. For `tts` models, it must include `{'audio_processor': ap}`.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n \"\"\"\n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n aux_inputs = self._get_test_aux_input()\n for idx, sen in enumerate(test_sentences):\n if isinstance(sen, list):\n aux_inputs = self.get_aux_input_from_test_sentences(sen)\n sen = aux_inputs[\"text\"]\n outputs_dict = synthesis(\n self,\n sen,\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n use_griffin_lim=True,\n do_trim_silence=False,\n )\n test_audios[\"{}-audio\".format(idx)] = outputs_dict[\"wav\"]\n test_figures[\"{}-prediction\".format(idx)] = plot_spectrogram(\n outputs_dict[\"outputs\"][\"model_outputs\"], self.ap, output_fig=False\n )\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(\n outputs_dict[\"outputs\"][\"alignments\"], output_fig=False\n )\n return test_figures, test_audios\n\n def on_init_start(self, trainer):\n \"\"\"Save the speaker.pth and language_ids.json at the beginning of the training. Also update both paths.\"\"\"\n if self.speaker_manager is not None:\n output_path = os.path.join(trainer.output_path, \"speakers.pth\")\n self.speaker_manager.save_ids_to_file(output_path)\n trainer.config.speakers_file = output_path\n # some models don't have `model_args` set\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.speakers_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `speakers.pth` is saved to {output_path}.\")\n print(\" > `speakers_file` is updated in the config.json.\")\n\n if self.language_manager is not None:\n output_path = os.path.join(trainer.output_path, \"language_ids.json\")\n self.language_manager.save_ids_to_file(output_path)\n trainer.config.language_ids_file = output_path\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.language_ids_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `language_ids.json` is saved to {output_path}.\")\n print(\" > `language_ids_file` is updated in the config.json.\")" } ]
import os import numpy as np from dataclasses import dataclass from typing import Optional from coqpit import Coqpit from encodec import EncodecModel from transformers import BertTokenizer from TTS.tts.layers.bark.inference_funcs import ( codec_decode, generate_coarse, generate_fine, generate_text_semantic, generate_voice, load_voice, ) from TTS.tts.layers.bark.load_model import load_model from TTS.tts.layers.bark.model import GPT from TTS.tts.layers.bark.model_fine import FineGPT from TTS.tts.models.base_tts import BaseTTS
14,198
model_type="coarse", ) self.fine_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["fine"], device=self.device, config=self.config, model_type="fine" ) def train_step( self, ): pass def text_to_semantic( self, text: str, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate semantic array from text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy semantic array to be fed into `semantic_to_waveform` """ x_semantic = generate_text_semantic( text, self, history_prompt=history_prompt, temp=temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) return x_semantic def semantic_to_waveform( self, semantic_tokens: np.ndarray, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, ): """Generate audio array from semantic input. Args: semantic_tokens: semantic token output from `text_to_semantic` history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_coarse_gen = generate_coarse( semantic_tokens, self, history_prompt=history_prompt, temp=temp, base=base, ) x_fine_gen = generate_fine( x_coarse_gen, self, history_prompt=history_prompt, temp=0.5, base=base, ) audio_arr = codec_decode(x_fine_gen, self) return audio_arr, x_coarse_gen, x_fine_gen def generate_audio( self, text: str, history_prompt: Optional[str] = None, text_temp: float = 0.7, waveform_temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate audio array from input text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning text_temp: generation temperature (1.0 more diverse, 0.0 more conservative) waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_semantic = self.text_to_semantic( text, history_prompt=history_prompt, temp=text_temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) audio_arr, c, f = self.semantic_to_waveform( x_semantic, history_prompt=history_prompt, temp=waveform_temp, base=base ) return audio_arr, [x_semantic, c, f] def generate_voice(self, audio, speaker_id, voice_dir): """Generate a voice from the given audio and text. Args: audio (str): Path to the audio file. speaker_id (str): Speaker name. voice_dir (str): Path to the directory to save the generate voice. """ if voice_dir is not None: voice_dirs = [voice_dir] try:
@dataclass class BarkAudioConfig(Coqpit): sample_rate: int = 24000 output_sample_rate: int = 24000 class Bark(BaseTTS): def __init__( self, config: Coqpit, tokenizer: BertTokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased"), ) -> None: super().__init__(config=config, ap=None, tokenizer=None, speaker_manager=None, language_manager=None) self.config.num_chars = len(tokenizer) self.tokenizer = tokenizer self.semantic_model = GPT(config.semantic_config) self.coarse_model = GPT(config.coarse_config) self.fine_model = FineGPT(config.fine_config) self.encodec = EncodecModel.encodec_model_24khz() self.encodec.set_target_bandwidth(6.0) @property def device(self): return next(self.parameters()).device def load_bark_models(self): self.semantic_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["text"], device=self.device, config=self.config, model_type="text" ) self.coarse_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["coarse"], device=self.device, config=self.config, model_type="coarse", ) self.fine_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["fine"], device=self.device, config=self.config, model_type="fine" ) def train_step( self, ): pass def text_to_semantic( self, text: str, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate semantic array from text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy semantic array to be fed into `semantic_to_waveform` """ x_semantic = generate_text_semantic( text, self, history_prompt=history_prompt, temp=temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) return x_semantic def semantic_to_waveform( self, semantic_tokens: np.ndarray, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, ): """Generate audio array from semantic input. Args: semantic_tokens: semantic token output from `text_to_semantic` history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_coarse_gen = generate_coarse( semantic_tokens, self, history_prompt=history_prompt, temp=temp, base=base, ) x_fine_gen = generate_fine( x_coarse_gen, self, history_prompt=history_prompt, temp=0.5, base=base, ) audio_arr = codec_decode(x_fine_gen, self) return audio_arr, x_coarse_gen, x_fine_gen def generate_audio( self, text: str, history_prompt: Optional[str] = None, text_temp: float = 0.7, waveform_temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate audio array from input text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning text_temp: generation temperature (1.0 more diverse, 0.0 more conservative) waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_semantic = self.text_to_semantic( text, history_prompt=history_prompt, temp=text_temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) audio_arr, c, f = self.semantic_to_waveform( x_semantic, history_prompt=history_prompt, temp=waveform_temp, base=base ) return audio_arr, [x_semantic, c, f] def generate_voice(self, audio, speaker_id, voice_dir): """Generate a voice from the given audio and text. Args: audio (str): Path to the audio file. speaker_id (str): Speaker name. 
voice_dir (str): Path to the directory to save the generate voice. """ if voice_dir is not None: voice_dirs = [voice_dir] try:
_ = load_voice(speaker_id, voice_dirs)
5
2023-11-29 08:15:06+00:00
16k
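The row above pairs a truncated file (cropped_code) with retrieved context snippets and a single next_line target, here "_ = load_voice(speaker_id, voice_dirs)". As a minimal sketch of how such a row could be consumed for next-line completion, assuming the rows are loaded as Python dicts with exactly the field names shown in this dump, and with "complete" standing in as a hypothetical callable for whatever model is being evaluated (the prompt layout and exact-match scoring below are illustrative choices, not prescribed by the dataset):

import json

def build_prompt(row: dict) -> str:
    # One possible prompt layout: the gold context snippet first, then the
    # file's imports and the cropped code that ends right before next_line.
    gold = row["context"][int(row["gold_snippet_index"])]
    return f'# {gold["path"]}\n{gold["snippet"]}\n' + row["import_statement"] + "\n" + row["cropped_code"]

def exact_match(prediction: str, reference: str) -> bool:
    # Whitespace-insensitive comparison against the next_line target.
    return prediction.strip() == reference.strip()

def evaluate(rows, complete) -> float:
    # "complete" is a placeholder: prompt string -> predicted next line.
    hits = sum(exact_match(complete(build_prompt(r)), r["next_line"]) for r in rows)
    return hits / max(len(rows), 1)

if __name__ == "__main__":
    # hypothetical JSONL export containing rows like the ones shown here
    with open("examples.jsonl") as f:
        rows = [json.loads(line) for line in f]
    print(evaluate(rows, complete=lambda prompt: prompt.splitlines()[-1]))  # dummy stand-in model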
AILab-CVC/UniRepLKNet
Video/dataset/build.py
[ { "identifier": "RawFrameClsDataset", "path": "Video/dataset/datasets.py", "snippet": "class RawFrameClsDataset(Dataset):\n \"\"\"Load your own raw frame classification dataset.\"\"\"\n\n def __init__(self,\n anno_path,\n data_root,\n mode='train',\n clip_len=8,\n crop_size=224,\n short_side_size=256,\n new_height=256,\n new_width=340,\n keep_aspect_ratio=True,\n num_segment=1,\n num_crop=1,\n test_num_segment=10,\n test_num_crop=3,\n filename_tmpl='img_{:05}.jpg',\n start_idx=1,\n args=None):\n self.anno_path = anno_path\n self.data_root = data_root\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.filename_tmpl = filename_tmpl\n self.start_idx = start_idx\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n\n self.image_loader = get_image_loader()\n\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(\n cleaned[0].apply(lambda row: os.path.join(self.data_root, row)))\n self.total_frames = list(cleaned.values[:, 1])\n self.label_array = list(cleaned.values[:, -1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(\n self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(\n size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(\n size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_total_frames = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n self.test_seg.append((ck, cp))\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_total_frames.append(self.total_frames[idx])\n self.test_label_array.append(self.label_array[idx])\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(\n sample, total_frame, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(\n sample, total_frame, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n 
else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample, total_frame)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_frame(sample, total_frame)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n while len(buffer) == 0:\n warnings.warn(\n \"video {}, temporal {}, spatial {} not found during testing\"\n .format(str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) -\n self.short_side_size) / (\n self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment,\n spatial_start:spatial_start +\n self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, :,\n spatial_start:spatial_start +\n self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0], chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(self, buffer, args):\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def load_frame(self, sample, num_frames, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if self.mode == 'test':\n tick = num_frames / float(self.num_segment)\n all_index = 
[]\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index) + self.start_idx))\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname,\n self.filename_tmpl.format(idx))\n img = self.image_loader(frame_fname)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n # handle temporal segments\n average_duration = num_frames // self.num_segment\n all_index = []\n if average_duration > 0:\n if self.mode == 'validation':\n all_index = list(\n np.multiply(\n list(range(self.num_segment)), average_duration) +\n np.ones(self.num_segment, dtype=int) *\n (average_duration // 2))\n else:\n all_index = list(\n np.multiply(\n list(range(self.num_segment)), average_duration) +\n np.random.randint(average_duration, size=self.num_segment))\n elif num_frames > self.num_segment:\n if self.mode == 'validation':\n all_index = list(range(self.num_segment))\n else:\n all_index = list(\n np.sort(\n np.random.randint(num_frames, size=self.num_segment)))\n else:\n all_index = [0] * (self.num_segment - num_frames) + list(\n range(num_frames))\n all_index = list(np.array(all_index) + self.start_idx)\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx))\n img = self.image_loader(frame_fname)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoClsDataset", "path": "Video/dataset/datasets.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self,\n anno_path,\n data_root='',\n mode='train',\n clip_len=8,\n frame_sample_rate=2,\n crop_size=224,\n short_side_size=256,\n new_height=256,\n new_width=340,\n keep_aspect_ratio=True,\n num_segment=1,\n num_crop=1,\n test_num_segment=10,\n test_num_crop=3,\n sparse_sample=False,\n args=None):\n self.anno_path = anno_path\n self.data_root = data_root\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.sparse_sample = sparse_sample\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n\n self.video_loader = get_video_loader()\n\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')\n self.dataset_samples = list(\n cleaned[0].apply(lambda row: os.path.join(self.data_root, row)))\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = video_transforms.Compose([\n video_transforms.Resize(\n self.short_side_size, interpolation='bilinear'),\n video_transforms.CenterCrop(\n size=(self.crop_size, self.crop_size)),\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = video_transforms.Compose([\n video_transforms.Resize(\n 
size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = video_transforms.Compose([\n volume_transforms.ClipToTensor(),\n video_transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n # T H W C\n buffer = self.load_video(sample, sample_rate_scale=scale_t)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_video(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.load_video(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_video(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_video(sample)\n\n while len(buffer) == 0:\n warnings.warn(\n \"video {}, temporal {}, spatial {} not found during testing\"\n .format(str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_video(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.sparse_sample:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) -\n self.short_side_size) / (\n self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment,\n spatial_start:spatial_start +\n self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, :,\n spatial_start:spatial_start +\n self.short_side_size, :]\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) -\n self.short_side_size) / (\n self.test_num_crop - 1)\n temporal_step = max(\n 1.0 * (buffer.shape[0] - self.clip_len) /\n (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start:temporal_start +\n self.clip_len,\n 
spatial_start:spatial_start +\n self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start:temporal_start +\n self.clip_len, :,\n spatial_start:spatial_start +\n self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0], chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(self, buffer, args):\n aug_transform = video_transforms.create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n # crop_size=224,\n crop_size=args.input_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3) # C T H W -> T C H W\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3) # T C H W -> C T H W\n\n return buffer\n\n def load_video(self, sample, sample_rate_scale=1):\n fname = sample\n\n try:\n vr = self.video_loader(fname)\n except Exception as e:\n print(f\"Failed to load video from {fname} with error {e}!\")\n return []\n\n length = len(vr)\n\n if self.mode == 'test':\n if self.sparse_sample:\n tick = length / float(self.num_segment)\n all_index = []\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index)))\n else:\n all_index = [\n x for x in range(0, length, self.frame_sample_rate)\n ]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = length // self.num_segment\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(\n 0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate(\n (index,\n np.ones(self.clip_len - seg_len // self.frame_sample_rate)\n * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n else:\n if self.mode == 'validation':\n end_idx = (converted_len + seg_len) // 2\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i * seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if 
self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "DataAugmentationForVideoMAEv2", "path": "Video/dataset/pretrain_datasets.py", "snippet": "class DataAugmentationForVideoMAEv2(object):\n\n def __init__(self, args):\n self.input_mean = [0.485, 0.456, 0.406]\n self.input_std = [0.229, 0.224, 0.225]\n div = True\n roll = False\n normalize = GroupNormalize(self.input_mean, self.input_std)\n self.train_augmentation = GroupMultiScaleCrop(args.input_size,\n [1, .875, .75, .66])\n self.transform = transforms.Compose([\n self.train_augmentation,\n Stack(roll=roll),\n ToTorchFormatTensor(div=div),\n normalize,\n ])\n if args.mask_type == 'tube':\n self.encoder_mask_map_generator = TubeMaskingGenerator(\n args.window_size, args.mask_ratio)\n else:\n raise NotImplementedError(\n 'Unsupported encoder masking strategy type.')\n if args.decoder_mask_ratio > 0.:\n if args.decoder_mask_type == 'run_cell':\n self.decoder_mask_map_generator = RunningCellMaskingGenerator(\n args.window_size, args.decoder_mask_ratio)\n else:\n raise NotImplementedError(\n 'Unsupported decoder masking strategy type.')\n\n def __call__(self, images):\n process_data, _ = self.transform(images)\n encoder_mask_map = self.encoder_mask_map_generator()\n if hasattr(self, 'decoder_mask_map_generator'):\n decoder_mask_map = self.decoder_mask_map_generator()\n else:\n decoder_mask_map = 1 - encoder_mask_map\n return process_data, encoder_mask_map, decoder_mask_map\n\n def __repr__(self):\n repr = \"(DataAugmentationForVideoMAEv2,\\n\"\n repr += \" transform = %s,\\n\" % str(self.transform)\n repr += \" Encoder Masking Generator = %s,\\n\" % str(\n self.encoder_mask_map_generator)\n if hasattr(self, 'decoder_mask_map_generator'):\n repr += \" Decoder Masking Generator = %s,\\n\" % str(\n self.decoder_mask_map_generator)\n else:\n repr += \" Do not use decoder masking,\\n\"\n repr += \")\"\n return repr" }, { "identifier": "HybridVideoMAE", "path": "Video/dataset/pretrain_datasets.py", "snippet": "class HybridVideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own videomae pretraining dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are four items in each line:\n (1) video path; (2) start_idx, (3) total frames and (4) video label.\n for pre-train video data\n total frames < 0, start_idx and video label meaningless\n for pre-train rawframe data\n video label meaningless\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default 'img_{:05}.jpg'.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep 
Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n transform : function, default None.\n A function that takes data and label and transforms them.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n num_sample : int, default 1.\n Number of sampled views for Repeated Augmentation.\n \"\"\"\n\n def __init__(self,\n root,\n setting,\n train=True,\n test_mode=False,\n name_pattern='img_{:05}.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n lazy_init=False,\n num_sample=1):\n\n super(HybridVideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_ext = video_ext\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n # NOTE:\n # for hybrid train\n # different frame naming formats are used for different datasets\n # should MODIFY the fname_tmpl to your own situation\n self.ava_fname_tmpl = 'image_{:06}.jpg'\n self.ssv2_fname_tmpl = 'img_{:05}.jpg'\n\n # NOTE:\n # we set sampling_rate = 2 for ssv2\n # thus being consistent with the fine-tuning stage\n # Note that the ssv2 we use is decoded to frames at 12 fps;\n # if decoded at 24 fps, the sample interval should be 4.\n self.ssv2_skip_length = self.new_length * 2\n self.orig_skip_length = self.skip_length\n\n self.video_loader = get_video_loader()\n self.image_loader = get_image_loader()\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise (\n RuntimeError(\"Found 0 video clips in subfolders of: \" +\n root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n try:\n video_name, start_idx, total_frame = self.clips[index]\n self.skip_length = self.orig_skip_length\n\n if total_frame < 0:\n decord_vr = self.video_loader(video_name)\n duration = len(decord_vr)\n\n segment_indices, skip_offsets = self._sample_train_indices(\n duration)\n frame_id_list = self.get_frame_id_list(duration,\n segment_indices,\n skip_offsets)\n video_data = decord_vr.get_batch(frame_id_list).asnumpy()\n images = [\n Image.fromarray(video_data[vid, :, :, :]).convert('RGB')\n for vid, _ in enumerate(frame_id_list)\n ]\n\n else:\n # ssv2 & ava & other rawframe dataset\n if 'SomethingV2' in video_name:\n self.skip_length = self.ssv2_skip_length\n fname_tmpl = self.ssv2_fname_tmpl\n elif 'AVA2.2' in video_name:\n fname_tmpl = self.ava_fname_tmpl\n else:\n fname_tmpl = 
self.name_pattern\n\n segment_indices, skip_offsets = self._sample_train_indices(\n total_frame)\n frame_id_list = self.get_frame_id_list(total_frame,\n segment_indices,\n skip_offsets)\n\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(\n video_name, fname_tmpl.format(idx + start_idx))\n img = self.image_loader(frame_fname)\n img = Image.fromarray(img)\n images.append(img)\n\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n return self.__getitem__(index)\n\n if self.num_sample > 1:\n process_data_list = []\n encoder_mask_list = []\n decoder_mask_list = []\n for _ in range(self.num_sample):\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n process_data = process_data.view(\n (self.new_length, 3) + process_data.size()[-2:]).transpose(\n 0, 1)\n process_data_list.append(process_data)\n encoder_mask_list.append(encoder_mask)\n decoder_mask_list.append(decoder_mask)\n return process_data_list, encoder_mask_list, decoder_mask_list\n else:\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n # T*C,H,W -> T,C,H,W -> C,T,H,W\n process_data = process_data.view(\n (self.new_length, 3) + process_data.size()[-2:]).transpose(\n 0, 1)\n return process_data, encoder_mask, decoder_mask\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, root, setting):\n if not os.path.exists(setting):\n raise (RuntimeError(\n \"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \"\n % (setting)))\n clips = []\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(' ')\n # line format: video_path, video_duration, video_label\n if len(line_info) < 2:\n raise (RuntimeError(\n 'Video input format is not correct, missing one or more element. 
%s'\n % line))\n clip_path = os.path.join(root, line_info[0])\n start_idx = int(line_info[1])\n total_frame = int(line_info[2])\n item = (clip_path, start_idx, total_frame)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length +\n 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(\n list(range(self.num_segments)), average_duration)\n offsets = offsets + np.random.randint(\n average_duration, size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(\n np.random.randint(\n num_frames - self.skip_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments, ))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list" }, { "identifier": "VideoMAE", "path": "Video/dataset/pretrain_datasets.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own videomae pretraining dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are four items in each line:\n (1) video path; (2) start_idx, (3) total frames and (4) video label.\n for pre-train video data\n total frames < 0, start_idx and video label meaningless\n for pre-train rawframe data\n video label meaningless\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default 'img_{:05}.jpg'.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. 
For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n transform : function, default None.\n A function that takes data and label and transforms them.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n num_sample : int, default 1.\n Number of sampled views for Repeated Augmentation.\n \"\"\"\n\n def __init__(self,\n root,\n setting,\n train=True,\n test_mode=False,\n name_pattern='img_{:05}.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n lazy_init=False,\n num_sample=1):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_ext = video_ext\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n self.video_loader = get_video_loader()\n self.image_loader = get_image_loader()\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise (\n RuntimeError(\"Found 0 video clips in subfolders of: \" +\n root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n try:\n video_name, start_idx, total_frame = self.clips[index]\n if total_frame < 0: # load video\n decord_vr = self.video_loader(video_name)\n duration = len(decord_vr)\n\n segment_indices, skip_offsets = self._sample_train_indices(\n duration)\n frame_id_list = self.get_frame_id_list(duration,\n segment_indices,\n skip_offsets)\n video_data = decord_vr.get_batch(frame_id_list).asnumpy()\n images = [\n Image.fromarray(video_data[vid, :, :, :]).convert('RGB')\n for vid, _ in enumerate(frame_id_list)\n ]\n else: # load frames\n segment_indices, skip_offsets = self._sample_train_indices(\n total_frame)\n frame_id_list = self.get_frame_id_list(total_frame,\n segment_indices,\n skip_offsets)\n\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(\n video_name, self.name_pattern.format(idx + start_idx))\n img = self.image_loader(frame_fname)\n img = Image.fromarray(img)\n images.append(img)\n\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n return self.__getitem__(index)\n\n if self.num_sample > 1:\n process_data_list = []\n encoder_mask_list = []\n decoder_mask_list = []\n for _ in range(self.num_sample):\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n process_data = process_data.view(\n (self.new_length, 3) + process_data.size()[-2:]).transpose(\n 0, 1)\n process_data_list.append(process_data)\n encoder_mask_list.append(encoder_mask)\n decoder_mask_list.append(decoder_mask)\n return process_data_list, encoder_mask_list, decoder_mask_list\n else:\n process_data, encoder_mask, decoder_mask = self.transform(\n (images, None))\n # T*C,H,W -> T,C,H,W -> C,T,H,W\n process_data = process_data.view(\n (self.new_length, 3) + 
process_data.size()[-2:]).transpose(\n 0, 1)\n return process_data, encoder_mask, decoder_mask\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, root, setting):\n if not os.path.exists(setting):\n raise (RuntimeError(\n \"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \"\n % (setting)))\n clips = []\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(' ')\n # line format: video_path, start_idx, total_frames\n if len(line_info) < 3:\n raise (RuntimeError(\n 'Video input format is not correct, missing one or more element. %s'\n % line))\n clip_path = os.path.join(root, line_info[0])\n start_idx = int(line_info[1])\n total_frame = int(line_info[2])\n item = (clip_path, start_idx, total_frame)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length +\n 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(\n list(range(self.num_segments)), average_duration)\n offsets = offsets + np.random.randint(\n average_duration, size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(\n np.random.randint(\n num_frames - self.skip_length + 1, size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments, ))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list" } ]
import os from .datasets import RawFrameClsDataset, VideoClsDataset from .pretrain_datasets import ( # noqa: F401 DataAugmentationForVideoMAEv2, HybridVideoMAE, VideoMAE, )
11,089
# -------------------------------------------------------- # Based on BEiT, timm, DINO and DeiT code bases # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/facebookresearch/deit # https://github.com/facebookresearch/dino # --------------------------------------------------------' def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAEv2(args) dataset = VideoMAE( root=args.data_root, setting=args.data_path, train=True, test_mode=False, name_pattern=args.fname_tmpl, video_ext='mp4', is_color=True, modality='rgb', num_segments=1, num_crop=1, new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, lazy_init=False, num_sample=args.num_sample) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if is_train: mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif test_mode: mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') if args.data_set == 'Kinetics-400': if not args.sparse_sample:
# -------------------------------------------------------- # Based on BEiT, timm, DINO and DeiT code bases # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/facebookresearch/deit # https://github.com/facebookresearch/dino # --------------------------------------------------------' def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAEv2(args) dataset = VideoMAE( root=args.data_root, setting=args.data_path, train=True, test_mode=False, name_pattern=args.fname_tmpl, video_ext='mp4', is_color=True, modality='rgb', num_segments=1, num_crop=1, new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, lazy_init=False, num_sample=args.num_sample) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if is_train: mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif test_mode: mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') if args.data_set == 'Kinetics-400': if not args.sparse_sample:
dataset = VideoClsDataset(
1
2023-11-24 07:28:22+00:00
16k
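In the row above, gold_snippet_index is 1, which points at the VideoClsDataset entry of the context list, and that identifier is exactly what the next_line target "dataset = VideoClsDataset(" requires. A small sanity check along those lines, under the same assumption that each row is available as a dict with the field names shown in this dump:

def gold_identifier_used(row: dict) -> bool:
    # True when the identifier of the gold context snippet occurs in the target line,
    # e.g. "VideoClsDataset" in "dataset = VideoClsDataset(".
    gold = row["context"][int(row["gold_snippet_index"])]
    return gold["identifier"] in row["next_line"]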
wenquanlu/HandRefiner
handrefiner.py
[ { "identifier": "handrefiner_root", "path": "config.py", "snippet": "" }, { "identifier": "create_model", "path": "cldm/model.py", "snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path)\n model = instantiate_from_config(config.model).cpu()\n print(f'Loaded model config from [{config_path}]')\n return model" }, { "identifier": "load_state_dict", "path": "cldm/model.py", "snippet": "def load_state_dict(ckpt_path, location='cpu'):\n _, extension = os.path.splitext(ckpt_path)\n if extension.lower() == \".safetensors\":\n import safetensors.torch\n state_dict = safetensors.torch.load_file(ckpt_path, device=location)\n else:\n state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))\n state_dict = get_state_dict(state_dict)\n print(f'Loaded state_dict from [{ckpt_path}]')\n return state_dict" }, { "identifier": "DDIMSampler", "path": "cldm/ddim_hacked.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n guidance_end=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n guidance_end=guidance_end\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, guidance_end=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM 
Sampler', total=total_steps)\n uncontrol = False\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n if guidance_end is not None:\n uncontrol = (i > (guidance_end * total_steps))\n #print(uncontrol)\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts)\n img = img_orig * (1. - mask) + mask * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold, uncontrol=uncontrol)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, uncontrol=False):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n if uncontrol:\n self.model.control_scales = ([0.0] * 13)\n model_t = self.model.apply_model(x, t, c)\n model_uncond = self.model.apply_model(x, t, unconditional_conditioning)\n\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n num_reference_steps = timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), timesteps[i], device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, 
dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "MeshGraphormerMediapipe", "path": "preprocessor/meshgraphormer.py", "snippet": "class MeshGraphormerMediapipe(Preprocessor):\n def __init__(self, args=args) -> None:\n #global logger\n # Setup CUDA, GPU & distributed training\n args.num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n os.environ['OMP_NUM_THREADS'] = str(args.num_workers)\n print('set os.environ[OMP_NUM_THREADS] to {}'.format(os.environ['OMP_NUM_THREADS']))\n\n #mkdir(args.output_dir)\n #logger = setup_logger(\"Graphormer\", args.output_dir, get_rank())\n set_seed(args.seed, args.num_gpus)\n #logger.info(\"Using {} GPUs\".format(args.num_gpus))\n\n # Mesh and MANO utils\n mano_model = MANO().to(args.device)\n mano_model.layer = mano_model.layer.cuda()\n mesh_sampler = Mesh()\n\n # Renderer for visualization\n # renderer = Renderer(faces=mano_model.face)\n\n # Load pretrained model\n trans_encoder = []\n\n input_feat_dim = [int(item) for item in args.input_feat_dim.split(',')]\n hidden_feat_dim = [int(item) for item in args.hidden_feat_dim.split(',')]\n output_feat_dim = input_feat_dim[1:] + [3]\n\n # which encoder block to have graph convs\n which_blk_graph = [int(item) for item in args.which_gcn.split(',')]\n\n if args.run_eval_only==True and args.resume_checkpoint!=None and args.resume_checkpoint!='None' and 'state_dict' not in args.resume_checkpoint:\n # if only run eval, load checkpoint\n #logger.info(\"Evaluation: Loading from checkpoint {}\".format(args.resume_checkpoint))\n _model = torch.load(args.resume_checkpoint)\n\n else:\n # init three transformer-encoder blocks in a loop\n for i in range(len(output_feat_dim)):\n config_class, model_class = BertConfig, Graphormer\n config = config_class.from_pretrained(args.config_name if args.config_name \\\n else args.model_name_or_path)\n\n config.output_attentions = False\n config.img_feature_dim = input_feat_dim[i] \n config.output_feature_dim = output_feat_dim[i]\n args.hidden_size = hidden_feat_dim[i]\n args.intermediate_size = int(args.hidden_size*2)\n\n if which_blk_graph[i]==1:\n config.graph_conv = True\n #logger.info(\"Add Graph Conv\")\n else:\n config.graph_conv = False\n\n config.mesh_type = args.mesh_type\n\n # update model structure if specified in arguments\n update_params = ['num_hidden_layers', 'hidden_size', 'num_attention_heads', 'intermediate_size']\n for idx, param in enumerate(update_params):\n arg_param = getattr(args, param)\n config_param = getattr(config, param)\n if arg_param > 0 and arg_param != config_param:\n #logger.info(\"Update config parameter {}: {} -> {}\".format(param, config_param, arg_param))\n setattr(config, param, arg_param)\n\n # init a transformer encoder and append it to a list\n assert config.hidden_size % config.num_attention_heads == 0\n model = model_class(config=config) \n #logger.info(\"Init model from scratch.\")\n trans_encoder.append(model)\n \n # create backbone model\n if args.arch=='hrnet':\n hrnet_yaml = 'MeshGraphormer/models/hrnet/cls_hrnet_w40_sgd_lr5e-2_wd1e-4_bs32_x100.yaml'\n hrnet_checkpoint = 'MeshGraphormer/models/hrnet/hrnetv2_w40_imagenet_pretrained.pth'\n hrnet_update_config(hrnet_config, hrnet_yaml)\n backbone = get_cls_net_gridfeat(hrnet_config, pretrained=hrnet_checkpoint)\n 
#logger.info('=> loading hrnet-v2-w40 model')\n elif args.arch=='hrnet-w64':\n hrnet_yaml = 'MeshGraphormer/models/hrnet/cls_hrnet_w64_sgd_lr5e-2_wd1e-4_bs32_x100.yaml'\n hrnet_checkpoint = 'MeshGraphormer/models/hrnet/hrnetv2_w64_imagenet_pretrained.pth'\n hrnet_update_config(hrnet_config, hrnet_yaml)\n backbone = get_cls_net_gridfeat(hrnet_config, pretrained=hrnet_checkpoint)\n #logger.info('=> loading hrnet-v2-w64 model')\n else:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n backbone = models.__dict__[args.arch](pretrained=True)\n # remove the last fc layer\n backbone = torch.nn.Sequential(*list(backbone.children())[:-1])\n\n trans_encoder = torch.nn.Sequential(*trans_encoder)\n total_params = sum(p.numel() for p in trans_encoder.parameters())\n #logger.info('Graphormer encoders total parameters: {}'.format(total_params))\n backbone_total_params = sum(p.numel() for p in backbone.parameters())\n #logger.info('Backbone total parameters: {}'.format(backbone_total_params))\n\n # build end-to-end Graphormer network (CNN backbone + multi-layer Graphormer encoder)\n _model = Graphormer_Network(args, config, backbone, trans_encoder)\n\n if args.resume_checkpoint!=None and args.resume_checkpoint!='None':\n # for fine-tuning or resume training or inference, load weights from checkpoint\n #logger.info(\"Loading state dict from checkpoint {}\".format(args.resume_checkpoint))\n # workaround approach to load sparse tensor in graph conv.\n state_dict = torch.load(args.resume_checkpoint)\n _model.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect()\n torch.cuda.empty_cache()\n\n # update configs to enable attention outputs\n setattr(_model.trans_encoder[-1].config,'output_attentions', True)\n setattr(_model.trans_encoder[-1].config,'output_hidden_states', True)\n _model.trans_encoder[-1].bert.encoder.output_attentions = True\n _model.trans_encoder[-1].bert.encoder.output_hidden_states = True\n for iter_layer in range(4):\n _model.trans_encoder[-1].bert.encoder.layer[iter_layer].attention.self.output_attentions = True\n for inter_block in range(3):\n setattr(_model.trans_encoder[-1].config,'device', args.device)\n\n _model.to(args.device)\n self._model = _model\n self.mano_model = mano_model\n self.mesh_sampler = mesh_sampler\n\n self.transform = transforms.Compose([ \n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])])\n \n base_options = python.BaseOptions(model_asset_path='preprocessor/hand_landmarker.task')\n options = vision.HandLandmarkerOptions(base_options=base_options,\n min_hand_detection_confidence=0.6,\n min_hand_presence_confidence=0.6,\n min_tracking_confidence=0.6,\n num_hands=2)\n\n self.detector = vision.HandLandmarker.create_from_options(options)\n \n \n def get_rays(self, W, H, fx, fy, cx, cy, c2w_t, center_pixels): # rot = I\n \n j, i = np.meshgrid(np.arange(H, dtype=np.float32), np.arange(W, dtype=np.float32))\n if center_pixels:\n i = i.copy() + 0.5\n j = j.copy() + 0.5\n\n directions = np.stack([(i - cx) / fx, (j - cy) / fy, np.ones_like(i)], -1)\n directions /= np.linalg.norm(directions, axis=-1, keepdims=True)\n\n rays_o = np.expand_dims(c2w_t,0).repeat(H*W, 0)\n\n rays_d = directions # (H, W, 3)\n rays_d = (rays_d / np.linalg.norm(rays_d, axis=-1, keepdims=True)).reshape(-1,3)\n\n return rays_o, rays_d\n \n def get_mask_bounding_box(self, extrema, H, W, padding=30, dynamic_resize=0.15):\n x_min, x_max, y_min, y_max = extrema\n bb_xpad = max(int((x_max - x_min + 1) * dynamic_resize), 
padding)\n bb_ypad = max(int((y_max - y_min + 1) * dynamic_resize), padding)\n bbx_min = np.max((x_min - bb_xpad, 0))\n bbx_max = np.min((x_max + bb_xpad, W-1))\n bby_min = np.max((y_min - bb_ypad, 0))\n bby_max = np.min((y_max + bb_ypad, H-1))\n return bbx_min, bbx_max, bby_min, bby_max\n\n def run_inference(self, img, Graphormer_model, mano, mesh_sampler, scale, crop_len):\n # switch to evaluate mode\n H, W = int(crop_len), int(crop_len)\n Graphormer_model.eval()\n mano.eval()\n with torch.no_grad():\n img_tensor = self.transform(img)\n batch_imgs = torch.unsqueeze(img_tensor, 0).cuda()\n \n # forward-pass\n pred_camera, pred_3d_joints, pred_vertices_sub, pred_vertices, hidden_states, att = Graphormer_model(batch_imgs, mano, mesh_sampler)\n\n # obtain 3d joints, which are regressed from the full mesh\n pred_3d_joints_from_mesh = mano.get_3d_joints(pred_vertices)\n # obtain 2d joints, which are projected from 3d joints of mesh\n #pred_2d_joints_from_mesh = orthographic_projection(pred_3d_joints_from_mesh.contiguous(), pred_camera.contiguous())\n #pred_2d_coarse_vertices_from_mesh = orthographic_projection(pred_vertices_sub.contiguous(), pred_camera.contiguous())\n pred_camera = pred_camera.cpu()\n pred_vertices = pred_vertices.cpu()\n mesh = Trimesh(vertices=pred_vertices[0], faces=mano.face)\n res = crop_len\n focal_length = 1000 * scale\n camera_t = np.array([-pred_camera[1], -pred_camera[2], -2*focal_length/(res * pred_camera[0] +1e-9)])\n pred_3d_joints_camera = pred_3d_joints_from_mesh.cpu()[0] - camera_t\n z_3d_dist = pred_3d_joints_camera[:,2].clone()\n\n pred_2d_joints_img_space = ((pred_3d_joints_camera/z_3d_dist[:,None]) * np.array((focal_length, focal_length, 1)))[:,:2] + np.array((W/2, H/2))\n\n rays_o, rays_d = self.get_rays(W, H, focal_length, focal_length, W/2, H/2, camera_t, True)\n coords = np.array(list(np.ndindex(H,W))).reshape(H,W,-1).transpose(1,0,2).reshape(-1,2)\n intersector = RayMeshIntersector(mesh)\n points, index_ray, _ = intersector.intersects_location(rays_o, rays_d, multiple_hits=False)\n\n tri_index = intersector.intersects_first(rays_o, rays_d)\n\n tri_index = tri_index[index_ray]\n\n assert len(index_ray) == len(tri_index)\n \n discriminator = (np.sum(mesh.face_normals[tri_index]* rays_d[index_ray], axis=-1)<= 0)\n points = points[discriminator] # ray intesects in interior faces, discard them\n\n if len(points) == 0:\n return None, None\n depth = (points + camera_t)[:,-1]\n index_ray = index_ray[discriminator]\n pixel_ray = coords[index_ray]\n\n minval = np.min(depth)\n maxval = np.max(depth)\n depthmap = np.zeros([H,W])\n\n depthmap[pixel_ray[:, 0], pixel_ray[:, 1]] = 1.0 - (0.8 * (depth - minval) / (maxval - minval))\n depthmap *= 255\n return depthmap, pred_2d_joints_img_space\n\n\n def get_depth(self, input_dir, file_name, padding):\n info = {}\n \n image_file = os.path.join(input_dir, file_name)\n\n # STEP 3: Load the input image.\n image = mp.Image.create_from_file(image_file)\n\n # STEP 4: Detect hand landmarks from the input image.\n detection_result = self.detector.detect(image)\n\n handedness_list = detection_result.handedness\n hand_landmarks_list = detection_result.hand_landmarks\n\n raw_image = image.numpy_view()\n H, W, C = raw_image.shape\n\n\n # HANDLANDMARKS CAN BE EMPTY, HANDLE THIS!\n if len(hand_landmarks_list) == 0:\n print(\"Cannot detect hands for original image {}\".format(file_name))\n return None, None, None\n raw_image = raw_image[:, :, :3]\n\n padded_image = np.zeros((H*2, W*2, 3))\n padded_image[int(1/2 * H):int(3/2 * H), 
int(1/2 * W):int(3/2 * W)] = raw_image\n\n hand_landmarks_list, handedness_list = zip(\n *sorted(\n zip(hand_landmarks_list, handedness_list), key=lambda x: x[0][9].z, reverse=True\n )\n )\n\n padded_depthmap = np.zeros((H*2, W*2))\n mask = np.zeros((H, W))\n crop_boxes = []\n #bboxes = []\n groundtruth_2d_keypoints = []\n hands = []\n depth_failure = False\n crop_lens = []\n for idx in range(len(hand_landmarks_list)):\n hand = true_hand_category[handedness_list[idx][0].category_name]\n hands.append(hand)\n hand_landmarks = hand_landmarks_list[idx]\n handedness = handedness_list[idx]\n height, width, _ = raw_image.shape\n x_coordinates = [landmark.x for landmark in hand_landmarks]\n y_coordinates = [landmark.y for landmark in hand_landmarks]\n\n # x_min, x_max, y_min, y_max: extrema from mediapipe keypoint detection\n x_min = int(min(x_coordinates) * width)\n x_max = int(max(x_coordinates) * width)\n x_c = (x_min + x_max)//2\n y_min = int(min(y_coordinates) * height)\n y_max = int(max(y_coordinates) * height)\n y_c = (y_min + y_max)//2\n\n #if x_max - x_min < 60 or y_max - y_min < 60:\n # continue\n\n crop_len = (max(x_max - x_min, y_max - y_min) * 1.6) //2 * 2\n\n # crop_x_min, crop_x_max, crop_y_min, crop_y_max: bounding box for mesh reconstruction \n crop_x_min = int(x_c - (crop_len/2 - 1) + W/2)\n crop_x_max = int(x_c + crop_len/2 + W/2)\n crop_y_min = int(y_c - (crop_len/2 - 1) + H/2)\n crop_y_max = int(y_c + crop_len/2 + H/2)\n\n cropped = padded_image[crop_y_min:crop_y_max+1, crop_x_min:crop_x_max+1]\n crop_boxes.append([crop_y_min, crop_y_max, crop_x_min, crop_x_max])\n crop_lens.append(crop_len)\n if hand == \"left\":\n cropped = cv2.flip(cropped, 1)\n\n if crop_len < 224:\n graphormer_input = cv2.resize(cropped, (224, 224), interpolation=cv2.INTER_CUBIC)\n else:\n graphormer_input = cv2.resize(cropped, (224, 224), interpolation=cv2.INTER_AREA)\n scale = crop_len/224\n cropped_depthmap, pred_2d_keypoints = self.run_inference(graphormer_input.astype(np.uint8), self._model, self.mano_model, self.mesh_sampler, scale, int(crop_len)) \n\n if cropped_depthmap is None:\n print(\"Depth reconstruction failed for image {}\".format(file_name))\n depth_failure = True\n break\n #keypoints_image_space = pred_2d_keypoints * (crop_y_max - crop_y_min + 1)/224\n groundtruth_2d_keypoints.append(pred_2d_keypoints)\n \n if hand == \"left\":\n cropped_depthmap = cv2.flip(cropped_depthmap, 1)\n resized_cropped_depthmap = cv2.resize(cropped_depthmap, (int(crop_len), int(crop_len)), interpolation=cv2.INTER_LINEAR)\n nonzero_y, nonzero_x = (resized_cropped_depthmap != 0).nonzero()\n if len(nonzero_y) == 0 or len(nonzero_x) == 0:\n print(\"Depth reconstruction failed for image {}\".format(file_name))\n depth_failure = True\n break\n padded_depthmap[crop_y_min+nonzero_y, crop_x_min+nonzero_x] = resized_cropped_depthmap[nonzero_y, nonzero_x]\n\n # nonzero stands for nonzero value on the depth map\n # coordinates of nonzero depth pixels in original image space\n original_nonzero_x = crop_x_min+nonzero_x - int(W/2)\n original_nonzero_y = crop_y_min+nonzero_y - int(H/2)\n \n nonzerox_min = min(np.min(original_nonzero_x), x_min)\n nonzerox_max = max(np.max(original_nonzero_x), x_max)\n nonzeroy_min = min(np.min(original_nonzero_y), y_min)\n nonzeroy_max = max(np.max(original_nonzero_y), y_max)\n\n bbx_min, bbx_max, bby_min, bby_max = self.get_mask_bounding_box((nonzerox_min, nonzerox_max, nonzeroy_min, nonzeroy_max), H, W, padding)\n mask[bby_min:bby_max+1, bbx_min:bbx_max+1] = 1.0\n #bboxes.append([int(bbx_min), 
int(bbx_max), int(bby_min), int(bby_max)])\n if depth_failure:\n #print(\"cannot detect normal hands\")\n return None, None, None\n depthmap = padded_depthmap[int(1/2 * H):int(3/2 * H), int(1/2 * W):int(3/2 * W)].astype(np.uint8)\n mask = (255.0 * mask).astype(np.uint8)\n info[\"groundtruth_2d_keypoints\"] = groundtruth_2d_keypoints\n info[\"hands\"] = hands\n info[\"crop_boxes\"] = crop_boxes\n info[\"crop_lens\"] = crop_lens\n return depthmap, mask, info\n \n def get_keypoints(self, img, Graphormer_model, mano, mesh_sampler, scale, crop_len):\n H, W = int(crop_len), int(crop_len)\n Graphormer_model.eval()\n mano.eval()\n with torch.no_grad():\n img_tensor = self.transform(img)\n #print(img_tensor)\n batch_imgs = torch.unsqueeze(img_tensor, 0).cuda()\n \n # forward-pass\n pred_camera, pred_3d_joints, pred_vertices_sub, pred_vertices, hidden_states, att = Graphormer_model(batch_imgs, mano, mesh_sampler)\n\n # obtain 3d joints, which are regressed from the full mesh\n pred_3d_joints_from_mesh = mano.get_3d_joints(pred_vertices)\n # obtain 2d joints, which are projected from 3d joints of mesh\n #pred_2d_joints_from_mesh = orthographic_projection(pred_3d_joints_from_mesh.contiguous(), pred_camera.contiguous())\n #pred_2d_coarse_vertices_from_mesh = orthographic_projection(pred_vertices_sub.contiguous(), pred_camera.contiguous())\n pred_camera = pred_camera.cpu()\n pred_vertices = pred_vertices.cpu()\n #\n res = crop_len\n focal_length = 1000 * scale\n camera_t = np.array([-pred_camera[1], -pred_camera[2], -2*focal_length/(res * pred_camera[0] +1e-9)])\n pred_3d_joints_camera = pred_3d_joints_from_mesh.cpu()[0] - camera_t\n z_3d_dist = pred_3d_joints_camera[:,2].clone()\n pred_2d_joints_img_space = ((pred_3d_joints_camera/z_3d_dist[:,None]) * np.array((focal_length, focal_length, 1)))[:,:2] + np.array((W/2, H/2))\n \n return pred_2d_joints_img_space\n \n\n def eval_mpjpe(self, sample, info):\n H, W, C = sample.shape\n padded_image = np.zeros((H*2, W*2, 3))\n padded_image[int(1/2 * H):int(3/2 * H), int(1/2 * W):int(3/2 * W)] = sample\n crop_boxes = info[\"crop_boxes\"]\n hands = info[\"hands\"]\n groundtruth_2d_keypoints = info[\"groundtruth_2d_keypoints\"]\n crop_lens = info[\"crop_lens\"]\n pjpe = 0\n for i in range(len(crop_boxes)):#box in crop_boxes:\n crop_y_min, crop_y_max, crop_x_min, crop_x_max = crop_boxes[i]\n cropped = padded_image[crop_y_min:crop_y_max+1, crop_x_min:crop_x_max+1]\n hand = hands[i]\n if hand == \"left\":\n cropped = cv2.flip(cropped, 1)\n crop_len = crop_lens[i]\n scale = crop_len/224\n if crop_len < 224:\n graphormer_input = cv2.resize(cropped, (224, 224), interpolation=cv2.INTER_CUBIC)\n else:\n graphormer_input = cv2.resize(cropped, (224, 224), interpolation=cv2.INTER_AREA)\n generated_keypoint = self.get_keypoints(graphormer_input.astype(np.uint8), self._model, self.mano_model, self.mesh_sampler, scale, crop_len)\n #generated_keypoint = generated_keypoint * ((crop_y_max - crop_y_min + 1)/224)\n pjpe += np.sum(np.sqrt(np.sum(((generated_keypoint - groundtruth_2d_keypoints[i]) ** 2).numpy(), axis=1)))\n pass\n mpjpe = pjpe/(len(crop_boxes) * 21)\n return mpjpe" } ]
import sys
import os
import argparse
import json
import torch
import numpy as np
import cv2
import numpy as np
import cv2
import config
import cv2
import einops
import numpy as np
import torch
import random
import ast
from config import handrefiner_root
from PIL import Image
from torchvision import transforms
from pytorch_lightning import seed_everything
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
from pathlib import Path
from preprocessor.meshgraphormer import MeshGraphormerMediapipe
10,879
    # input image
    parser.add_argument('--input_img', type=str, default="")
    # output directory where the rectified images will be saved to
    parser.add_argument('--out_dir', type=str, default="")
    # file where the mpjpe values will be logged to
    parser.add_argument('--log_json', type=str, default="")
    # control strength for ControlNet
    parser.add_argument('--strength', type=float, default=1.0)
    # directory where the depth maps will be saved to. Leaving it empty will disable this function
    parser.add_argument('--depth_dir', type=str, default="")
    # directory where the masks will be saved to. Leaving it empty will disable this function
    parser.add_argument('--mask_dir', type=str, default="")
    # whether evaluate the mpjpe error in fixed control strength mode
    parser.add_argument('--eval', type=ast.literal_eval, default=False)
    # whether use finetuned ControlNet trained on synthetic images as introduced in the paper
    parser.add_argument('--finetuned', type=ast.literal_eval, default=True)
    # path to the SD + ControlNet weights
    parser.add_argument('--weights', type=str, default="")
    # batch size
    parser.add_argument('--num_samples', type=int, default=1)
    # prompt file for multi-image rectification
    # see manual.md for file format
    parser.add_argument('--prompt_file', type=str, default="")
    # prompt for single image rectification
    parser.add_argument('--prompt', type=str, default="")
    # number of generation iteration for each image to be rectified
    # in general, for each input image, n_iter x num_samples number of rectified images will be produced
    parser.add_argument('--n_iter', type=int, default=1)
    # adaptive control strength as introduced in paper (we tend to use fixed control strength as default)
    parser.add_argument('--adaptive_control', type=ast.literal_eval, default=False)
    # padding controls the size of masks around the hand
    parser.add_argument('--padding_bbox', type=int, default=30)
    # set seed
    parser.add_argument('--seed', type=int, default=-1)
    args = parser.parse_args()
    return args

args = parse_args()

if (args.prompt_file != "" and args.prompt != "") or (args.prompt_file == "" and args.prompt == ""):
    raise Exception("Please specify one and only one of the --prompt and --prompt_file")
if (args.input_dir != "" and args.input_img != "") or (args.input_dir == "" and args.input_img == ""):
    raise Exception("Please specify one and only one of the --input_dir and --input_img")

model = create_model("control_depth_inpaint.yaml").cpu()
if args.finetuned:
    model.load_state_dict(load_state_dict(args.weights, location='cuda'), strict=False)
else:
    model.load_state_dict(
        load_state_dict("models/sd-v1-5-inpainting.ckpt", location="cuda"), strict=False
    )
    model.load_state_dict(
        load_state_dict("models/control_v11f1p_sd15_depth.pth", location="cuda"),
        strict=False,
    )
model = model.to("cuda")

meshgraphormer = MeshGraphormerMediapipe()

if args.log_json != "":
    f_mpjpe = open(args.log_json, 'w')

# prompt needs to be same for all pictures in the same batch
if args.input_img != "":
    assert args.prompt_file == "", "prompt file should not be used for single image rectification"
    inputs = [args.input_img]
else:
    if args.prompt_file != "":
        f_prompt = open(args.prompt_file)
        inputs = f_prompt.readlines()
    else:
        inputs = os.listdir(args.input_dir)

for file_info in inputs:
    if args.prompt_file != "":
        file_info = json.loads(file_info)
        file_name = file_info["img"]
        prompt = file_info["txt"]
    else:
        file_name = file_info
        prompt = args.prompt
    image_file = os.path.join(args.input_dir, file_name)
    file_name_raw = Path(file_name).stem

    # STEP 3: Load the input image.
    image = np.array(Image.open(image_file))
    raw_image = image
    H, W, C = raw_image.shape
    gen_count = 0
    for iteration in range(args.n_iter):
        depthmap, mask, info = meshgraphormer.get_depth(args.input_dir, file_name, args.padding_bbox)
        if args.depth_dir != "":
            cv2.imwrite(os.path.join(args.depth_dir, file_name_raw + "_depth.jpg"), depthmap)
        if args.mask_dir != "":
            cv2.imwrite(os.path.join(args.mask_dir, file_name_raw + "_mask.jpg"), mask)
        control = depthmap
# STEP 1: Import the necessary modules.
from __future__ import absolute_import, division, print_function

def load():
    paths = [handrefiner_root, os.path.join(handrefiner_root, 'MeshGraphormer'), os.path.join(handrefiner_root, 'preprocessor')]
    for p in paths:
        sys.path.insert(0, p)

load()

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])])

def parse_args():
    parser = argparse.ArgumentParser()
    # input directory containing images to be rectified
    parser.add_argument('--input_dir', type=str, default="")
    # input image
    parser.add_argument('--input_img', type=str, default="")
    # output directory where the rectified images will be saved to
    parser.add_argument('--out_dir', type=str, default="")
    # file where the mpjpe values will be logged to
    parser.add_argument('--log_json', type=str, default="")
    # control strength for ControlNet
    parser.add_argument('--strength', type=float, default=1.0)
    # directory where the depth maps will be saved to. Leaving it empty will disable this function
    parser.add_argument('--depth_dir', type=str, default="")
    # directory where the masks will be saved to. Leaving it empty will disable this function
    parser.add_argument('--mask_dir', type=str, default="")
    # whether evaluate the mpjpe error in fixed control strength mode
    parser.add_argument('--eval', type=ast.literal_eval, default=False)
    # whether use finetuned ControlNet trained on synthetic images as introduced in the paper
    parser.add_argument('--finetuned', type=ast.literal_eval, default=True)
    # path to the SD + ControlNet weights
    parser.add_argument('--weights', type=str, default="")
    # batch size
    parser.add_argument('--num_samples', type=int, default=1)
    # prompt file for multi-image rectification
    # see manual.md for file format
    parser.add_argument('--prompt_file', type=str, default="")
    # prompt for single image rectification
    parser.add_argument('--prompt', type=str, default="")
    # number of generation iteration for each image to be rectified
    # in general, for each input image, n_iter x num_samples number of rectified images will be produced
    parser.add_argument('--n_iter', type=int, default=1)
    # adaptive control strength as introduced in paper (we tend to use fixed control strength as default)
    parser.add_argument('--adaptive_control', type=ast.literal_eval, default=False)
    # padding controls the size of masks around the hand
    parser.add_argument('--padding_bbox', type=int, default=30)
    # set seed
    parser.add_argument('--seed', type=int, default=-1)
    args = parser.parse_args()
    return args

args = parse_args()

if (args.prompt_file != "" and args.prompt != "") or (args.prompt_file == "" and args.prompt == ""):
    raise Exception("Please specify one and only one of the --prompt and --prompt_file")
if (args.input_dir != "" and args.input_img != "") or (args.input_dir == "" and args.input_img == ""):
    raise Exception("Please specify one and only one of the --input_dir and --input_img")

model = create_model("control_depth_inpaint.yaml").cpu()
if args.finetuned:
    model.load_state_dict(load_state_dict(args.weights, location='cuda'), strict=False)
else:
    model.load_state_dict(
        load_state_dict("models/sd-v1-5-inpainting.ckpt", location="cuda"), strict=False
    )
    model.load_state_dict(
        load_state_dict("models/control_v11f1p_sd15_depth.pth", location="cuda"),
        strict=False,
    )
model = model.to("cuda")

meshgraphormer = MeshGraphormerMediapipe()

if args.log_json != "":
    f_mpjpe = open(args.log_json, 'w')

# prompt needs to be same for all pictures in the same batch
if args.input_img != "":
    assert args.prompt_file == "", "prompt file should not be used for single image rectification"
    inputs = [args.input_img]
else:
    if args.prompt_file != "":
        f_prompt = open(args.prompt_file)
        inputs = f_prompt.readlines()
    else:
        inputs = os.listdir(args.input_dir)

for file_info in inputs:
    if args.prompt_file != "":
        file_info = json.loads(file_info)
        file_name = file_info["img"]
        prompt = file_info["txt"]
    else:
        file_name = file_info
        prompt = args.prompt
    image_file = os.path.join(args.input_dir, file_name)
    file_name_raw = Path(file_name).stem

    # STEP 3: Load the input image.
    image = np.array(Image.open(image_file))
    raw_image = image
    H, W, C = raw_image.shape
    gen_count = 0
    for iteration in range(args.n_iter):
        depthmap, mask, info = meshgraphormer.get_depth(args.input_dir, file_name, args.padding_bbox)
        if args.depth_dir != "":
            cv2.imwrite(os.path.join(args.depth_dir, file_name_raw + "_depth.jpg"), depthmap)
        if args.mask_dir != "":
            cv2.imwrite(os.path.join(args.mask_dir, file_name_raw + "_mask.jpg"), mask)
        control = depthmap
ddim_sampler = DDIMSampler(model)
3
2023-11-24 10:19:23+00:00
16k
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword arguments\n \n Args:\n kwargs (dict): Keyword arguments\n \"\"\"\n self.kwargs = kwargs\n self.__dict__.update(kwargs)\n\n def generate_list_settings(self, list_):\n \"\"\"\n Converts provided list to a normalized list that can be stored as a json object to serialize.\n \n Args:\n list_ (List): List to be converted\n Returns\n Transformed normal list\n \"\"\"\n normal_list = []\n for item in list_:\n if isinstance(item, BaseClass):\n normal_list.append(item.generate_settings())\n elif isinstance(item, dict):\n normal_list.append(self.generate_kwarg_setting(item))\n elif isinstance(item, (tuple, list)):\n normal_list.append(self.generate_list_settings(item))\n else:\n normal_list.append(item)\n return normal_list\n\n def generate_kwarg_setting(self, kwargs):\n \"\"\"\n Converts provided keyword arguments to normal kwargs in terms of serialization.\n\n Args:\n kwargs (dict): kwargs to be converted.\n \"\"\"\n normal_kwargs = dict()\n for kwarg in kwargs:\n if isinstance(kwargs[kwarg], BaseClass):\n normal_kwargs[kwarg] = kwargs[kwarg].generate_settings()\n elif isinstance(kwargs[kwarg], (list, tuple)):\n normal_kwargs[kwarg] = self.generate_list_settings(kwargs[kwarg])\n elif isinstance(kwargs[kwarg], dict):\n normal_kwargs[kwarg] = self.generate_kwarg_setting(kwargs[kwarg])\n else:\n normal_kwargs[kwarg] = kwargs[kwarg]\n \n return normal_kwargs\n\n\n def generate_settings(self):\n \"\"\"\n Generates settings for the instance of the BaseClass.\n\n Returns\n Settings in dictionary format.\n \"\"\"\n settings = {\n \"class\": self.__class__.__name__, \n **self.generate_kwarg_setting({kwarg: self.__dict__[kwarg] for kwarg in self.kwargs}), \n }\n return settings\n \n def save(self, path):\n \"\"\"\n Saves the generated settings into a JSON file at a specified path.\n \n Args:\n path (string): The file path at which the settings have to be saved.\n \"\"\"\n settings = self.generate_settings()\n\n if os.path.dirname(path) != \"\":\n os.makedirs(os.path.dirname(path), exist_ok=True)\n \n with open(path, \"w\") as f:\n json.dump(settings, f, indent=2)\n\n @classmethod\n def get_all_subclasses(cls):\n \"\"\"\n Returns all subclasses of the BaseClass.\n \"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(subclass.get_all_subclasses())\n\n return all_subclasses\n\n @staticmethod\n def find_class(cls_name):\n \"\"\"\n Searches for a class that matches the given class name.\n\n Args:\n cls_name (string): Class name to be matched\n \"\"\"\n for possible_cls in BaseClass.get_all_subclasses():\n if possible_cls.__name__ == cls_name:\n return possible_cls\n return None\n\n @staticmethod\n def load_from_list_settings(list_):\n \"\"\"\n Deserializes the list saved settings to instantiate the objects.\n\n Args:\n list_ (List): List of saved settings\n \"\"\"\n output_list = []\n for item in list_:\n if isinstance(item, dict):\n output_list.append(BaseClass.load_from_dict(item))\n elif isinstance(item, (tuple, list)):\n output_list.append(BaseClass.load_from_list_settings(item))\n else:\n output_list.append(item)\n\n return output_list\n \n @staticmethod\n def load_from_dict(dict_):\n \"\"\"\n Deserializes the dictionary saved settings to instantiate the 
objects.\n\n Args:\n dict_ (dict): Dictionary containing saved settings\n \"\"\"\n other_class = BaseClass.find_class(dict_.get(\"class\", None))\n if other_class is not None:\n return other_class.load_from_settings(dict_)\n \n output_dict = dict()\n for key in dict_:\n if isinstance(dict_[key], dict):\n output_dict[key] = BaseClass.load_from_dict(dict_[key])\n elif isinstance(dict_[key], (tuple, list)):\n output_dict[key] = BaseClass.load_from_list_settings(dict_[key])\n else:\n output_dict[key] = dict_[key]\n\n return output_dict\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Deserializes the saved settings to instantiate the object.\n\n Args:\n settings (dict): Saved settings\n \"\"\"\n cls = BaseClass.find_class(settings[\"class\"])\n\n if cls is None:\n logger.error(f\"Could not find class {settings['class']} when loading class.\")\n return None\n\n kwargs = dict()\n for kwarg in settings:\n if kwarg == \"class\":\n continue\n if isinstance(settings[kwarg], dict):\n kwargs[kwarg] = BaseClass.load_from_dict(settings[kwarg])\n elif isinstance(settings[kwarg], (tuple, list)):\n kwargs[kwarg] = BaseClass.load_from_list_settings(settings[kwarg])\n else:\n kwargs[kwarg] = settings[kwarg]\n\n return cls(**kwargs)\n\n @classmethod\n def _load(cls, path, **kwargs):\n \"\"\"\n Loads the settings from the JSON file at the specified path.\n \n Args:\n path (string): The file path from which the settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n for kwarg in kwargs:\n settings[kwarg] = kwargs[kwarg]\n return cls.load_from_settings(settings)\n\n @staticmethod\n def load(path, **kwargs):\n \"\"\"\n Loads the settings of the class from the JSON file.\n\n Args:\n path (string): The file path from which the class settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n cls = BaseClass.find_class(settings[\"class\"])\n return cls._load(path, **kwargs)\n\n def __str__(self) -> str:\n \"\"\"\n Returns a string representation of the class object.\n \"\"\"\n return f\"{self.__class__.__name__}({self.kwargs})\"\n \n def __eq__(self, o: object) -> bool:\n \"\"\"\n Checks whether the provided object is equal to the current object.\n\n Args:\n o (object): Object to compare\n \"\"\"\n if not isinstance(o, BaseClass):\n return False\n \n other_settings = o.generate_settings()\n settings = self.generate_settings()\n\n return other_settings == settings" }, { "identifier": "CustomDataset", "path": "src/model_arithmetic/dataset.py", "snippet": "class CustomDataset(Dataset):\n \"\"\"\n A custom PyTorch Dataset class for tokenized sequence data.\n\n Uses a tokenizer to convert text data from a DataFrame to input_ids (tokens), \n and optionally attaches label data if present in the DataFrame.\n \"\"\"\n def __init__(self, tokenizer, df, max_tokens=128, min_tokens=1, random_cutoff=False):\n \"\"\"\n Initializes the CustomDataset object.\n\n Args:\n tokenizer (Tokenizer): The tokenizer to be used for the text data.\n df (pandas.DataFrame): DataFrame containing the text data, and optionally labels.\n max_tokens (int, optional): Maximum number of tokens per sequence. Defaults to 128.\n min_tokens (int, optional): Minimum number of tokens per sequence. Defaults to 1.\n random_cutoff (bool, optional): Whether to randomly cut off the number of tokens per sequence. 
Defaults to False.\n \"\"\"\n super().__init__()\n data = df.dropna()\n self.tokenized_dataset = [\n tokenizer(sentence, return_tensors=\"pt\", truncation=True, max_length=max_tokens).input_ids.view(-1) for sentence in tqdm(data[\"text\"].tolist())\n ]\n\n self.df = data\n self.has_labels = \"label\" in data.columns\n self.min_tokens = min_tokens\n self.labels = None\n if self.has_labels:\n self.labels = data[\"label\"].values\n \n self.random_cutoff = random_cutoff\n\n def __len__(self):\n \"\"\"\n Returns the length of the tokenized dataset, \n i.e., the number of tokenized sequences.\n \n Returns:\n int: Number of tokenized sequences.\n \"\"\"\n return len(self.tokenized_dataset)\n\n def __getitem__(self, idx):\n \"\"\"\n Fetches an item from the dataset at the given index.\n\n If labels are available, also fetches the associated label.\n If `random_cutoff` is true, may truncate sequence length randomly.\n\n Args:\n idx (int): Index of the required sequence.\n\n Returns:\n dict: A dictionary with the following structure-\n {\n \"input_ids\": torch.Tensor (Tokenized sequence),\n \"labels\": torch.Tensor (Associated label, if available)\n }\n \"\"\"\n cutoff = len(self.tokenized_dataset[idx])\n if self.random_cutoff:\n cutoff = torch.randint(min(cutoff, self.min_tokens), cutoff + 1, (1,)).item()\n \n if not self.has_labels:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff]}\n else:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff], \"labels\": torch.tensor([self.labels[idx]], dtype=torch.long)}" }, { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. 
\n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model" }, { "identifier": "load_tokenizer", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer" }, { "identifier": "ModelArithmetic", "path": "src/model_arithmetic/model_arithmetic.py", "snippet": "class ModelArithmetic(PreTrainedModel):\n \"\"\"\n Main class for prompt arithmetic. 
Handles the generation of text based on the formula.\n \"\"\"\n SAVE_FILE = \"prompt_arithmetic.json\"\n _supports_sdpa = True\n\n def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, \n retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None):\n \"\"\"Initializes the prompt arithmetic model.\n\n Args:\n formula (Operator): The formula for which generations need to be made.\n default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None.\n dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16.\n intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False.\n epsilon (float, optional): Just some small value. Defaults to 1e-12.\n retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to [].\n calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True.\n needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task.\n lm_eval_task (str, optional): Name of the lm eval task. Defaults to None.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None.\n \"\"\"\n self.formula = formula.clone()\n\n self.default_model = default_model\n self.loaded_models = dict()\n self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing)\n self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn\n self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator\n \n self.output_type = namedtuple(\"ModelArithmeticOutput\", [\"logits\", \"logprobs_per_model\"])\n self.intermediate_argmax = intermediate_argmax\n self.retroactive_operators = retroactive_operators\n self.calculate_statistics = calculate_statistics\n\n self.runnable_operators = []\n for runnable_operator in self.formula.runnable_operators():\n if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]):\n self.runnable_operators.append(runnable_operator)\n \n\n # sort the prompts by speculative factor, putting the one with highest speculative factor first\n # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones\n # however, we first need to sort by run_priority and then within that by speculative factor\n self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True)\n \n self.load_all_models(dtype=dtype)\n if self.default_model not in self.loaded_models:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = runnable_operator.model\n break\n if self.default_model is None:\n raise ValueError(\"Default model must be specified if not specified in an llm prompt\")\n\n self.config = self.loaded_models[str(self.default_model)].config\n\n if 
tokenizer is None:\n self.tokenizer = load_tokenizer(self.default_model)\n else:\n self.tokenizer = tokenizer\n \n self.init_runnable_operators()\n \n self.model_input_tokens = {\n runnable_operator.id(): TokenizedInput(runnable_operator, \n runnable_operator.model, \n self.loaded_models[str(runnable_operator.model)].config,\n self.tokenizer) \n for runnable_operator in self.runnable_operators\n }\n \n self.init_monitor()\n \n self.epsilon = epsilon\n \n self.word_size = len(self.tokenizer)\n \n if Compatibility is not None:\n self.lm_eval_compatibility = Compatibility(\n task_name=lm_eval_task,\n needs_input_tokens_lm_eval=needs_input_tokens_lm_eval,\n tokenizer=self.tokenizer,\n device=self.device,\n max_length=get_max_length(self.config),\n )\n else:\n self.lm_eval_compatibility = None\n \n super().__init__(self.config)\n \n def init_monitor(self):\n \"\"\"\n Initializes the monitor for the prompt arithmetic model.\n \"\"\"\n self.monitor = Monitor(self.runnable_operators)\n \n def init_runnable_operators(self):\n \"\"\"Initializes the runnable operators. This is done after the models have been loaded, because the models are needed for the runnable operators.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n runnable_operator.model = self.default_model\n runnable_operator.initialize_after_model_set()\n\n def load_all_models(self, dtype=torch.bfloat16):\n \"\"\"Loads all the models that are needed for the runnable operators. Models are never loaded twice.\n\n Args:\n dtype (torch.dtype, optional): Default Dtype of the models. Defaults to torch.bfloat16.\n \"\"\"\n if self.default_model is None:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = str(runnable_operator.model)\n break\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n assert self.default_model is not None, \"Default model must be specified if not specified in prompt\"\n runnable_operator.model = self.default_model\n if runnable_operator.model not in self.loaded_models:\n model = runnable_operator.load_model(dtype=dtype)\n model.eval()\n if model is not None:\n self.loaded_models[str(runnable_operator.model)] = model\n \n if len(self.loaded_models) == 0:\n assert self.default_model is not None, \"Required to at least have one model, for now\"\n self.loaded_models[str(self.default_model)] = load_model(self.default_model, dtype=dtype)\n \n @property\n def device(self):\n \"\"\"Device of the default model. Needed for compatibility with lm_eval\n\n Returns:\n torch.device: Device of the default model.\n \"\"\"\n return self.loaded_models[str(self.default_model)].device\n\n def save_pretrained(self, path : str):\n \"\"\"Saves the model to the specified path.\n\n Args:\n path (str): Path to which to save the model\n \"\"\"\n os.makedirs(path, exist_ok=True)\n all_settings = {\n \"formula\": self.formula.generate_settings(),\n \"default_model\": self.default_model,\n }\n\n with open(os.path.join(path, self.SAVE_FILE), \"w\") as f:\n json.dump(all_settings, f, indent=4, sort_keys=True)\n\n @classmethod\n def from_pretrained(cls, path : str, dtype=torch.bfloat16):\n \"\"\"Loads the model from the specified path.\n\n Args:\n path (str): Path from which to load the model\n dtype (torch.dtype, optional): Default dtype for the models. 
Defaults to torch.bfloat16.\n\n Returns:\n ModelArithmetic: model arithmetic model\n \"\"\"\n with open(os.path.join(path, cls.SAVE_FILE), \"r\") as f:\n all_settings = json.load(f)\n all_settings[\"formula\"] = Operator.load_from_settings(all_settings[\"formula\"])\n return cls(**all_settings, dtype=dtype)\n\n \n def forward_model(self, runnable_operator, continuation_tokens, model_new_tokens=None, use_cache=False, do_speculation=False):\n \"\"\"Runs a specifc runnable operator on the continuation tokens.\n\n Args:\n runnable_operator (RunnableOperator): The runnable operator to run.\n continuation_tokens (list[list[int]]): List of tokens that need to be continued. The prompt is not included in these tokens\n model_new_tokens (list[int], optional): New tokens for the model. Defaults to None.\n use_cache (bool, optional): Whether or not to allow the model to use cache (eg key-value storage for an LLM). Defaults to False.\n do_speculation (bool, optional): Whether or not to do speculation sampling. Defaults to False.\n\n Returns:\n torch.tensor: logprobs of the model, one logprob distribution for each new token in each sample\n \"\"\"\n start_time = time.time()\n \n tokenized_input_creator = self.model_input_tokens[runnable_operator.id()]\n tokenized_inputs = tokenized_input_creator.add_continuation_tokens(continuation_tokens)\n tokenized_only_input = tokenized_input_creator.get_only_input_tokens()\n \n was_none = model_new_tokens is None\n \n if was_none:\n model_new_tokens = torch.tensor([len(continuation_tokens[i]) + 1 for i in range(len(continuation_tokens))])\n \n if len(self.model_prediction_history) < len(continuation_tokens):\n new_prediction_history = [dict() for _ in range(len(continuation_tokens))]\n else:\n new_prediction_history = [self.model_prediction_history[i].get(self.max_index_prediction_history(i), dict()) for i in range(len(continuation_tokens))]\n \n logprobs = runnable_operator.run(\n loaded_models=self.loaded_models,\n tokenized_inputs=tokenized_inputs,\n model_new_tokens=model_new_tokens,\n new_prediction_history=new_prediction_history,\n other_tokenizer=self.tokenizer,\n tokenized_only_input=tokenized_only_input, \n use_cache=use_cache,\n do_speculation=do_speculation\n )\n \n logprobs = [logprob.to(self.device) for logprob in logprobs]\n \n if was_none:\n logprobs = torch.stack(logprobs, dim=0)\n\n self.monitor.add_result(element=time.time() - start_time, runnable_operator=runnable_operator)\n return logprobs\n \n def group_complete(self, model_history):\n \"\"\"Checks which groups of runnable operators have been completely calculated and which haven't.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict[bool]: Dict mapping the group to whether it has been completely calculated or not\n \"\"\"\n # everything that is a group needs to be either all calculated or all not calculated\n group_calculated = dict()\n groups = set([runnable_operator.group for runnable_operator in self.runnable_operators if runnable_operator.group is not None])\n completed_groups = {group: True for group in groups}\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is not None:\n is_calculated = model_history.get(runnable_operator.id()) is not None\n if runnable_operator.group not in group_calculated:\n group_calculated[runnable_operator.group] = is_calculated\n elif group_calculated[runnable_operator.group] != is_calculated:\n completed_groups[runnable_operator.group] = False\n return 
completed_groups\n \n def group_model_history(self, model_history):\n \"\"\"Sets the model history on which to evaluate the formula based on the groups. Removes predictions if the group hasn't been completely calculated yet.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict: Adjusted dict mapping\n \"\"\"\n completed_groups = self.group_complete(model_history)\n grouped_model_history = dict()\n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is None or completed_groups[runnable_operator.group]:\n grouped_model_history[runnable_operator.id()] = model_history[runnable_operator.id()]\n else:\n grouped_model_history[runnable_operator.id()] = None\n \n return grouped_model_history\n \n def create_sample_logprobs(self, logprobs, temperature, top_k, top_p):\n \"\"\"Creates the logprobs for each token in each sample.\n\n Args:\n logprobs (torch.tensor): Logprobs of the model\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n torch.tensor: Logprobs for each token in each sample\n \"\"\"\n if temperature == 0:\n logprobs_argmax = torch.argmax(logprobs, dim=-1)\n logprobs = torch.nn.functional.one_hot(logprobs_argmax, num_classes=logprobs.shape[-1]).float()\n return logprobs\n logprobs = logprobs / temperature\n logprobs = top_k_top_p_filtering(logprobs.unsqueeze(0), top_k=top_k, top_p=top_p)\n return torch.softmax(logprobs, dim=-1).squeeze()\n \n \n\n def process_logprobs(self, model_history):\n \"\"\"Processes model history to get the probability distribution for the token.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n _type_: _description_\n \"\"\"\n init_time = time.time()\n logprobs_normalized = self.formula.evaluate(model_history)\n self.monitor.add_result(element=time.time() - init_time, indicator=\"formula_evaluation\")\n if not torch.is_tensor(logprobs_normalized):\n return None\n # logprobs_normalized = logprobs_normalized / temperature\n # logprobs_normalized = top_k_top_p_filtering(logprobs_normalized.unsqueeze(0), top_k=top_k, top_p=top_p)\n return logprobs_normalized\n \n def run_retroactive_operators(self, index, tokenized_sentence, temperature, top_k, top_p):\n \"\"\"Runs the retroactive operators on the tokenized sentence. 
\n\n Args:\n index (int): Index of the sentence in the current batch\n tokenized_sentence (list[int]): Tokenized sentence\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n list[int]: Adjusted tokenized sentence based on the retroactive operators and whether they accepted it.\n \"\"\"\n for operator in self.retroactive_operators:\n accepted = operator.accept(tokenized_sentence, self.tokenizer)\n if accepted < 0:\n not_accepted_token = tokenized_sentence[accepted]\n self.clear_model_prediction_history(index, tokenized_sentence, from_=len(tokenized_sentence) + accepted)\n tokenized_sentence = tokenized_sentence[:len(tokenized_sentence) + accepted]\n \n self.logprobs_history[index][len(tokenized_sentence)][not_accepted_token] = -torch.inf\n \n if torch.all(self.logprobs_history[index][len(tokenized_sentence)] == -torch.inf):\n self.logprobs_history[index][len(tokenized_sentence)] = torch.zeros_like(self.logprobs_history[index][len(tokenized_sentence)])\n \n probs_to_sample = self.create_sample_logprobs(\n self.logprobs_history[index][len(tokenized_sentence)],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n )\n new_token = torch.multinomial(probs_to_sample, 1).item()\n \n tokenized_sentence.append(new_token)\n return self.run_retroactive_operators(index, tokenized_sentence, temperature, top_k, top_p)\n \n return tokenized_sentence\n \n def speculation_sample(self, token, previous_models_probs, new_models_probs):\n \"\"\"Sample a token based on the previous and new model probabilities in the speculative sampling way. Also returns whether the token was accepted or not.\n\n Args:\n token (int): Token that is currently selected\n previous_models_probs (torch.tensor): Model probabilities of the previous models\n new_models_probs (torch.tensor): Model probabilities of the new models\n\n Returns:\n (int, bool): New token and whether or not the input token was accepted\n \"\"\"\n acceptance_prob = torch.minimum(torch.tensor(1.0), new_models_probs[token] / (previous_models_probs[token] + torch.tensor(self.epsilon)))\n # TODO: the next line is taking an enormous amount of time because of asynchronous computing on gpu's and requiring it to be returned immediately\n # Therefore do batch processing\n acceptance_prob = float(acceptance_prob)\n self.monitor.add_result(element=float(acceptance_prob), indicator=\"acceptance_prob\")\n # self.monitor.add_result(element=self.entropy(previous_models_probs).item(), indicator=\"entropy_previous\")\n # self.monitor.add_result(element=previous_models_probs[token].item(), indicator=\"probability_previous\")\n\n if torch.rand(1) < acceptance_prob:\n return token, True\n else:\n new_proba_distrib = torch.relu(new_models_probs - previous_models_probs)\n new_proba_distrib /= torch.sum(new_proba_distrib)\n new_token = torch.multinomial(new_proba_distrib, 1).item()\n return new_token, False\n \n \n def add_new_result(self, generated_tokens, num_new_tokens, runnable_operator, new_model_logprobs, top_p, top_k, temperature):\n \"\"\"Adds a new run of a runnable operator to the model prediction history. 
Also does speculation sampling if needed.\n\n Args:\n generated_tokens (list[list[int]]): Currently generated tokens by the model\n num_new_tokens (list[int]): Number of new tokens for each sample in the batch\n runnable_operator (RunnableOperator): Runnable operator that was run\n new_model_logprobs (List[torch.tensor]): Output of the run function of the runnable operator\n top_p (flaot): top_p to use\n top_k (int): top_k to use\n temperature (float): temperature to use\n\n Returns:\n list[bool]: For each sample in the batch, whether all tokens in that sample were kept or not\n \"\"\"\n all_kept = []\n for i in range(len(generated_tokens)):\n n_generated_tokens = len(generated_tokens[i])\n kept = True\n for n_token in range(n_generated_tokens - num_new_tokens[i] + 1, n_generated_tokens + 1):\n # initialize the model prediction history\n self.model_prediction_history[i][n_token] = self.model_prediction_history[i].get(n_token, \n {runnable_operator.id(): None for runnable_operator in self.runnable_operators})\n # check if we need to do speculation sampling, only needed when a previous token was sampled\n do_speculation_sample = n_token < n_generated_tokens\n \n # speculation sampling not needed if the model was run before \n if self.model_prediction_history[i][n_token][runnable_operator.id()] is not None:\n do_speculation_sample = False\n \n # speculation sampling not needed if all models have not been run yet: this is the first model on this token\n if all([logprob is None for logprob in self.model_prediction_history[i][n_token].values()]):\n do_speculation_sample = False\n # This means that this token was already fully accepted, so we can just continue (can happen if batch_size > 1 or when end is triggered)\n if self.max_index_prediction_history(i) > n_token:\n continue\n \n # add the new model logprobs\n self.model_prediction_history[i][n_token][runnable_operator.id()] = new_model_logprobs[i][-n_generated_tokens + n_token + num_new_tokens[i] - 1]\n \n group_model_history = self.group_model_history(self.model_prediction_history[i][n_token])\n # group_model_history needs to be separately checked, since it could be that the group is not yet fully calculated\n # also allow no logprobs runnable operators (would lead to errors) if the formula is not finished yet (if it is finished, you need to)\n if all([logprob is None for logprob in group_model_history.values()]) or (not runnable_operator.outputs_logprobs and not self.formula.is_finished(group_model_history)):\n continue\n \n # process the logprobs\n new_model_probs = self.process_logprobs(group_model_history)\n \n if self.intermediate_argmax and not self.formula.is_finished(group_model_history):\n argmax_el = torch.argmax(new_model_probs)\n new_model_probs = torch.zeros_like(new_model_probs)\n new_model_probs[argmax_el] = 1.0\n \n if do_speculation_sample:\n if self.calculate_statistics:\n self.monitor.add_result(self.expected_acceptance_prob(self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n self.create_sample_logprobs(self.logprobs_history[i].get(n_token), temperature, top_k, top_p)), \n indicator=\"expected_acceptance_prob\", runnable_operator=runnable_operator)\n\n new_token, kept = self.speculation_sample(\n token = generated_tokens[i][n_token],\n previous_models_probs=self.create_sample_logprobs(self.logprobs_history[i][n_token], temperature, top_k, top_p),\n new_models_probs=self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n )\n if n_token in self.model_prediction_history[i]:\n 
self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n # if not kept, we change the generated tokens and remove the model prediction history after that token\n generated_tokens[i][n_token] = new_token\n generated_tokens[i] = generated_tokens[i][:n_token + 1]\n self.clear_model_prediction_history(i, generated_tokens[i], from_=n_token)\n self.trigger_end[i] = False\n \n elif n_token in self.model_prediction_history[i]:\n self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n break\n \n all_kept.append(kept)\n return all_kept\n \n\n def clear_model_prediction_history(self, index, generated_tokens_index, from_=-1):\n \"\"\"Clears the model prediction history for a specific sample in the batch. First deletes all history of finished tokens, then \n deletes history of tokens that were prediction, but then got removed because of speculation\n\n Args:\n index (int): index of the sample in the batch\n generated_tokens_index (list[int]): Generated tokens at the index\n from_ (int, optional): From which token to delete all the history. Defaults to -1.\n \"\"\"\n all_indices = list(self.model_prediction_history[index].keys())\n for token in all_indices:\n all_none = all([logprob is None for logprob in self.model_prediction_history[index][token].values()])\n finished = self.formula.is_finished(self.model_prediction_history[index][token])\n if all_none or finished or (from_ != -1 and token > from_):\n if finished and len(generated_tokens_index) > token and self.calculate_statistics:\n self.add_monitor_token_probs(generated_tokens_index[token], self.model_prediction_history[index][token], self.logprobs_history[index].get(token))\n \n if finished:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = max(token + 1, self.model_last_token_prediction[model_index][index])\n \n del self.model_prediction_history[index][token]\n \n if from_ > -1:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = min(from_ + 1, self.model_last_token_prediction[model_index][index])\n \n def max_index_prediction_history(self, index):\n \"\"\"Gets the max index of the model prediction history for a specific runnable operator \n\n Args:\n index (int): index of runnable operator in the list of runnable operators\n\n Returns:\n int: max index of its prediction\n \"\"\"\n keys = list(self.model_prediction_history[index].keys())\n if len(keys) == 0:\n return 0\n return max(self.model_prediction_history[index].keys())\n\n def normal_sample(self, probs):\n \"\"\"Samples from a probability distribution\n\n Args:\n probs (torch.tensor): Probability distribution\n\n Returns:\n int: Sampled token\n \"\"\"\n out = torch.multinomial(probs, 1)\n return out\n \n def KL_divergence(self, p, q):\n \"\"\"Compuates KL divergence between two probability distributions\n\n Args:\n p (torch.tensor): probability distribution\n q (torch.tensor): probability distribution\n\n Returns:\n float: KL divergence\n \"\"\"\n return torch.sum(p * torch.log((p + self.epsilon) / (q + self.epsilon)))\n \n def entropy(self, p):\n \"\"\"Computes entropy of a probability distribution\n\n Args:\n p (torch.tensor): probability distribution\n\n Returns:\n float: entropy\n \"\"\"\n return -torch.sum(p * torch.log(p + self.epsilon))\n \n def expected_acceptance_prob(self, p, q):\n \"\"\"\n Calculates the expected acceptance probability of speculative sampling.\n \n Args:\n p (torch.tensor): probability 
distribution\n q (torch.tensor): probability distribution\n \"\"\"\n return 1 - 1 / 2 * torch.sum(torch.abs(q - p)).item()\n \n def add_monitor_token_probs(self, token, history, history_logprobs):\n \"\"\"Adds some token probabilities to the monitor\n\n Args:\n token (int): Samples token\n history (dict): Model prediction history at the specific index where the token was drawn from\n history_logprobs (torch.tensor): LogProbability distribution from which the token was drawn.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.is_finished(history) and runnable_operator.outputs_logprobs:\n evaluated = runnable_operator.evaluate(history)\n self.monitor.add_result(element=torch.softmax(evaluated, dim=-1)[token].item(), runnable_operator=runnable_operator, indicator=\"token_prob\")\n # add logprob as well\n self.monitor.add_result(element=max(evaluated[token].item(), np.log(self.epsilon)), runnable_operator=runnable_operator, indicator=\"token_logprob\")\n # add KL divergence\n if history_logprobs is not None:\n self.monitor.add_result(element=self.KL_divergence(torch.softmax(history_logprobs, dim=-1), torch.softmax(evaluated, dim=-1)).item(), \n runnable_operator=runnable_operator, indicator=\"KL_divergence\")\n \n self.monitor.add_result(element=self.entropy(torch.softmax(history_logprobs, dim=-1)).item(), indicator=\"entropy\")\n\n def next_token_speculative(self, continuation_tokens, \n top_p=1.0, top_k=0, temperature=1.0, speculation=True, use_cache=True):\n \"\"\"Continues one step in the generation process by running the runnable operators that need to be run and then sampling from the probability distribution.\n\n Args:\n continuation_tokens (list[list[int]]): Current continuation tokens\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n temperature (float, optional): temperature to use. Defaults to 1.0.\n speculation (bool, optional): Whether to use speculation. Defaults to True.\n use_cache (bool, optional): Whether to use cache. 
Defaults to True.\n\n Returns:\n _type_: _description_\n \"\"\"\n models_ran = []\n for i, runnable_operator in enumerate(self.runnable_operators):\n new_tokens = [len(continuation_tokens[j]) - self.model_last_token_prediction[i][j] + 1 for j in range(len(continuation_tokens))]\n if runnable_operator.run_condition(new_tokens, self.trigger_end) or not speculation:\n logprobs = self.forward_model(runnable_operator, continuation_tokens, model_new_tokens=new_tokens, use_cache=use_cache, do_speculation=speculation)\n all_kept = self.add_new_result(continuation_tokens, new_tokens, runnable_operator, logprobs, top_p, top_k, temperature)\n models_ran.append(i)\n \n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) + int(all_kept[j])\n for j in range(len(continuation_tokens))]\n \n if not all(all_kept):\n break\n \n to_sample_indices = [i for i in range(len(continuation_tokens)) if all_kept[i] and not self.trigger_end[i]]\n\n if len(to_sample_indices) > 0:\n # do batch sampling\n all_required_histories = torch.stack([\n self.create_sample_logprobs(\n self.logprobs_history[i][len(continuation_tokens[i])], \n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n ) for i in to_sample_indices\n ])\n new_tokens = self.normal_sample(all_required_histories)\n for i in range(len(to_sample_indices)):\n continuation_tokens[to_sample_indices[i]].append(new_tokens[i].item())\n\n for i in models_ran:\n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) for j in range(len(continuation_tokens))]\n return continuation_tokens\n\n def __call__(self, input_ids, **kwargs):\n \"\"\"Runs the forward pass of the model. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n return self.forward(input_ids, **kwargs)\n \n def forward(self, input_ids, normalize=True, **kwargs):\n \"\"\"Runs the foward pass. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n normalize (bool, optional): Whether or not to normalize the output. 
Defaults to True.\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n logprobs_per_model = {runnable_operator.id(): None for runnable_operator in self.runnable_operators}\n if not isinstance(input_ids, list):\n input_shape = input_ids.shape\n continuation_tokens = self.lm_eval_compatibility.forward_preprocessing(input_ids, self.model_input_tokens)\n else:\n input_shape = None\n continuation_tokens = input_ids\n\n for runnable_operator in self.runnable_operators:\n logprobs = self.forward_model(runnable_operator, continuation_tokens)\n if input_shape is not None:\n logprobs = self.lm_eval_compatibility.forward_post_processing(logprobs, input_shape)\n logprobs_per_model[runnable_operator.id()] = logprobs\n\n output = self.formula.evaluate(logprobs_per_model, normalize=normalize)\n return [output]\n\n def get_decoded_tokens(self, next_tokens_batch):\n \"\"\"Gets decoded tokens from the next tokens\n\n Args:\n next_tokens_batch (list[list[int]]): New tokens for each sample in the batch\n\n Returns:\n list[str]: Decoded tokens\n \"\"\"\n # adding eos token for compatibility with sentencepiece tokenizer\n encoded_sentences = [[self.tokenizer.eos_token_id] + next_tokens for next_tokens in next_tokens_batch]\n decoded_sentences = [self.tokenizer.decode(encoded_sentence, add_special_tokens=False) for encoded_sentence in encoded_sentences]\n decoded_next_tokens = [decoded_sentence[len(self.tokenizer.eos_token):] for decoded_sentence in decoded_sentences]\n return decoded_next_tokens\n \n def clear_memory(self):\n \"\"\"Deletes all loaded models and clears the cache\n \"\"\"\n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache()\n self.loaded_models = dict()\n torch.cuda.empty_cache()\n\n def generate_text(self, sentences, max_length=1024, stop_texts=None, batch_size=None,\n temperature=1.0, top_p=1.0, top_k=0, num_return_sequences=1, do_speculation=False, use_cache=True, **kwargs):\n \"\"\"Generates text based on the input params\n\n Args:\n sentences (list[str]): List of input sentences\n max_length (int, optional): Max generation length. Defaults to 128.\n stop_texts (list[str], optional): Strings at which to stop generation. Defaults to None.\n batch_size (int, optional): Batch size. Defaults to None (all at once).\n temperature (float, optional): temperature to use. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n num_return_sequences (int, optional): Number of return sequences per sentence. Defaults to 1.\n do_speculation (bool, optional): Whether or not to do speculation. Defaults to True.\n use_cache (bool, optional): Whether or not to use cache. 
Defaults to True.\n\n Returns:\n list[str]: List of generated texts\n \"\"\"\n assert not do_speculation or any([runnable_operator.speculative_factor == 1 for runnable_operator in self.runnable_operators])\n if isinstance(sentences, str):\n sentences = [sentences]\n if batch_size is None:\n batch_size = len(sentences)\n \n # duplicate each sentence num_return_sequences times, but keep the same sentences next to each other\n sentences = [sentence for sentence in sentences for _ in range(num_return_sequences)]\n\n self.model_prediction_history = [dict() for _ in range(batch_size)]\n self.logprobs_history = [dict() for _ in range(batch_size)]\n self.model_last_token_prediction = [[0 for _ in range(batch_size)] for _ in range(len(self.runnable_operators))]\n self.trigger_end = [False for _ in range(batch_size)]\n self.init_monitor()\n \n if stop_texts is None:\n stop_texts = []\n stop_texts.append(self.tokenizer.eos_token)\n\n start_sentences = sentences[:]\n\n log(logger.debug, f\"Generating {len(sentences)} sentences\")\n\n generated_texts = [\"\" for _ in range(len(sentences))]\n generated_tokens = [[] for _ in range(len(sentences))]\n current_indices = [i for i in range(0, min(len(sentences), batch_size))]\n next_index = len(current_indices)\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n \n total_done = 0\n while len(current_indices) > 0:\n start_time = time.time()\n generated_tokens_batch = [generated_tokens[index] for index in current_indices]\n next_tokens = self.next_token_speculative(generated_tokens_batch, top_p, top_k, \n temperature, speculation=do_speculation, use_cache=use_cache)\n for i in range(len(next_tokens)):\n next_tokens[i] = self.run_retroactive_operators(i, next_tokens[i], temperature, top_k, top_p)\n self.clear_model_prediction_history(i, next_tokens[i])\n decoded_tokens = self.get_decoded_tokens(next_tokens)\n\n for i, index in enumerate(current_indices):\n generated_tokens[index] = next_tokens[i]\n generated_texts[index] = decoded_tokens[i]\n\n indices_to_remove = []\n for i in range(len(current_indices)):\n sentences[current_indices[i]] = start_sentences[current_indices[i]] + generated_texts[current_indices[i]]\n if any([stop_text in generated_texts[current_indices[i]] for stop_text in stop_texts]) or len(generated_tokens[current_indices[i]]) >= max_length:\n if len(self.model_prediction_history[i]) == 0:\n indices_to_remove.append(i)\n else:\n self.trigger_end[i] = True\n \n for i in indices_to_remove[::-1]:\n self.monitor.add_result(element=len(generated_tokens[current_indices[i]]), indicator=\"length\")\n del current_indices[i]\n self.model_prediction_history = self.model_prediction_history[:i] + self.model_prediction_history[i + 1:]\n self.logprobs_history = self.logprobs_history[:i] + self.logprobs_history[i + 1:]\n for j in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[j] = self.model_last_token_prediction[j][:i] + self.model_last_token_prediction[j][i + 1:]\n self.trigger_end = self.trigger_end[:i] + self.trigger_end[i + 1:]\n \n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache(index=i)\n\n if next_index < len(sentences):\n current_indices.append(next_index)\n self.model_prediction_history.append(dict())\n self.logprobs_history.append(dict())\n self.trigger_end.append(False)\n \n for j in range(len(self.model_last_token_prediction)):\n 
self.model_last_token_prediction[j].append(0)\n \n next_index += 1\n total_done += 1\n if total_done % 30 == 0:\n log(logger.debug, f\"Progress: {total_done / len(sentences):.3f}\")\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n\n self.monitor.add_result(element=time.time() - start_time)\n \n return generated_texts\n\n def generate(self, input_ids, attention_mask=None, do_sample=False, max_new_tokens=1024, \n stopping_criteria=None, temperature=1.0, top_p=1.0, top_k=0, use_cache=True, eos_token_id=None, pad_token_id=None, **kwargs):\n \"\"\"Generates text based on the input params. Needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n attention_mask (torch.tensor, optional): attention mask. Defaults to None.\n do_sample (bool, optional): Whether or not to sample. Defaults to False.\n max_new_tokens (int, optional): Max new number of tokens. Defaults to 128.\n stopping_criteria (_type_, optional): Stopping criteria to use. Defaults to None.\n temperature (float, optional): Temperature to. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n use_cache (bool, optional): Whether or not to use cache. Defaults to True.\n eos_token_id (int, optional): eos token id. Defaults to None.\n pad_token_id (int, optional): pad token id. Defaults to None.\n\n Returns:\n list[str]: Generated texts\n \"\"\"\n if not do_sample:\n top_k = 1\n \n batch_size = input_ids.shape[0]\n input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]\n stopping_sequences = [self.tokenizer.eos_token]\n if stopping_criteria is not None:\n stopping_sequences += [criteria.sequence for criteria in stopping_criteria]\n if eos_token_id is not None:\n stopping_sequences += [self.tokenizer.decode([eos_token_id])]\n \n texts = self.generate_text(input_texts, max_length=max_new_tokens, stop_texts=stopping_sequences,\n batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, use_cache=use_cache)\n encoded_texts = self.tokenizer.batch_encode_plus(texts, add_special_tokens=False, return_tensors=\"pt\").input_ids.to(self.device)\n # concatenate the input_ids with the encoded_texts\n all_encoded = torch.cat([input_ids, encoded_texts], dim=-1)\n return all_encoded" }, { "identifier": "ENABLE_LOGGING", "path": "src/model_arithmetic/utils.py", "snippet": "ENABLE_LOGGING = False" }, { "identifier": "log", "path": "src/model_arithmetic/utils.py", "snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)" } ]
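Note on the `speculation_sample` and `expected_acceptance_prob` methods quoted in the context list above: the acceptance rule is the standard speculative-sampling test, accept the drafted token with probability min(1, q(t)/p(t)) and otherwise resample from the renormalized residual max(q - p, 0); the expected acceptance probability equals one minus the total variation distance between the two distributions. A minimal standalone sketch in plain PyTorch follows (function names and the epsilon value are illustrative, not part of the quoted API):

import torch

def speculative_accept(token, p_prev, q_new, eps=1e-12):
    # Accept the drafted token with probability min(1, q(token) / p(token));
    # on rejection, resample from the residual distribution max(q - p, 0).
    accept_prob = torch.minimum(torch.tensor(1.0), q_new[token] / (p_prev[token] + eps))
    if torch.rand(1).item() < accept_prob.item():
        return token, True
    residual = torch.relu(q_new - p_prev)
    residual = residual / residual.sum()
    return torch.multinomial(residual, 1).item(), False

def expected_acceptance(p_prev, q_new):
    # 1 - total variation distance between the draft and target distributions.
    return 1.0 - 0.5 * torch.sum(torch.abs(q_new - p_prev)).item()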
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
13,813
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. """
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. """
log(logger.debug, "Preparing dataset")
6
2023-11-21 20:01:08+00:00
16k
huang-yh/SelfOcc
model/encoder/tpvformer/tpvformer_encoder.py
[ { "identifier": "BaseEncoder", "path": "model/encoder/base_encoder.py", "snippet": "class BaseEncoder(BaseModule):\n \"\"\"Further encode 3D representations.\n image backbone -> neck -> lifter -> encoder -> segmentor\n \"\"\"\n\n def __init__(self, init_cfg=None, **kwargs):\n super().__init__(init_cfg)\n \n def forward(\n self, \n representation,\n ms_img_feats=None,\n metas=None,\n **kwargs\n ):\n pass" }, { "identifier": "point_sampling", "path": "model/encoder/bevformer/utils.py", "snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef point_sampling(reference_points, img_metas):\n reference_points = reference_points.float()\n\n lidar2img = []\n for img_meta in img_metas:\n lidar2img.append(img_meta['lidar2img'])\n if isinstance(lidar2img[0], (np.ndarray, list)):\n lidar2img = np.asarray(lidar2img)\n lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4)\n else:\n lidar2img = torch.stack(lidar2img, dim=0)\n\n reference_points = torch.cat(\n (reference_points, torch.ones_like(reference_points[..., :1])), -1)\n\n reference_points = reference_points.permute(1, 0, 2, 3)\n D, B, num_query = reference_points.size()[:3]\n num_cam = lidar2img.size(1)\n\n reference_points = reference_points.view(\n D, B, 1, num_query, 4, 1)\n\n lidar2img = lidar2img.view(\n 1, B, num_cam, 1, 4, 4)\n\n reference_points_cam = torch.matmul(\n lidar2img.to(torch.float32),\n reference_points.to(torch.float32)).squeeze(-1)\n \n eps = 1e-5\n\n # reference_points_cam[..., 0:2] = reference_points_cam[..., 0:2] * \\\n # img_metas[0]['scale_rate']\n \n if 'img_augmentation' in img_metas[0] and \\\n 'post_rots' in img_metas[0]['img_augmentation'] and \\\n 'post_trans' in img_metas[0]['img_augmentation']:\n post_rots = []\n post_trans = []\n for img_meta in img_metas:\n post_rots.append(img_meta['img_augmentation']['post_rots'].numpy())\n post_trans.append(img_meta['img_augmentation']['post_trans'].numpy())\n post_rots = np.asarray(post_rots)\n post_trans = np.asarray(post_trans)\n post_rots = reference_points.new_tensor(post_rots)\n post_trans = reference_points.new_tensor(post_trans)\n\n reference_points_cam[..., :2] = reference_points_cam[..., :2] / torch.maximum(\n reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)\n \n # D, B, N, Q, 3, 1\n reference_points_cam = reference_points_cam[..., :3].unsqueeze(-1)\n post_rots = post_rots.view(1, B, num_cam, 1, 3, 3)\n reference_points_cam = torch.matmul(\n post_rots.to(torch.float32),\n reference_points_cam.to(torch.float32)).squeeze(-1)\n # D, B, N, Q, 3\n post_trans = post_trans.view(1, B, num_cam, 1, 3)\n reference_points_cam = reference_points_cam + post_trans\n tpv_mask = (reference_points_cam[..., 2:3] > eps) \n reference_points_cam = reference_points_cam[..., :2]\n else:\n tpv_mask = (reference_points_cam[..., 2:3] > eps)\n reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum(\n reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)\n\n # reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1]\n # reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0]\n\n reference_points_cam[..., 0] /= img_metas[0]['img_shape'][1]\n reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0] # D, B, N, Q, 2\n\n tpv_mask = (tpv_mask & (reference_points_cam[..., 1:2] > 0.0)\n & (reference_points_cam[..., 1:2] < 1.0)\n & (reference_points_cam[..., 0:1] < 1.0)\n & (reference_points_cam[..., 0:1] > 0.0))\n\n tpv_mask = torch.nan_to_num(tpv_mask)\n\n reference_points_cam = 
reference_points_cam.permute(2, 1, 3, 0, 4) # N, B, Q, D, 2\n tpv_mask = tpv_mask.permute(2, 1, 3, 0, 4).squeeze(-1)\n\n if 'focal_ratios_x' in img_metas[0]:\n scales_x = np.asarray(img_metas[0]['focal_ratios_x'])\n scales_x = reference_points.new_tensor(scales_x).view(-1, 1, 1, 1, 1)\n reference_points_cam[..., :1] = reference_points_cam[..., :1] * scales_x\n scales_y = np.asarray(img_metas[0]['focal_ratios_y'])\n scales_y = reference_points.new_tensor(scales_y).view(-1, 1, 1, 1, 1)\n reference_points_cam[..., 1:] = reference_points_cam[..., 1:] * scales_y\n\n return reference_points_cam, tpv_mask" }, { "identifier": "get_cross_view_ref_points", "path": "model/encoder/tpvformer/utils.py", "snippet": "def get_cross_view_ref_points(tpv_h, tpv_w, tpv_z, num_points_in_pillar, offset=0):\n # ref points generating target: (#query)hw+zh+wz, (#level)3, #p, 2\n # generate points for hw and level 1\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n h_ranges = h_ranges.unsqueeze(-1).expand(-1, tpv_w).flatten()\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_h, -1).flatten()\n hw_hw = torch.stack([w_ranges, h_ranges], dim=-1) # hw, 2\n hw_hw = hw_hw.unsqueeze(1).expand(-1, num_points_in_pillar[2], -1) # hw, #p, 2\n # generate points for hw and level 2\n z_ranges = torch.linspace(offset, tpv_z-1+offset, num_points_in_pillar[2]) / tpv_z # #p\n z_ranges = z_ranges.unsqueeze(0).expand(tpv_h*tpv_w, -1) # hw, #p\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(-1, 1, 1).expand(-1, tpv_w, num_points_in_pillar[2]).flatten(0, 1)\n hw_zh = torch.stack([h_ranges, z_ranges], dim=-1) # hw, #p, 2\n # generate points for hw and level 3\n z_ranges = torch.linspace(offset, tpv_z-1+offset, num_points_in_pillar[2]) / tpv_z # #p\n z_ranges = z_ranges.unsqueeze(0).expand(tpv_h*tpv_w, -1) # hw, #p\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(1, -1, 1).expand(tpv_h, -1, num_points_in_pillar[2]).flatten(0, 1)\n hw_wz = torch.stack([z_ranges, w_ranges], dim=-1) # hw, #p, 2\n \n # generate points for zh and level 1\n w_ranges = torch.linspace(offset, tpv_w-1+offset, num_points_in_pillar[1]) / tpv_w\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_z*tpv_h, -1)\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(1, -1, 1).expand(tpv_z, -1, num_points_in_pillar[1]).flatten(0, 1)\n zh_hw = torch.stack([w_ranges, h_ranges], dim=-1)\n # generate points for zh and level 2\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(-1, 1, 1).expand(-1, tpv_h, num_points_in_pillar[1]).flatten(0, 1)\n h_ranges = torch.linspace(offset, tpv_h-1+offset, tpv_h) / tpv_h\n h_ranges = h_ranges.reshape(1, -1, 1).expand(tpv_z, -1, num_points_in_pillar[1]).flatten(0, 1)\n zh_zh = torch.stack([h_ranges, z_ranges], dim=-1) # zh, #p, 2\n # generate points for zh and level 3\n w_ranges = torch.linspace(offset, tpv_w-1+offset, num_points_in_pillar[1]) / tpv_w\n w_ranges = w_ranges.unsqueeze(0).expand(tpv_z*tpv_h, -1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(-1, 1, 1).expand(-1, tpv_h, num_points_in_pillar[1]).flatten(0, 1)\n zh_wz = torch.stack([z_ranges, w_ranges], dim=-1)\n\n # generate points for wz and level 1\n h_ranges = torch.linspace(offset, tpv_h-1+offset, num_points_in_pillar[0]) / tpv_h\n h_ranges = 
h_ranges.unsqueeze(0).expand(tpv_w*tpv_z, -1)\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(-1, 1, 1).expand(-1, tpv_z, num_points_in_pillar[0]).flatten(0, 1)\n wz_hw = torch.stack([w_ranges, h_ranges], dim=-1)\n # generate points for wz and level 2\n h_ranges = torch.linspace(offset, tpv_h-1+offset, num_points_in_pillar[0]) / tpv_h\n h_ranges = h_ranges.unsqueeze(0).expand(tpv_w*tpv_z, -1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(1, -1, 1).expand(tpv_w, -1, num_points_in_pillar[0]).flatten(0, 1)\n wz_zh = torch.stack([h_ranges, z_ranges], dim=-1)\n # generate points for wz and level 3\n w_ranges = torch.linspace(offset, tpv_w-1+offset, tpv_w) / tpv_w\n w_ranges = w_ranges.reshape(-1, 1, 1).expand(-1, tpv_z, num_points_in_pillar[0]).flatten(0, 1)\n z_ranges = torch.linspace(offset, tpv_z-1+offset, tpv_z) / tpv_z\n z_ranges = z_ranges.reshape(1, -1, 1).expand(tpv_w, -1, num_points_in_pillar[0]).flatten(0, 1)\n wz_wz = torch.stack([z_ranges, w_ranges], dim=-1)\n\n reference_points = torch.cat([\n torch.stack([hw_hw, hw_zh, hw_wz], dim=1),\n torch.stack([zh_hw, zh_zh, zh_wz], dim=1),\n torch.stack([wz_hw, wz_zh, wz_wz], dim=1)\n ], dim=0) # hw+zh+wz, 3, #p, 2\n \n return reference_points" }, { "identifier": "GridMeterMapping", "path": "model/encoder/bevformer/mappings.py", "snippet": "class GridMeterMapping:\n\n def __init__(\n self,\n nonlinear_mode: Literal['linear_upscale', 'linear'] = 'linear_upscale',\n h_size=[128, 32],\n h_range=[51.2, 28.8],\n h_half=False,\n w_size=[128, 32],\n w_range=[51.2, 28.8],\n w_half=False,\n d_size=[20, 10],\n d_range=[-4.0, 4.0, 12.0]\n ) -> None:\n self.nonlinear_mode = nonlinear_mode\n if nonlinear_mode == 'linear_upscale':\n assert all([h == w for h, w in zip(h_size, w_size)])\n assert all([h == w for h, w in zip(h_range, w_range)])\n assert (not h_half) and (not w_half)\n self.mapping = NonLinearMapping(\n h_size[0],\n h_size[1],\n h_range[0],\n h_range[1],\n d_size[0],\n d_size[1],\n d_range)\n self.size_h = self.size_w = self.mapping.bev_size\n self.size_d = self.mapping.z_size\n elif nonlinear_mode == 'linear':\n self.mapping = LinearMapping(\n h_size,\n h_range,\n h_half,\n w_size,\n w_range,\n w_half,\n d_size,\n d_range)\n self.size_h = self.mapping.h_tot_len\n self.size_w = self.mapping.w_tot_len\n self.size_d = self.mapping.d_tot_len\n self.grid2meter = self.mapping.grid2meter\n self.meter2grid = self.mapping.meter2grid" }, { "identifier": "BEVCrossAttention", "path": "model/encoder/bevformer/attention/image_cross_attention.py", "snippet": "class BEVCrossAttention(BaseModule):\r\n \"\"\"\r\n Image cross-attention in TPVFormer. 
Enable every tpv query to interact with its corresponding \r\n area on the image feature plane.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims=256,\r\n num_cams=6,\r\n dropout=0.1,\r\n init_cfg=None,\r\n batch_first=True,\r\n deformable_attention=dict(\r\n type='MSDeformableAttention3D',\r\n embed_dims=256,\r\n num_levels=4),\r\n **kwargs):\r\n super().__init__(init_cfg)\r\n\r\n self.init_cfg = init_cfg\r\n self.dropout = nn.Dropout(dropout)\r\n self.deformable_attention = build_attention(deformable_attention)\r\n self.embed_dims = embed_dims\r\n self.num_cams = num_cams\r\n self.output_proj = nn.Linear(embed_dims, embed_dims)\r\n self.batch_first = batch_first\r\n self.init_weight()\r\n\r\n def init_weight(self):\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)\r\n\r\n # @force_fp32(apply_to=('query', 'key', 'value', 'reference_points_cams'))\r\n # @torch.cuda.amp.autocast(enabled=False)\r\n def forward(self,\r\n query,\r\n key,\r\n value,\r\n residual=None,\r\n spatial_shapes=None,\r\n reference_points_cams=None,\r\n bev_masks=None,\r\n level_start_index=None,\r\n **kwargs):\r\n \"\"\"Forward Function of Detr3DCrossAtten.\r\n Args:\r\n query (Tensor): Query of Transformer with shape\r\n (bs, num_query, embed_dims).\r\n key (Tensor): The key tensor with shape\r\n (bs, num_key, embed_dims).\r\n value (Tensor): The value tensor with shape\r\n (bs, num_key, embed_dims).\r\n residual (Tensor): The tensor used for addition, with the\r\n same shape as `x`. Default None. If None, `x` will be used.\r\n spatial_shapes (Tensor): Spatial shape of features in\r\n different level. With shape (num_levels, 2),\r\n last dimension represent (h, w).\r\n level_start_index (Tensor): The start index of each level.\r\n A tensor has shape (num_levels) and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n Returns:\r\n Tensor: forwarded results with shape [num_query, bs, embed_dims].\r\n \"\"\"\r\n if key is None:\r\n key = query\r\n if value is None:\r\n value = key\r\n\r\n if residual is None:\r\n residual = query \r\n bs, num_query, _ = query.size()\r\n\r\n slots = torch.zeros_like(query)\r\n # indexeses = []\r\n # max_lens = []\r\n # queries_rebatches = []\r\n # reference_points_rebatches = []\r\n # for tpv_idx, tpv_mask in enumerate(tpv_masks):\r\n indexes = []\r\n for _, mask_per_img in enumerate(bev_masks):\r\n index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1)\r\n indexes.append(index_query_per_img)\r\n max_len = max([len(each) for each in indexes])\r\n # max_lens.append(max_len)\r\n # indexeses.append(indexes)\r\n\r\n reference_points_cam = reference_points_cams\r\n D = reference_points_cam.size(3)\r\n\r\n queries_rebatch = query.new_zeros(\r\n [bs * self.num_cams, max_len, self.embed_dims])\r\n reference_points_rebatch = reference_points_cam.new_zeros(\r\n [bs * self.num_cams, max_len, D, 2])\r\n\r\n for i, reference_points_per_img in enumerate(reference_points_cam):\r\n for j in range(bs):\r\n index_query_per_img = indexes[i]\r\n queries_rebatch[j * self.num_cams + i, :len(index_query_per_img)] = query[j, index_query_per_img]\r\n reference_points_rebatch[j * self.num_cams + i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img]\r\n \r\n # queries_rebatches.append(queries_rebatch)\r\n # reference_points_rebatches.append(reference_points_rebatch)\r\n\r\n num_cams, l, bs, embed_dims = key.shape\r\n\r\n key = key.permute(2, 0, 1, 3).reshape(\r\n self.num_cams * 
bs, l, self.embed_dims)\r\n value = value.permute(2, 0, 1, 3).reshape(\r\n self.num_cams * bs, l, self.embed_dims)\r\n\r\n query = self.deformable_attention(\r\n query=queries_rebatch, key=key, value=value,\r\n reference_points=reference_points_rebatch, \r\n spatial_shapes=spatial_shapes,\r\n level_start_index=level_start_index,)\r\n \r\n # for tpv_idx, indexes in enumerate(indexeses):\r\n for i, index_query_per_img in enumerate(indexes):\r\n for j in range(bs):\r\n slots[j, index_query_per_img] += query[j * self.num_cams + i, :len(index_query_per_img)]\r\n\r\n count = bev_masks.sum(-1) > 0\r\n count = count.permute(1, 2, 0).sum(-1)\r\n count = torch.clamp(count, min=1.0)\r\n slots = slots / count[..., None]\r\n slots = self.output_proj(slots)\r\n\r\n return self.dropout(slots) + residual\r" }, { "identifier": "BEVDeformableAttention", "path": "model/encoder/bevformer/attention/image_cross_attention.py", "snippet": "class BEVDeformableAttention(BaseModule):\r\n \"\"\"An attention module used in Deformable-Detr.\r\n\r\n `Deformable DETR: Deformable Transformers for End-to-End Object Detection.\r\n <https://arxiv.org/pdf/2010.04159.pdf>`_.\r\n\r\n Args:\r\n embed_dims (int): The embedding dimension of Attention.\r\n Default: 256.\r\n num_heads (int): Parallel attention heads. Default: 8.\r\n num_levels (int): The number of feature map used in\r\n Attention. Default: 4.\r\n num_points (int): The number of sampling points for\r\n each query in each head. Default: 4.\r\n im2col_step (int): The step used in image_to_column.\r\n Default: 64.\r\n dropout (float): A Dropout layer on `inp_identity`.\r\n Default: 0.1.\r\n batch_first (bool): Key, Query and Value are shape of\r\n (batch, n, embed_dim)\r\n or (n, batch, embed_dim). Default to False.\r\n norm_cfg (dict): Config dict for normalization layer.\r\n Default: None.\r\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\r\n Default: None.\r\n value_proj_ratio (float): The expansion ratio of value_proj.\r\n Default: 1.0.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n embed_dims: int = 256,\r\n num_heads: int = 8,\r\n num_levels: int = 4,\r\n num_points: int = 4,\r\n im2col_step: int = 64,\r\n dropout: float = 0.1,\r\n batch_first: bool = False,\r\n norm_cfg: Optional[dict] = None,\r\n init_cfg: Optional[mmengine.ConfigDict] = None,\r\n value_proj_ratio: float = 1.0):\r\n super().__init__(init_cfg)\r\n if embed_dims % num_heads != 0:\r\n raise ValueError(f'embed_dims must be divisible by num_heads, '\r\n f'but got {embed_dims} and {num_heads}')\r\n dim_per_head = embed_dims // num_heads\r\n self.norm_cfg = norm_cfg\r\n self.batch_first = batch_first\r\n\r\n # you'd better set dim_per_head to a power of 2\r\n # which is more efficient in the CUDA implementation\r\n def _is_power_of_2(n):\r\n if (not isinstance(n, int)) or (n < 0):\r\n raise ValueError(\r\n 'invalid input for _is_power_of_2: {} (type: {})'.format(\r\n n, type(n)))\r\n return (n & (n - 1) == 0) and n != 0\r\n\r\n if not _is_power_of_2(dim_per_head):\r\n warnings.warn(\r\n \"You'd better set embed_dims in \"\r\n 'MultiScaleDeformAttention to make '\r\n 'the dimension of each attention head a power of 2 '\r\n 'which is more efficient in our CUDA implementation.')\r\n\r\n self.im2col_step = im2col_step\r\n self.embed_dims = embed_dims\r\n self.num_levels = num_levels\r\n self.num_heads = num_heads\r\n self.num_points = num_points\r\n self.sampling_offsets = nn.Linear(\r\n embed_dims, num_heads * num_levels * num_points * 2)\r\n self.attention_weights = 
nn.Linear(embed_dims,\r\n num_heads * num_levels * num_points)\r\n value_proj_size = int(embed_dims * value_proj_ratio)\r\n self.value_proj = nn.Linear(embed_dims, value_proj_size)\r\n self.init_weights()\r\n\r\n def init_weights(self) -> None:\r\n \"\"\"Default initialization for Parameters of Module.\"\"\"\r\n constant_init(self.sampling_offsets, 0.)\r\n device = next(self.parameters()).device\r\n thetas = torch.arange(\r\n self.num_heads, dtype=torch.float32,\r\n device=device) * (2.0 * math.pi / self.num_heads)\r\n grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\r\n grid_init = (grid_init /\r\n grid_init.abs().max(-1, keepdim=True)[0]).view(\r\n self.num_heads, 1, 1,\r\n 2).repeat(1, self.num_levels, self.num_points, 1)\r\n # for i in range(self.num_points):\r\n # grid_init[:, :, i, :] *= i + 1\r\n\r\n self.sampling_offsets.bias.data = grid_init.view(-1)\r\n constant_init(self.attention_weights, val=0., bias=0.)\r\n xavier_init(self.value_proj, distribution='uniform', bias=0.)\r\n self._is_init = True\r\n\r\n @no_type_check\r\n def forward(self,\r\n query: torch.Tensor,\r\n key: Optional[torch.Tensor] = None,\r\n value: Optional[torch.Tensor] = None,\r\n identity: Optional[torch.Tensor] = None,\r\n query_pos: Optional[torch.Tensor] = None,\r\n key_padding_mask: Optional[torch.Tensor] = None,\r\n reference_points: Optional[torch.Tensor] = None,\r\n spatial_shapes: Optional[torch.Tensor] = None,\r\n level_start_index: Optional[torch.Tensor] = None,\r\n **kwargs) -> torch.Tensor:\r\n \"\"\"Forward Function of MultiScaleDeformAttention.\r\n\r\n Args:\r\n query (torch.Tensor): Query of Transformer with shape\r\n (num_query, bs, embed_dims).\r\n key (torch.Tensor): The key tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n value (torch.Tensor): The value tensor with shape\r\n `(num_key, bs, embed_dims)`.\r\n identity (torch.Tensor): The tensor used for addition, with the\r\n same shape as `query`. Default None. If None,\r\n `query` will be used.\r\n query_pos (torch.Tensor): The positional encoding for `query`.\r\n Default: None.\r\n key_padding_mask (torch.Tensor): ByteTensor for `query`, with\r\n shape [bs, num_key].\r\n reference_points (torch.Tensor): The normalized reference\r\n points with shape (bs, num_query, num_levels, 2),\r\n all elements is range in [0, 1], top-left (0,0),\r\n bottom-right (1, 1), including padding area.\r\n or (N, Length_{query}, num_levels, 4), add\r\n additional two dimensions is (w, h) to\r\n form reference boxes.\r\n spatial_shapes (torch.Tensor): Spatial shape of features in\r\n different levels. 
With shape (num_levels, 2),\r\n last dimension represents (h, w).\r\n level_start_index (torch.Tensor): The start index of each level.\r\n A tensor has shape ``(num_levels, )`` and can be represented\r\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\r\n\r\n Returns:\r\n torch.Tensor: forwarded results with shape\r\n [num_query, bs, embed_dims].\r\n \"\"\"\r\n\r\n if value is None:\r\n value = query\r\n\r\n if identity is None:\r\n identity = query\r\n if query_pos is not None:\r\n query = query + query_pos\r\n if not self.batch_first:\r\n # change to (bs, num_query ,embed_dims)\r\n query = query.permute(1, 0, 2)\r\n value = value.permute(1, 0, 2)\r\n\r\n bs, num_query, _ = query.shape\r\n bs, num_value, _ = value.shape\r\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\r\n\r\n value = self.value_proj(value)\r\n if key_padding_mask is not None:\r\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\r\n value = value.view(bs, num_value, self.num_heads, -1)\r\n sampling_offsets = self.sampling_offsets(query).view(\r\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\r\n attention_weights = self.attention_weights(query).view(\r\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\r\n attention_weights = attention_weights.softmax(-1)\r\n\r\n attention_weights = attention_weights.view(bs, num_query,\r\n self.num_heads,\r\n self.num_levels,\r\n self.num_points)\r\n if reference_points.shape[-1] == 2:\r\n offset_normalizer = torch.stack(\r\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\r\n sampling_locations = reference_points[:, :, None, None, :, :] \\\r\n + sampling_offsets \\\r\n / offset_normalizer[None, None, None, :, None, :]\r\n elif reference_points.shape[-1] == 4:\r\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\r\n + sampling_offsets / self.num_points \\\r\n * reference_points[:, :, None, :, None, 2:] \\\r\n * 0.5\r\n else:\r\n raise ValueError(\r\n f'Last dim of reference_points must be'\r\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\r\n if ((IS_CUDA_AVAILABLE and value.is_cuda)\r\n or (IS_MLU_AVAILABLE and value.is_mlu)):\r\n output = MultiScaleDeformableAttnFunction.apply(\r\n value, spatial_shapes, level_start_index, sampling_locations,\r\n attention_weights, self.im2col_step)\r\n else:\r\n output = multi_scale_deformable_attn_pytorch(\r\n value, spatial_shapes, sampling_locations, attention_weights)\r\n\r\n if not self.batch_first:\r\n # (num_query, bs ,embed_dims)\r\n output = output.permute(1, 0, 2)\r\n\r\n return output\r" }, { "identifier": "TPVCrossAttention", "path": "model/encoder/tpvformer/attention/image_cross_attention.py", "snippet": "class TPVCrossAttention(BaseModule):\r\n\r\n def __init__(\r\n self,\r\n embed_dims=256,\r\n num_cams=6,\r\n dropout=0.1, \r\n init_cfg=None,\r\n batch_first=True,\r\n num_heads=16,\r\n num_levels=4,\r\n num_points=[64, 64, 8]):\r\n super().__init__(init_cfg)\r\n\r\n deformable_attn_config_hw = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[2],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_hw = build_attention(deformable_attn_config_hw)\r\n\r\n deformable_attn_config_zh = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n 
dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[1],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_zh = build_attention(deformable_attn_config_zh)\r\n \r\n deformable_attn_config_wz = dict(\r\n type='BEVCrossAttention',\r\n embed_dims=embed_dims,\r\n num_cams=num_cams,\r\n dropout=dropout,\r\n batch_first=batch_first,\r\n deformable_attention=dict(\r\n type='BEVDeformableAttention',\r\n embed_dims=embed_dims,\r\n num_heads=num_heads,\r\n num_levels=num_levels,\r\n num_points=num_points[0],\r\n dropout=dropout,\r\n batch_first=batch_first))\r\n self.attn_wz = build_attention(deformable_attn_config_wz)\r\n self.attns = [self.attn_hw, self.attn_zh, self.attn_wz]\r\n\r\n def forward(self,\r\n query,\r\n key,\r\n value,\r\n residual=None,\r\n spatial_shapes=None,\r\n reference_points_cams=None,\r\n tpv_masks=None,\r\n level_start_index=None,\r\n **kwargs):\r\n result = []\r\n\r\n for i in range(3):\r\n out = self.attns[i](\r\n query[i],\r\n key,\r\n value,\r\n residual[i] if residual is not None else None,\r\n spatial_shapes=spatial_shapes,\r\n level_start_index=level_start_index,\r\n reference_points_cams=reference_points_cams[i],\r\n bev_masks=tpv_masks[i])\r\n result.append(out)\r\n\r\n return result\r" }, { "identifier": "CrossViewHybridAttention", "path": "model/encoder/tpvformer/attention/cross_view_hybrid_attention.py", "snippet": "class CrossViewHybridAttention(MultiScaleDeformableAttention):\n\n @no_type_check\n @deprecated_api_warning({'residual': 'identity'},\n cls_name='MultiScaleDeformableAttention')\n def forward(self,\n query: torch.Tensor,\n key: Optional[torch.Tensor] = None,\n value: Optional[torch.Tensor] = None,\n identity: Optional[torch.Tensor] = None,\n query_pos: Optional[torch.Tensor] = None,\n key_padding_mask: Optional[torch.Tensor] = None,\n reference_points: Optional[torch.Tensor] = None,\n spatial_shapes: Optional[torch.Tensor] = None,\n level_start_index: Optional[torch.Tensor] = None,\n **kwargs) -> torch.Tensor:\n \"\"\"Forward Function of MultiScaleDeformAttention.\n\n Args:\n query (torch.Tensor): Query of Transformer with shape\n (num_query, bs, embed_dims).\n key (torch.Tensor): The key tensor with shape\n `(num_key, bs, embed_dims)`.\n value (torch.Tensor): The value tensor with shape\n `(num_key, bs, embed_dims)`.\n identity (torch.Tensor): The tensor used for addition, with the\n same shape as `query`. Default None. If None,\n `query` will be used.\n query_pos (torch.Tensor): The positional encoding for `query`.\n Default: None.\n key_padding_mask (torch.Tensor): ByteTensor for `query`, with\n shape [bs, num_key].\n reference_points (torch.Tensor): The normalized reference\n points with shape (bs, num_query, num_levels, 2),\n all elements is range in [0, 1], top-left (0,0),\n bottom-right (1, 1), including padding area.\n or (N, Length_{query}, num_levels, 4), add\n additional two dimensions is (w, h) to\n form reference boxes.\n spatial_shapes (torch.Tensor): Spatial shape of features in\n different levels. 
With shape (num_levels, 2),\n last dimension represents (h, w).\n level_start_index (torch.Tensor): The start index of each level.\n A tensor has shape ``(num_levels, )`` and can be represented\n as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].\n\n Returns:\n torch.Tensor: forwarded results with shape\n [num_query, bs, embed_dims].\n \"\"\"\n\n if value is None:\n value = query\n\n if identity is None:\n identity = query\n if query_pos is not None:\n query = query + query_pos\n if not self.batch_first:\n # change to (bs, num_query ,embed_dims)\n query = query.permute(1, 0, 2)\n value = value.permute(1, 0, 2)\n\n bs, num_query, _ = query.shape\n bs, num_value, _ = value.shape\n assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value\n\n value = self.value_proj(value)\n if key_padding_mask is not None:\n value = value.masked_fill(key_padding_mask[..., None], 0.0)\n value = value.view(bs, num_value, self.num_heads, -1)\n sampling_offsets = self.sampling_offsets(query).view(\n bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)\n attention_weights = self.attention_weights(query).view(\n bs, num_query, self.num_heads, self.num_levels * self.num_points)\n attention_weights = attention_weights.softmax(-1)\n\n attention_weights = attention_weights.view(bs, num_query,\n self.num_heads,\n self.num_levels,\n self.num_points)\n if reference_points.shape[-1] == 2:\n offset_normalizer = torch.stack(\n [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)\n ### changed here\n sampling_locations = reference_points[:, :, None, :, :, :] \\\n + sampling_offsets \\\n / offset_normalizer[None, None, None, :, None, :]\n elif reference_points.shape[-1] == 4:\n sampling_locations = reference_points[:, :, None, :, None, :2] \\\n + sampling_offsets / self.num_points \\\n * reference_points[:, :, None, :, None, 2:] \\\n * 0.5\n else:\n raise ValueError(\n f'Last dim of reference_points must be'\n f' 2 or 4, but get {reference_points.shape[-1]} instead.')\n if ((IS_CUDA_AVAILABLE and value.is_cuda)\n or (IS_MLU_AVAILABLE and value.is_mlu)):\n output = MultiScaleDeformableAttnFunction.apply(\n value, spatial_shapes, level_start_index, sampling_locations,\n attention_weights, self.im2col_step)\n else:\n output = multi_scale_deformable_attn_pytorch(\n value, spatial_shapes, sampling_locations, attention_weights)\n\n output = self.output_proj(output)\n\n if not self.batch_first:\n # (num_query, bs ,embed_dims)\n output = output.permute(1, 0, 2)\n\n return self.dropout(output) + identity" }, { "identifier": "CameraAwareSE", "path": "model/encoder/tpvformer/modules/camera_se_net.py", "snippet": "class CameraAwareSE(nn.Module):\n\n def __init__(\n self,\n in_channels=96,\n mid_channels=192,\n out_channles=96):\n super().__init__()\n self.in_channels = in_channels\n self.mid_channels = mid_channels\n self.out_channels = out_channles\n self._init_layers()\n\n def _init_layers(self):\n self.bn = nn.BatchNorm1d(16)\n self.context_mlp = Mlp(16, self.mid_channels, self.mid_channels)\n self.context_se = SELayer(self.mid_channels) # NOTE: add camera-aware\n self.context_conv = nn.Conv2d(self.mid_channels,\n self.out_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n \n if self.in_channels == self.mid_channels:\n self.reduce_conv = nn.Identity()\n else:\n self.reduce_conv = nn.Sequential(\n nn.Conv2d(self.in_channels,\n self.mid_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.BatchNorm2d(self.mid_channels),\n nn.ReLU(inplace=True))\n \n def init_weight(self):\n # 
nn.init.zeros_(self.context_se.conv_expand.weight)\n # nn.init.constant_(self.context_se.conv_expand.bias, 10.0)\n nn.init.zeros_(self.context_mlp.fc2.weight)\n nn.init.constant_(self.context_mlp.fc2.bias, 10.0)\n\n def forward(self, ms_img_feats, metas):\n intrins, sensor2ego = [], []\n for meta in metas:\n intrins.append(meta['intrinsic'])\n sensor2ego.append(meta['cam2ego'])\n intrins = np.asarray(intrins)\n intrins = ms_img_feats[0].new_tensor(intrins) # bs, N, 4, 4\n sensor2ego = np.asarray(sensor2ego)\n sensor2ego = ms_img_feats[0].new_tensor(sensor2ego)[..., :3, :]\n\n batch_size = intrins.shape[0]\n num_cams = intrins.shape[1]\n mlp_input = torch.cat(\n [\n torch.stack(\n [\n intrins[..., 0, 0],\n intrins[..., 1, 1],\n intrins[..., 0, 2],\n intrins[..., 1, 2],\n ],\n dim=-1,\n ),\n sensor2ego.view(batch_size, num_cams, -1),\n ],\n -1,\n ) # bs, N, 16\n mlp_input = self.bn(mlp_input.reshape(-1, mlp_input.shape[-1]))\n context_se = self.context_mlp(mlp_input)[..., None, None] # bs*N, c, 1, 1\n context_se = torch.sigmoid(context_se)\n\n outputs = []\n for i_scale, img_feats in enumerate(ms_img_feats):\n img_feats = self.reduce_conv(img_feats.flatten(0, 1)) # bs*N, c, h, w\n img_feats = self.context_se(img_feats, context_se)\n img_feats = self.context_conv(img_feats)\n outputs.append(img_feats.unflatten(0, (batch_size, num_cams)))\n\n return outputs" } ]
from mmseg.registry import MODELS from mmcv.cnn.bricks.transformer import build_positional_encoding, build_transformer_layer from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention from mmengine.model import ModuleList from torch.nn.init import normal_ from mmengine.logging import MMLogger from ..base_encoder import BaseEncoder from ..bevformer.utils import point_sampling from .utils import get_cross_view_ref_points from ..bevformer.mappings import GridMeterMapping from ..bevformer.attention import BEVCrossAttention, BEVDeformableAttention from .attention import TPVCrossAttention, CrossViewHybridAttention from .modules import CameraAwareSE import torch.nn as nn, torch, copy
11,499
num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = 
self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False)
logger = MMLogger.get_instance('selfocc') @MODELS.register_module() class TPVFormerEncoder(BaseEncoder): def __init__( self, mapping_args: dict, # bev_inner=128, # bev_outer=32, # range_inner=51.2, # range_outer=51.2, # nonlinear_mode='linear_upscale', # z_inner=20, # z_outer=10, # z_ranges=[-5.0, 3.0, 11.0], embed_dims=128, num_cams=6, num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 
3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False)
cross_view_ref_points = get_cross_view_ref_points(size_h, size_w, size_d, num_points_self)
2
2023-11-20 12:49:14+00:00
16k
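Each record in this dump pairs a list of retrieved context snippets (identifier / path / snippet), an import block, a code prefix, and the single line expected to follow that prefix. Below is a minimal illustrative sketch of how these pieces could be stitched into a completion prompt; the helper name, parameter names, and snippet limit are assumptions for illustration, not fields or code from the dataset.

# Illustrative sketch only: stitch one record's pieces into a completion prompt.
# build_prompt, code_prefix and max_snippets are assumed names, not dataset fields.
from typing import Dict, List


def build_prompt(context_snippets: List[Dict[str, str]],
                 import_block: str,
                 code_prefix: str,
                 max_snippets: int = 3) -> str:
    """Concatenate retrieved snippets, the import block and the code prefix;
    a model's completion would then be compared against the recorded next line."""
    parts = [c["snippet"] for c in context_snippets[:max_snippets]]
    parts.append(import_block)
    parts.append(code_prefix)
    return "\n\n".join(parts)

How the model's first generated line is scored against the recorded next line (exact match, whitespace-normalised match, etc.) is not stated in the dump; any scoring rule built on top of this sketch is an additional assumption.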
MobileTeleSystems/CoolGraph
cool_graph/runners.py
[ { "identifier": "RawDataProcessor", "path": "cool_graph/data/data_processor.py", "snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of features in groups in nodes.\n mon_nodes_path (str): path to nodes\n mon_edges_path (str): path to edges\n mon_labels_path (str): path to labels\n edge_index_cols (List[str]): columns of edge index in dataset\n label_index_col (str): columns of label index in dataset\n label_mask_col (str): mask of label columns\n read_edge_attr (bool): is set True - read edge features. Default to True.\n group_mask_col (str): Mask for group in data. Default to None.\n features_edges_names (List[str]): List of features on edge. Default to None.\n label_cols (List[str]): List of label columns. Default to None.\n target_names (List[str]): List of target names. Default to None.\n \"\"\"\n\n @staticmethod\n def _check_cols_in_parquet(columns: List[str], path: str) -> bool:\n \"\"\"Cheking colomns in parquet files.\n\n Args:\n columns (List[str]): columns of dataset\n path (str): path to dataset\n\n Raises:\n ValueError: if there is no any files with parquet extension\n ValueError: if there is no path with parquet extension\n\n Returns:\n bool: True if columns and path are right\n \"\"\"\n if columns:\n set_cols = set(columns if type(columns) == list else [columns])\n try:\n parquet_file = [path] if path.endswith(\".parquet\") else []\n parquet_file = (\n parquet_file\n + glob.glob(os.path.join(path, \"*.parquet\"), recursive=True)\n + glob.glob(os.path.join(path, \"**/*.parquet\"), recursive=True)\n )\n parquet_file = parquet_file[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Couldn't find any files with parquet extension in {path}\\n\n Original exception: \\n\n {str(ex)}\n \"\"\"\n )\n pqt_cols = set(pq.read_schema(parquet_file).names)\n if not set_cols.issubset(pqt_cols):\n diff = set_cols - pqt_cols\n raise ValueError(\n f\"\"\"\n \"{'\", \"'.join(diff)}\" were not found in {path}\n \"\"\"\n )\n return True\n\n def __init__(\n self,\n groups_names: Dict[int, str],\n group_names_node_features: Dict[str, List[str]],\n mon_nodes_path: str,\n mon_edges_path: str,\n mon_labels_path: str,\n edge_index_cols: List[str],\n label_index_col: str,\n label_mask_col: Optional[str] = None,\n read_edge_attr: bool = True,\n group_mask_col: Optional[str] = None,\n features_edges_names: Optional[List[str]] = None,\n label_cols: Optional[List[str]] = None,\n target_names: Optional[List[str]] = None,\n ) -> None:\n self._check_cols_in_parquet(group_mask_col, mon_nodes_path)\n self._check_cols_in_parquet(label_cols, mon_labels_path)\n self._check_cols_in_parquet([label_mask_col], mon_labels_path)\n self._check_cols_in_parquet([label_index_col], mon_labels_path)\n\n for key, val in group_names_node_features.items():\n try:\n self._check_cols_in_parquet(val, mon_nodes_path)\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n {str(ex)} for group {key} aka {groups_names[key]}\n \"\"\"\n )\n\n df_node_feats = pq.read_table(mon_nodes_path).to_pandas()\n df_labels = pq.read_table(mon_labels_path, columns=label_cols).to_pandas()\n df_edge_index = pq.read_table(\n mon_edges_path, columns=edge_index_cols\n ).to_pandas()\n\n # Nodes\n node_features = torch.FloatTensor(df_node_feats.values)\n group_mask = torch.IntTensor(df_node_feats[group_mask_col].values)\n node_features_names_fixed = df_node_feats.columns.tolist()\n\n # Labels\n 
df_labels.set_index(label_index_col, inplace=True)\n df_labels.sort_index(inplace=True)\n df_labels.reset_index(inplace=True)\n targets = {t: torch.LongTensor(df_labels[t].values) for t in target_names}\n label_mask = torch.BoolTensor(df_labels[label_mask_col].values)\n index = torch.LongTensor(df_labels[label_index_col].values)\n\n try:\n df_node_feats.shape[0] == df_labels.shape[0]\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Length of features must be equal to the length of labels.\n \"\"\"\n )\n\n # Edges\n edge_index = torch.LongTensor(df_edge_index.values).T\n\n # Nodes\n self.node_features = node_features\n self.group_mask = group_mask\n self.targets = targets\n self.label_mask = label_mask\n self.index = index\n self.edge_index = edge_index\n\n # Edge features\n if read_edge_attr:\n df_edge_feats = pq.read_table(\n mon_edges_path, columns=features_edges_names\n ).to_pandas()\n\n self.edge_features = torch.FloatTensor(df_edge_feats.values)\n self.edge_features_names = df_edge_feats.columns.tolist()\n else:\n self.edge_features = None\n self.edge_features_names = None\n\n self.read_edge_attr = read_edge_attr\n\n # Mappings\n inverse = {v: k for k, v in groups_names.items()}\n self.group_indices_node_findex = {\n inverse[key]: [node_features_names_fixed.index(f) for f in value]\n for key, value in group_names_node_features.items()\n }\n self.groups_names = groups_names\n\n def sample_data(\n self, num_neighbors: int, batch_size: int, seed: int = 0\n ) -> Dict[str, List[torch.utils.data.DataLoader]]:\n \"\"\"Samling data.\n\n Args:\n num_neighbors (int): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n seed (int, optional): Number of seed of samples. Defaults to 0.\n\n Returns:\n Dict[str, List[torch.utils.data.DataLoader]]: Sampled data.\n \"\"\"\n\n return create_loaders(\n self.node_features,\n self.edge_features,\n self.edge_index,\n self.read_edge_attr,\n num_neighbors,\n batch_size,\n self.group_mask,\n self.group_indices_node_findex,\n self.groups_names,\n self.label_mask,\n self.index,\n targets=self.targets,\n )" }, { "identifier": "get_auto_batch_size", "path": "cool_graph/data/batch.py", "snippet": "def get_auto_batch_size(\n groups_num_features: List[int],\n conv_type: Optional[Literal[\"NNConv\", \"GraphConv\"]] = None,\n conv1_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv2_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n conv3_aggrs: Optional[Dict[Literal[\"mean\", \"max\", \"add\"], int]] = None,\n n_hops: Optional[int] = None,\n lin_prep_size_common: Optional[int] = None,\n lin_prep_sizes: Optional[List[int]] = None,\n edge_attr_repr_sizes: Optional[List[int]] = None,\n num_edge_features: Optional[int] = None,\n device: str = \"cuda:0\",\n num_neighbors: Optional[List[int]] = None,\n) -> int:\n \"\"\"\n Аutomatic batch size calculation.\n Depending on model size and free GPU memory.\n\n Args:\n groups_num_features (List[int]): Number of feats in groups on nodes.\n conv_type (Literal[NNConv, GraphConv]): Model type\n conv1_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 1. Defaults to None.\n conv2_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 2. Defaults to None.\n conv3_aggrs (Dict[Literal[mean, max, add], int]]):\n An aggregation per features across a set of elements in conv layer 3. 
Defaults to None.\n n_hops (int): Hop with neighbors. Defaults to None.\n lin_prep_size_common (int): Size of linear layer (in). Defaults to None.\n lin_prep_sizes (int): Size of linear layer (out). Defaults to None.\n edge_attr_repr_sizes (List[int]): Size of layer of edges attributes. Defaults to None.\n num_edge_features (int): Number of feats on edges. Defaults to None.\n device (str): The current GPU memory usage. Defaults to \"cuda:0\".\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration. Defaults to None.\n\n Returns:\n batch_size (int): Numbers of samples per batch to load.\n \"\"\"\n if lin_prep_sizes is None:\n lin_prep_sizes = []\n if device is None:\n device = \"cuda:0\"\n\n hop1_size = sum(conv1_aggrs.values())\n hop2_size = sum(conv2_aggrs.values()) if n_hops >= 2 else 0\n hop3_size = sum(conv3_aggrs.values()) if n_hops == 3 else 0\n\n max_size_node = max(\n *groups_num_features,\n lin_prep_size_common,\n *lin_prep_sizes,\n hop1_size,\n hop2_size,\n hop3_size,\n )\n\n max_size_edge = 0\n if conv_type == \"NNConv\":\n max_size_edge = max(\n *edge_attr_repr_sizes,\n num_edge_features,\n )\n\n max_size = max_size_node + max_size_edge * 1.5\n\n try:\n all([n != -1 for n in num_neighbors])\n except Exception as ex:\n raise ValueError(\n f\"\"\"\n Found -1, Need to know max neighbors per hop.\n \"\"\"\n )\n m_neighbors = np.prod(num_neighbors)\n\n free_memory = torch.cuda.mem_get_info(device=device)[0] / (1024**3) # GB\n\n floats_per_node_ = 320000\n batch_size_ = 250\n memory_reserved_max_ = 3.8\n\n batch_size = (\n 0.5\n * batch_size_\n * floats_per_node_\n / (m_neighbors * max_size)\n * (free_memory / memory_reserved_max_)\n )\n\n if conv_type == \"NNConv\":\n batch_size /= edge_attr_repr_sizes[-1] * 4\n\n batch_size = int(batch_size)\n\n return batch_size" }, { "identifier": "create_loaders", "path": "cool_graph/data/loaders.py", "snippet": "def create_loaders(\n data: Data = None,\n node_features: torch.FloatTensor = None,\n edge_features: torch.FloatTensor = None,\n edge_index: torch.LongTensor = None,\n read_edge_attr: bool = None,\n num_neighbors: List[int] = None,\n batch_size: int = None,\n group_mask: torch.LongTensor = None,\n groups_features: Dict[int, List[int]] = None,\n groups_names: Dict[int, str] = None,\n label_mask: torch.BoolTensor = None,\n index: torch.LongTensor = None,\n targets: Dict[str, torch.Tensor] = None,\n input_nodes: Optional[List] = None,\n node_feature_indices: Optional[List] = None,\n unique_groups: Optional[int] = None,\n) -> List[torch.utils.data.DataLoader]:\n \"\"\"\n Creating list loaders.\n\n Args:\n node_features (torch.FloatTensor): features on nodes on FloatTensor\n edge_features (torch.FloatTensor): features on edge on FloatTensor\n edge_index (torch.LongTensor): edge indices\n read_edge_attr (bool): if set True - read edge features.\n num_neighbors (List[int]): Number of neighbors are sampled for each node in each iteration.\n batch_size (int): Numbers of samples per batch to load.\n group_mask (torch.LongTensor): Mask for groups in nodes.\n groups_features (Dict[int, List[int]]): Features in groups in nodes.\n groups_names (Dict[int, str]): Name of featutes in groups in nodes.\n label_mask (torch.BoolTensor): Mask for label.\n index (torch.LongTensor): index\n targets (Dict[str, torch.Tensor]): Labels.\n\n Returns:\n List[torch.utils.data.DataLoader]: Created DataLoader object. 
https://pytorch.org/docs/stable/data.html\n \"\"\"\n unique_groups = np.unique(group_mask)\n try:\n set(unique_groups).issubset(set(groups_features.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Group mask values should be a subset of feature groups keys\"\"\"\n )\n\n try:\n set(groups_features).issubset(set(groups_names.keys()))\n except Exception as ex:\n raise ValueError(\n f\"\"\"Feature groups keys should be a subset of feature_groups_names\"\"\"\n )\n if data is None:\n data = Data(\n x=node_features,\n edge_index=edge_index,\n edge_attr=edge_features if read_edge_attr else None,\n group_mask=group_mask,\n label_mask=label_mask,\n index=index,\n **targets,\n )\n input_nodes = torch.nonzero(label_mask)[:, 0]\n\n loader = NeighborLoader(\n data,\n num_neighbors=num_neighbors,\n batch_size=batch_size,\n shuffle=True,\n input_nodes=input_nodes,\n )\n\n list_loader = []\n for sampled_data in tqdm(loader, desc=\"Sample data\"):\n sampled_data.label_mask[sampled_data.batch_size :] = False\n\n for group in unique_groups:\n name = groups_names[group]\n mask = sampled_data.group_mask == group\n features = groups_features[group]\n setattr(sampled_data, name, sampled_data.x[mask][:, features])\n\n del sampled_data.x\n\n list_loader.append(sampled_data)\n\n return list_loader" }, { "identifier": "setup_mlflow_from_config", "path": "cool_graph/logging/mlflow_logging.py", "snippet": "def setup_mlflow_from_config(config: Dict) -> None:\n \"\"\"\n Setup mlflow using logging.mlflow section of a config\n \"\"\"\n\n if config.get(\"MLFLOW_DISABLE_INSECURE_REQUEST_WARNING\", False):\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n for key, value in config.items():\n os.environ[key] = str(value)\n\n mlflow.set_tracking_uri(config.get(\"MLFLOW_TRACKING_URI\"))" }, { "identifier": "model_params_to_trial_params", "path": "cool_graph/parameter_search/example_objective.py", "snippet": "def model_params_to_trial_params(\n **model_params: Dict[str, Union[Literal[str], int, float, List, Dict]]\n) -> Dict[str, Union[Literal[str], int, float, List, Dict]]:\n \"\"\"\n Convert readable model_params to trial_params\n for example to run study.enqueue_trial(trial_params)\n \"\"\"\n trial = {}\n trial[\"activation\"] = model_params[\"activation\"]\n trial[\"lin_prep_len\"] = model_params[\"lin_prep_len\"]\n trial[\"lin_prep_dropout_rate\"] = model_params[\"lin_prep_dropout_rate\"]\n trial[\"lin_prep_weight_norm_flag\"] = model_params[\"lin_prep_weight_norm_flag\"]\n last_size = model_params[\"lin_prep_size_common\"]\n trial[\"lin_prep_size_common\"] = last_size\n for i in range(model_params[\"lin_prep_len\"]):\n trial[f\"lin_prep_size{i}_fraction\"] = np.clip(\n model_params[\"lin_prep_sizes\"][i] / last_size, 0.2, 1.0\n )\n last_size = model_params[\"lin_prep_sizes\"][i]\n\n trial[\"conv1_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"mean\"] / last_size, 0.1, 1.0\n )\n trial[\"conv1_aggrs_max_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"max\"] / last_size, 0.05, 0.7\n )\n trial[\"conv1_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv1_aggrs\"][\"add\"] / last_size, 0.05, 0.7\n )\n\n trial[\"conv1_dropout_rate\"] = model_params[\"conv1_dropout_rate\"]\n\n if model_params[\"n_hops\"] == 2:\n last_size = sum(model_params[\"conv1_aggrs\"].values())\n\n trial[\"conv2_aggrs_mean_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"mean\"] / last_size, 0.1, 0.7\n )\n trial[\"conv2_aggrs_max_fraction\"] = np.clip(\n 
model_params[\"conv2_aggrs\"][\"max\"] / last_size, 0.05, 0.5\n )\n trial[\"conv2_aggrs_add_fraction\"] = np.clip(\n model_params[\"conv2_aggrs\"][\"add\"] / last_size, 0.05, 0.5\n )\n\n trial[\"conv2_dropout_rate\"] = model_params[\"conv2_dropout_rate\"]\n\n if model_params[\"conv_type\"] == \"GraphConv\":\n trial[\"graph_conv_weight_norm_flag\"] = model_params[\n \"graph_conv_weight_norm_flag\"\n ]\n\n if model_params[\"conv_type\"] == \"NNConv\":\n trial[\"edge_attr_repr_len\"] = model_params[\"edge_attr_repr_len\"]\n for i in range(model_params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n trial[f\"edge_attr_repr_size{i}\"] = model_params[\"edge_attr_repr_sizes\"][\n i\n ]\n\n else:\n trial[f\"edge_attr_repr_size{i}_fraction\"] = np.clip(\n model_params[\"edge_attr_repr_sizes\"][i]\n / model_params[\"edge_attr_repr_sizes\"][i - 1],\n 0.2,\n 1.0,\n )\n\n trial[\"edge_attr_repr_size_last\"] = model_params[\"edge_attr_repr_sizes\"][-1]\n\n trial[\"edge_attr_repr_dropout_rate\"] = model_params[\n \"edge_attr_repr_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_last_dropout_rate_zero\"] = (\n model_params[\"edge_attr_repr_last_dropout_rate\"] == 0\n )\n if not trial[\"edge_attr_repr_last_dropout_rate_zero\"]:\n trial[\"edge_attr_repr_last_dropout_rate\"] = model_params[\n \"edge_attr_repr_last_dropout_rate\"\n ]\n\n trial[\"edge_attr_repr_weight_norm_flag\"] = model_params[\n \"edge_attr_repr_weight_norm_flag\"\n ]\n\n return trial" }, { "identifier": "sample_model_params", "path": "cool_graph/parameter_search/example_objective.py", "snippet": "def sample_model_params(trial: optuna.Trial, conv_type: str = \"GraphConv\") -> Dict:\n params = {}\n params[\"conv_type\"] = conv_type\n params[\"activation\"] = trial.suggest_categorical(\n \"activation\",\n [\n \"relu\", # 1st place\n \"prelu\", # 2nd place\n \"leakyrelu\",\n \"elu\",\n \"gelu\",\n ],\n )\n # NODE FEATURES PREP params\n params[\"lin_prep_len\"] = trial.suggest_int(\"lin_prep_len\", low=0, high=2)\n params[\"lin_prep_dropout_rate\"] = trial.suggest_uniform(\n \"lin_prep_dropout_rate\", low=0, high=0.5\n )\n params[\"lin_prep_weight_norm_flag\"] = trial.suggest_categorical(\n \"lin_prep_weight_norm_flag\", [False, True]\n )\n\n min_lin_prep_size_common = 32\n max_lin_prep_size_common = 1024\n\n last_size = trial.suggest_int(\n \"lin_prep_size_common\",\n min_lin_prep_size_common,\n max_lin_prep_size_common,\n log=True,\n )\n params[\"lin_prep_size_common\"] = last_size\n params[\"lin_prep_sizes\"] = []\n for i in range(params[\"lin_prep_len\"]):\n fraction = trial.suggest_loguniform(\n f\"lin_prep_size{i}_fraction\", low=0.2, high=1.0\n )\n last_size = max(16, int(np.round(last_size * fraction)))\n params[\"lin_prep_sizes\"].append(last_size)\n params[\"n_hops\"] = 2\n\n # CONV1 params\n\n params[\"conv1_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\"conv1_aggrs_mean_fraction\", low=0.1, high=1.0)\n params[\"conv1_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_max_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\"conv1_aggrs_add_fraction\", low=0.05, high=0.7)\n params[\"conv1_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv1_dropout_rate\"] = trial.suggest_uniform(\n \"conv1_dropout_rate\", low=0, high=0.5\n )\n\n # return params\n # CONV2 params\n if params[\"n_hops\"] == 2:\n last_size = sum(params[\"conv1_aggrs\"].values())\n 
params[\"conv2_aggrs\"] = {}\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_mean_fraction\", low=0.1, high=0.7\n )\n params[\"conv2_aggrs\"][\"mean\"] = max(8, int(np.round(last_size * fraction)))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_max_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"max\"] = int(np.round(last_size * fraction))\n\n fraction = trial.suggest_loguniform(\n \"conv2_aggrs_add_fraction\", low=0.05, high=0.5\n )\n params[\"conv2_aggrs\"][\"add\"] = int(np.round(last_size * fraction))\n\n params[\"conv2_dropout_rate\"] = trial.suggest_uniform(\n \"conv2_dropout_rate\", low=0, high=0.5\n )\n if params[\"conv_type\"] == \"GraphConv\":\n params[\"graph_conv_weight_norm_flag\"] = trial.suggest_categorical(\n \"graph_conv_weight_norm_flag\", [False, True]\n )\n\n # EDGE ATTR params\n if params[\"conv_type\"] == \"NNConv\":\n params[\"edge_attr_repr_len\"] = trial.suggest_int(\n \"edge_attr_repr_len\", low=1, high=3\n )\n params[\"edge_attr_repr_sizes\"] = []\n for i in range(params[\"edge_attr_repr_len\"] - 1):\n if i == 0:\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\n f\"edge_attr_repr_size{i}\", low=4, high=40, log=True\n )\n )\n else:\n fraction = trial.suggest_loguniform(\n f\"edge_attr_repr_size{i}_fraction\", low=0.2, high=1.0\n )\n params[\"edge_attr_repr_sizes\"].append(\n max(4, int(np.round(params[\"edge_attr_repr_sizes\"][-1] * fraction)))\n )\n params[\"edge_attr_repr_sizes\"].append(\n trial.suggest_int(\"edge_attr_repr_size_last\", low=1, high=5, log=True)\n )\n\n params[\"edge_attr_repr_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_dropout_rate\", low=0, high=0.5\n )\n if trial.suggest_categorical(\n \"edge_attr_repr_last_dropout_rate_zero\", [True, False]\n ):\n params[\"edge_attr_repr_last_dropout_rate\"] = 0.0\n else:\n params[\"edge_attr_repr_last_dropout_rate\"] = trial.suggest_uniform(\n \"edge_attr_repr_last_dropout_rate\", low=0, high=0.5\n )\n\n params[\"edge_attr_repr_weight_norm_flag\"] = trial.suggest_categorical(\n \"edge_attr_repr_weight_norm_flag\", [False, True]\n )\n\n params[\"edge_attr_repr_last_activation\"] = \"sigmoid\"\n\n return params" }, { "identifier": "Trainer", "path": "cool_graph/train/trainer.py", "snippet": "class Trainer(object):\n def __init__(\n self,\n list_loader_train: List[torch.utils.data.DataLoader],\n list_loader_test: List[torch.utils.data.DataLoader],\n checkpoint_dir: Union[str, pathlib.PosixPath],\n device: str = \"cuda:0\",\n eval_freq: int = 5,\n fill_value: Union[int, float] = -100,\n initial_lr: float = 0.0023,\n weight_decay: float = 0.001,\n loss_name: str = \"CrossEntropyLoss\",\n loss_label_smoothing: bool = False,\n loss_target_weights: Optional[Dict[str, Union[int, float]]] = None,\n loss_group_weights: Optional[List[float]] = None,\n groups_names: Optional[Dict[int, str]] = None,\n groups_names_num_features: Optional[Dict[str, int]] = None,\n num_edge_features: Optional[int] = None,\n main_metric_name: str = \"main_metric\",\n mlflow_experiment_name: Optional[str] = None,\n n_epochs: int = 10,\n scheduler_params: Dict[Literal[\"milestones\", \"gamma\"], int] = {\n \"milestones\": [10, 20, 35, 50, 70, 90, 105],\n \"gamma\": 0.25,\n },\n scheduler_type: str = \"MultiStepLR\",\n target_names: List[str] = [\"y\"],\n target_sizes: Optional[List[int]] = None,\n use_mlflow: bool = False,\n tqdm_disable=False,\n conv_type: Literal[\"NNConv\", \"GraphConv\"] = \"NNConv\",\n metrics: Optional[float] = None,\n log_all_metrics: bool = True,\n 
**model_params,\n ) -> None:\n \"\"\"\n Training model (GraphConv or NNConv).\n Class that training / logging / saving model. Using train_epoch\n and eval_epoch from helpers.py in training loop below.\n\n Args:\n list_loader_train (List[torch.utils.data.DataLoader]): Train list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n list_loader_test (List[torch.utils.data.DataLoader]): Test list with Data loader. Combines a dataset\n and a sampler, and provides an iterable over the given dataset.\n https://pytorch.org/docs/stable/data.html\n checkpoint_dir (Union[str, pathlib.PosixPath]): Path for training checkpoints\n device (_type_, optional): The device is an object representing the device on\n which a torch.Tensor is or will be allocated.. Defaults to \"cuda:0\".\n eval_freq (int, optional): Number of epoch group. Defaults to 5.\n fill_value (Union[int, float], optional): If value is None. Defaults to -100.\n initial_lr (float, optional): The learning rate param for Optimization. Defaults to 0.0023.\n weight_decay (float, optional): weight decay (L2 penalty). Defaults to 0.001.\n loss_name (str, optional): This criterion computes the cross entropy loss between\n input logits and target. Defaults to \"CrossEntropyLoss\".\n https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html\n loss_label_smoothing (bool, optional): If set True, use label smoothing. Defaults to False.\n loss_target_weights (Optional[Dict[str, Union[int, float]]], optional): Weights for targets. Defaults to None.\n loss_group_weights (Optional[List[float]], optional): Weights for groups. Defaults to None.\n groups_names (Optional[Dict[int, str]], optional): List with group names in nodes. Defaults to None.\n groups_names_num_features (Optional[Dict[str, int]], optional): Number of feats in groups in nodes. Defaults to None.\n num_edge_features (Optional[int], optional): Number of feats on edges. Defaults to None.\n main_metric_name (str, optional): Main metric for maximaze. Defaults to \"main_metric\".\n mlflow_experiment_name (Optional[str], optional): Name of mlflow experiment. Defaults to None.\n n_epochs (int, optional): Number of epochs. Defaults to 10.\n scheduler_params (Dict, optional): Milestones (list) – List of epoch indices. Must be increasing.\n gamma (float) – Multiplicative factor of learning rate decay.\n Defaults to { \"milestones\": [10, 20, 35, 50, 70, 90, 105], \"gamma\": 0.25, }.\n scheduler_type (str, optional): Decays the learning rate of each parameter group\n by gamma once the number of epoch reaches one of the milestones. Defaults to \"MultiStepLR\".\n target_names (List[str], optional): List of target names. Defaults to [\"y\"].\n target_sizes (Optional[List[int]], optional): Size of list with target. Defaults to None.\n use_mlflow (bool, optional): If set True, use MLFlow. Defaults to False.\n tqdm_disable (bool, optional): Display progress. Defaults to False.\n conv_type (Literal[NNConv, GraphConv], optional): The graph neural network operator. Defaults to \"NNConv\".\n metrics (float, optional): Metrics. Defaults to None.\n log_all_metrics (bool, optional): If set True, logging all metrics. 
Defaults to True.\n\n Raises:\n NotImplementedError: _description_\n \"\"\"\n for key, value in locals().items():\n setattr(self, key, value)\n\n self._metrics = {}\n self._main_metric = {}\n if isinstance(metrics, str):\n metrics = [metrics]\n if isinstance(\n metrics,\n (\n list,\n tuple,\n ),\n ):\n metrics = {name: metrics for name in target_names}\n\n for k, names in metrics.items():\n self._metrics[k] = {name: get_metric(name) for name in names}\n self._main_metric[k] = names[0]\n\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n torch.cuda.empty_cache()\n gc.collect()\n\n if conv_type == \"NNConv\":\n self._model = NNConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n elif conv_type == \"GraphConv\":\n self._model = GraphConvGNN(\n **model_params,\n target_names=target_names,\n target_sizes=target_sizes,\n groups_names=groups_names,\n groups_names_num_features=groups_names_num_features,\n num_edge_features=num_edge_features,\n )\n else:\n raise NotImplementedError(f\"{conv_type} is not implemented\")\n\n self._model.to(device)\n\n self._optimizer = torch.optim.Adam(\n self._model.parameters(),\n lr=initial_lr,\n weight_decay=weight_decay,\n )\n\n self._loss_criteria = getattr(torch.nn, loss_name)(\n reduction=\"none\", label_smoothing=loss_label_smoothing\n )\n self._use_edge_attr = conv_type == \"NNConv\"\n\n self._scheduler = getattr(torch.optim.lr_scheduler, scheduler_type)(\n self._optimizer, **scheduler_params\n )\n\n self._best_loss = {main_metric_name: -np.inf}\n\n self._train_run_lst = []\n self._test_metric_lst = []\n self._train_metric_lst = []\n\n def train(\n self, start_epoch: int = 0, end_epoch: Optional[int] = None\n ) -> Dict[\n Literal[\n \"best_loss\", \"global_calc_time\", \"train_loss\", \"test_metric\", \"train_metric\"\n ],\n float,\n ]:\n \"\"\"\n Training model and logging metrics.\n \"\"\"\n if end_epoch is None:\n end_epoch = self.n_epochs\n\n self.global_start_time = time.time()\n\n if self.use_mlflow:\n mlflow.end_run()\n mlflow.set_experiment(self.mlflow_experiment_name)\n mlflow.start_run()\n mlflow.log_params(\n {\n \"LossCriteria\": self._loss_criteria,\n \"checkpoint_dir\": self.checkpoint_dir,\n **self.model_params,\n }\n )\n\n for epoch in range(start_epoch, end_epoch):\n self.epoch = epoch\n # TRAIN\n train_run = train_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self._optimizer,\n self._use_edge_attr,\n target_weights=self.loss_target_weights,\n loss_criteria=self._loss_criteria,\n group_weights=self.loss_group_weights,\n tqdm_disable=self.tqdm_disable,\n )\n train_run[\"lr\"] = self._optimizer.param_groups[0][\"lr\"]\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_run, \"run_\"), step=epoch\n )\n train_run[\"epoch\"] = epoch\n self._train_run_lst.append(train_run)\n with open(\n os.path.join(self.checkpoint_dir, \"train_running_loss.txt\"), \"a\"\n ) as f:\n json.dump(train_run, f)\n f.write(\"\\n\")\n\n # calc metrics and perform scheduler step\n if (epoch - 0) % self.eval_freq == 0:\n # calc metrics\n # test\n logger.info(\"\\nEpoch {:03d}: \".format(epoch))\n test_metric = eval_epoch(\n self._model,\n self.list_loader_test,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"test\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n fill_value=self.fill_value,\n metrics=self._metrics,\n 
main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(test_metric, \"test_\"), step=epoch\n )\n test_metric[\"epoch\"] = epoch\n self._test_metric_lst.append(test_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"test_metric.txt\"), \"a\"\n ) as f:\n json.dump(test_metric, f)\n f.write(\"\\n\")\n\n # train\n logger.info(\"Epoch {:03d}: \".format(epoch))\n train_metric = eval_epoch(\n self._model,\n self.list_loader_train,\n self.device,\n self.target_names,\n self.groups_names,\n postfix=\"train\",\n use_edge_attr=self._use_edge_attr,\n tqdm_disable=self.tqdm_disable,\n metrics=self._metrics,\n main_metric=self._main_metric,\n log_all_metrics=self.log_all_metrics,\n )\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(train_metric, \"train_\"), step=epoch\n )\n train_metric[\"epoch\"] = epoch\n self._train_metric_lst.append(train_metric)\n with open(\n os.path.join(self.checkpoint_dir, \"train_metric.txt\"), \"a\"\n ) as f:\n json.dump(train_metric, f)\n f.write(\"\\n\")\n\n # save model\n checkpoint_file = os.path.join(\n self.checkpoint_dir, f\"state_dict_{epoch:0>4d}.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n\n if (\n test_metric[self.main_metric_name]\n > self._best_loss[self.main_metric_name]\n ):\n self._best_loss = test_metric\n self._best_loss[\"epoch\"] = epoch\n checkpoint_file = os.path.join(\n self.checkpoint_dir, \"state_dict_best.pt\"\n )\n torch.save(self._model.cpu().state_dict(), checkpoint_file)\n self._model.to(self.device)\n with open(\n os.path.join(self.checkpoint_dir, \"best_loss.txt\"), \"w\"\n ) as f:\n json.dump(self._best_loss, f, indent=4)\n\n self.mlflow_log_metrics(\n {\n \"best_epoch\": self._best_loss[\"epoch\"],\n f\"best_{self.main_metric_name}\": self._best_loss[\n self.main_metric_name\n ],\n },\n step=epoch,\n )\n\n if self.scheduler_type == \"ReduceLROnPlateau\":\n self._scheduler.step(train_run[\"total_loss\"])\n if (\n self._optimizer.param_groups[0][\"lr\"]\n <= self.scheduler_params[\"min_lr\"]\n ):\n break\n else:\n self._scheduler.step()\n\n self.global_calc_time = time.time() - self.global_start_time\n train_loss = pd.DataFrame(self._train_run_lst)\n test_metric = pd.DataFrame(self._test_metric_lst)\n train_metric = pd.DataFrame(self._train_metric_lst)\n\n self.mlflow_log_metrics(\n metrics=add_prefix_to_dict_keys(self._best_loss, \"best_\")\n )\n self.mlflow_log_metrics({\"global_calc_time\": self.global_calc_time})\n\n if self.use_mlflow:\n mlflow.end_run()\n torch.cuda.empty_cache()\n\n return {\n \"best_loss\": self._best_loss,\n \"global_calc_time\": self.global_calc_time,\n \"train_loss\": train_loss,\n \"test_metric\": test_metric,\n \"train_metric\": train_metric,\n }\n\n def mlflow_log_metrics(\n self, metrics: Dict[str, Any], step: Optional[int] = None\n ) -> None:\n if self.use_mlflow:\n try:\n mlflow.log_metrics(metrics, step)\n except MlflowException as e:\n save_str_e = traceback.format_exc()\n logger.info(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )\n with open(\n os.path.join(self.checkpoint_dir, \"MlflowExceptions.txt\"), \"a\"\n ) as f:\n f.write(\n \"Epoch {:03d}::\\nCaught exception:\\n{}\".format(\n self.epoch, save_str_e\n )\n )" } ]
import os import pathlib import hydra import numpy as np import optuna import pandas as pd import torch from datetime import datetime from itertools import product from pathlib import Path from typing import Dict, List, Literal, Optional from hydra import ( compose, core, initialize, initialize_config_dir, initialize_config_module, ) from omegaconf import DictConfig, OmegaConf from optuna.trial import TrialState from sklearn.model_selection import train_test_split from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader, NeighborSampler from tqdm import tqdm from cool_graph.data import RawDataProcessor from cool_graph.data.batch import get_auto_batch_size from cool_graph.data.loaders import create_loaders from cool_graph.logging import setup_mlflow_from_config from cool_graph.parameter_search import ( model_params_to_trial_params, sample_model_params, ) from cool_graph.train import Trainer
12,135
groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() return result class HypeRunner(BaseRunner): """ Runner for optimization model with Optuna. https://optuna.readthedocs.io/en/stable/reference/index.html 1st trial - with default config params (hyper_params). Also, 2nd trial - you can add own trial as argument enqueue_trial in optimazire_run method, and next trial optuna optimize model params randomly, if set None randomly optimization after 1st default trial. Args: data (Data): Loaded dataset. config (DictConfig): Confif with patams (model_params, logging, training, metrics). Default to None. config_path (str): Path with config structure (can be loaded with cli get_config). Default to None. overrides (list): Own params in list. Default to None. train_size (int): Own train size. Default to None. test (int): Own test size. Default to None. seed (int): The desired seed. Default to None. train_idx (list): List of train indices. test_idx (list): List of test indices. Examples -------- >>> from cool_graph.runners import HypeRunner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. 
If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. """ list_with_params = [] def objective(trial) -> float:
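The HypeRunner / optimize_run docstrings above describe the Optuna flow: a first trial is enqueued with known parameters, after which the sampler explores further configurations. The generic Optuna pattern this relies on is sketched below with a toy objective; the study name, the enqueued value and the objective itself are hypothetical, only the create_study / enqueue_trial / optimize calls mirror the standard Optuna API.

# Generic Optuna pattern described by the optimize_run docstring above.
import optuna


def objective(trial: optuna.Trial) -> float:
    # toy search space standing in for the model hyperparameters
    x = trial.suggest_float("x", -10.0, 10.0)
    return -(x - 2.0) ** 2  # value to maximise


study = optuna.create_study(direction="maximize", study_name="demo")
study.enqueue_trial({"x": 2.0})      # seed the search with a known configuration
study.optimize(objective, n_trials=5)
print(study.best_trial.value, study.best_trial.params)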
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict: assert path_base in ("cfg", "cwd") core.global_hydra.GlobalHydra.instance().clear() if os.path.isabs(config): config_path = pathlib.Path(config).parent else: config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent config_name = pathlib.Path(config).name.replace(".yaml", "") initialize_config_dir(str(config_path), version_base=None) cfg = compose(config_name=config_name, overrides=overrides) return cfg class ConfigRunner: r"""Runner for cli mode. Using only in cli. This class allows to load data + split data per batchs + split data per train/val + training. See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ def __init__(self, config: Optional[DictConfig]) -> None: cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.train_paths = cfg["data"]["train"] self.val_paths = cfg["data"]["validation"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Using RawDataProcessor from cool_graph.data for preprocessing data from disk. """ self.train_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.train_paths["nodes_path"], mon_edges_path=self.train_paths["edges_path"], mon_labels_path=self.train_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) self.val_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.val_paths["nodes_path"], mon_edges_path=self.val_paths["edges_path"], mon_labels_path=self.val_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) def sample_data( self, seed=0 ) -> Dict[Literal["train", "validation"], List[torch.utils.data.DataLoader]]: """ Sampling data in batches. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size train_loaders = self.train_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) val_loaders = self.val_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) return {"train": train_loaders, "validation": val_loaders} def run(self, seed: int = 0) -> Dict[str, float]: """ Train model for train_samples and val_sampler. Args: seed (int): seed for training. Default to 0. Returns: result (dict): Result of training for each 5 epochs with metrics from config. """ if not (hasattr(self, "train_sampler") and hasattr(self, "val_sampler")): self.init_loaders() sampled = self.sample_data(seed=seed) train_loaders = sampled["train"] val_loaders = sampled["validation"] self.trainer = Trainer( train_loaders, val_loaders, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.cfg["training"]["loss"].get("target_weights"), loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.cfg["data"]["groups_names"], mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.cfg["training"]["targets"], use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, **self.cfg["model_params"], groups_names_num_features={ k: len(v) for k, v in self.group_names_node_features.items() }, num_edge_features=len(self.cfg["data"].get("features_edges", [])), metrics=self.metrics, ) result = self.trainer.train() return result class BaseRunner: def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ) -> None: """ Main class for Basic runner and Runner with Optuna. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. 
            https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs
            config (DictConfig): Config. Defaults to None.
            config_path (str): Path to config. Defaults to None.
            overrides (list): Own params. Can be params from configs and overrides. Defaults to None.
            train_size (int): Size for train data. Defaults to None.
            test_size (int): Size for test data. Defaults to None.
            seed (int): Seed param for training. Defaults to None.
            train_idx (list): Indices for train data. Defaults to None.
            test_idx (list): Indices for test data. Defaults to None.
            use_edge_attr (bool): If attributes exist on edges, they can be used in training. Defaults to False.
        """
        if config is None:
            if config_path is None:
                if use_edge_attr:
                    config_path = "./config/in_memory_data2.yaml"
                else:
                    config_path = "./config/in_memory_data.yaml"
                config_path = os.path.join(os.path.dirname(__file__), config_path)
            config = create_cfg(config=config_path, overrides=overrides, path_base="cfg")
        cfg = OmegaConf.to_container(config, resolve=True)

        self.data = data
        self.cfg = cfg
        self.test_size = test_size
        self.train_size = train_size
        self.seed = seed
        self.train_idx = train_idx
        self.test_idx = test_idx
        self.use_edge_attr = use_edge_attr
        if use_edge_attr and data.edge_attr is None:
            raise BaseException(
                "data does not contain edge_attr, please set use_edge_attr=False"
            )
        self.target_names = cfg["training"]["targets"]
        self.target_weights = cfg["training"]["loss"]["target_weights"]
        self.batch_size = cfg["training"]["batch_size"]
        self.num_neighbors = cfg["training"]["num_neighbors"]
        self.metrics = cfg["metrics"]
        self.data.group_mask = torch.zeros(len(data.x), dtype=torch.int8)
        self.data.label_mask = torch.ones(len(data.x), dtype=torch.bool)
        self.groups_names = {0: "x"}
        self.groups_names_num_features = {"x": data.x.shape[1]}
        if len(data.y.shape) == 2:
            self.target_sizes = []
            self.target_names = []
            self.target_weights = {}
            for i in range(data.y.shape[1]):
                y_sub = data.y[:, i]
                setattr(data, f"y{i}", y_sub)
                self.target_sizes.append(len(y_sub.unique()))
                self.target_names.append(f"y{i}")
                self.target_weights[f"y{i}"] = 1
        else:
            self.target_names = ["y"]
            self.target_sizes = [len(data.y.unique())]
            self.target_weights = {"y": 1}
        if use_edge_attr:
            self.num_edge_features = data.edge_attr.shape[1]
        else:
            self.num_edge_features = 0
        self.chkpt_dir = (
            pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19]
        )
        for k, v in kwargs.items():
            setattr(self, k, v)

        if self.cfg["logging"].get("use_mlflow", False):
            setup_mlflow_from_config(cfg["logging"]["mlflow"])

    def init_loaders(self) -> None:
        """
        Sampling data into batches with NeighborLoader and storing them as list loaders.
        """
        if self.batch_size == "auto":
            self._batch_size = get_auto_batch_size(
                [
                    self.groups_names_num_features[self.groups_names[i]]
                    for i in range(len(self.groups_names))
                ],
                conv_type=self.cfg["model_params"]["conv_type"],
                conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"],
                conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"),
                conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"),
                n_hops=self.cfg["model_params"]["n_hops"],
                lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"],
                lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"],
                edge_attr_repr_sizes=self.cfg["model_params"].get("edge_attr_repr_sizes"),
                num_edge_features=self.num_edge_features,
                device=self.cfg["training"]["device"],
                num_neighbors=self.num_neighbors,
            )
        else:
            self._batch_size = self.batch_size

        if (self.train_idx is None) or (self.test_idx is None):
            train_idx, test_idx = train_test_split(
                torch.nonzero(self.data.label_mask)[:, 0],
                train_size=self.train_size,
                test_size=self.test_size,
                random_state=self.seed,
                shuffle=True,
            )
            self.train_idx = train_idx
            self.test_idx = test_idx

        def sample_date_prerpoc(sampled_data: Data) -> Data:
            sampled_data.label_mask[sampled_data.batch_size :] = False
            for group, name in self.groups_names.items():
                x = getattr(sampled_data, name)[sampled_data.group_mask == group]
                setattr(sampled_data, name, x)
            return sampled_data

        loader_train = NeighborLoader(
            self.data,
            num_neighbors=self.num_neighbors,
            batch_size=self._batch_size,
            shuffle=True,
            input_nodes=self.train_idx,
        )
        list_loader_train = []
        for sampled_data in tqdm(loader_train, desc="Sample data"):
            list_loader_train.append(sample_date_prerpoc(sampled_data))
        self.train_loader = list_loader_train

        loader_test = NeighborLoader(
            self.data,
            num_neighbors=self.num_neighbors,
            batch_size=self._batch_size,
            shuffle=True,
            input_nodes=self.test_idx,
        )
        list_loader_test = []
        for sampled_data in tqdm(loader_test, desc="Sample data"):
            list_loader_test.append(sample_date_prerpoc(sampled_data))
        self.test_loader = list_loader_test


class Runner(BaseRunner):
    """
    Runner for notebook launch.

    Args:
        data (Data): A data object describing a homogeneous graph. The data object can hold
            node-level, link-level and graph-level attributes. In general, Data tries to mimic
            the behavior of a regular Python dictionary. In addition, it provides useful
            functionality for analyzing graph structures, and provides basic PyTorch tensor
            functionalities.
            https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs
        config (DictConfig): Config. Defaults to None.
        config_path (str): Path to config. Defaults to None.
        overrides (list): Own params. Can be params from configs and overrides. Defaults to None.
        train_size (int): Size for train data. Defaults to None.
        test_size (int): Size for test data. Defaults to None.
        seed (int): Seed param for training. Defaults to None.
        train_idx (list): Indices for train data. Defaults to None.
        test_idx (list): Indices for test data. Defaults to None.
        use_edge_attr (bool): If attributes exist on edges, they can be used in training. Defaults to False.

    Examples
    --------
    >>> from cool_graph.runners import Runner
    >>> from torch_geometric import datasets
    >>> # loading amazon dataset
    >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data
    >>> runner = Runner(data)
    >>> result = runner.run()
    >>> result["best_loss"]
    {'accuracy': 0.916,
     'cross_entropy': 0.286,
     'f1_micro': 0.916,
     'calc_time': 0.004,
     'main_metric': 0.916,
     'epoch': 10}

    Also you can override params in Runner:
        runner = Runner(data, metrics=['accuracy'], batch_size='auto',
                        train_size=0.7, test_size=0.3,
                        overrides=['training.n_epochs=1'], config_path=path/to/config)
        result = runner.run()
    """

    def __init__(
        self,
        data: Data,
        config: Optional[DictConfig] = None,
        config_path: Optional[str] = None,
        overrides: Optional[List] = None,
        train_size: Optional[int] = None,
        test_size: Optional[int] = None,
        seed: Optional[int] = None,
        train_idx: Optional[List[int]] = None,
        test_idx: Optional[List[int]] = None,
        use_edge_attr: bool = False,
        **kwargs,
    ):
        super().__init__(
            data,
            config,
            config_path,
            overrides,
            train_size,
            test_size,
            seed,
            train_idx,
            test_idx,
            use_edge_attr,
            **kwargs,
        )

    def run(self) -> Dict[str, float]:
        """
        Training the model with params from the in_memory_data/in_memory_data2 config.
        See the configs in ./config to know exactly what is used for
        logging/model_params/training/metrics. You can use the default params,
        but you can also change them.
        Steps for changing configs:
        - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself)
        """
        if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")):
            self.init_loaders()

        self.trainer = Trainer(
            self.train_loader,
            self.test_loader,
            self.chkpt_dir,
            device=self.cfg["training"]["device"],
            eval_freq=self.cfg["training"]["eval_freq"],
            fill_value=self.cfg["training"]["loss"].get("fill_value"),
            initial_lr=self.cfg["training"].get("initial_lr", 0.01),
            weight_decay=self.cfg["training"].get("weight_decay", 0.0),
            loss_name=self.cfg["training"]["loss"]["name"],
            loss_label_smoothing=self.cfg["training"]["loss"].get("label_smoothing", False),
            loss_target_weights=self.target_weights,
            loss_group_weights=self.cfg["training"]["loss"].get("group_weights"),
            groups_names=self.groups_names,
            mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"),
            n_epochs=self.cfg["training"].get("n_epochs"),
            scheduler_params=self.cfg["training"].get("scheduler_params", {}),
            scheduler_type=self.cfg["training"].get("scheduler_type"),
            target_names=self.target_names,
            use_mlflow=self.cfg["logging"].get("use_mlflow", False),
            tqdm_disable=False,
            target_sizes=self.target_sizes,
            **self.cfg["model_params"],
            groups_names_num_features=self.groups_names_num_features,
            num_edge_features=self.num_edge_features,
            metrics=self.metrics,
            log_all_metrics=False,
        )

        result = self.trainer.train()
        return result


class HypeRunner(BaseRunner):
    """
    Runner for optimizing the model with Optuna.
    https://optuna.readthedocs.io/en/stable/reference/index.html

    The 1st trial runs with the default config params (hyper_params). For the 2nd trial
    you can add your own trial via the enqueue_trial argument of the optimize_run method,
    and the following trials optimize model params randomly; if enqueue_trial is set to None,
    random optimization starts right after the 1st default trial.

    Args:
        data (Data): Loaded dataset.
        config (DictConfig): Config with params (model_params, logging, training, metrics). Default to None.
        config_path (str): Path with config structure (can be loaded with cli get_config). Default to None.
        overrides (list): Own params in list. Default to None.
        train_size (int): Own train size. Default to None.
        test_size (int): Own test size. Default to None.
        seed (int): The desired seed. Default to None.
        train_idx (list): List of train indices.
        test_idx (list): List of test indices.

    Examples
    --------
    >>> from cool_graph.runners import HypeRunner
    >>> from torch_geometric import datasets
    >>> # loading amazon dataset
    >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data
    >>> runner = HypeRunner(data)
    >>> result = runner.run(optimize_run)
    Study statistics:
        Number of finished trials: 5
        Number of complete trials: 5
    Best trial:
        Value: 0.922
        Params:
            {'conv_type': 'GraphConv',
             'activation': 'leakyrelu',
             'lin_prep_len': 1,
             'lin_prep_dropout_rate': 0.4,
             'lin_prep_weight_norm_flag': True,
             'lin_prep_size_common': 512,
             'lin_prep_sizes': [256],
             'n_hops': 2,
             'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32},
             'conv1_dropout_rate': 0.2,
             'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16},
             'conv2_dropout_rate': 0.2,
             'graph_conv_weight_norm_flag': True}
    """

    def __init__(
        self,
        data: Data,
        config: Optional[DictConfig] = None,
        config_path: Optional[str] = None,
        overrides: Optional[List] = None,
        train_size: Optional[int] = None,
        test_size: Optional[int] = None,
        seed: Optional[int] = None,
        train_idx: Optional[List[int]] = None,
        test_idx: Optional[List[int]] = None,
    ):
        super().__init__(
            data,
            config,
            config_path,
            overrides,
            train_size,
            test_size,
            seed,
            train_idx,
            test_idx,
        )
        if config is None:
            if config_path is None:
                config_path = os.path.join(
                    os.path.dirname(__file__), "./config/in_memory_data.yaml"
                )
            config = create_cfg(config=config_path, overrides=overrides, path_base="cfg")
        self.study = optuna.study

    def optimize_run(
        self,
        n_trials: int = 100,
        storage: Optional[str] = None,
        study_name: Optional[str] = None,
        enqueue_trial: Optional[List[Dict]] = None,
    ) -> pd.DataFrame:
        if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")):
            self.init_loaders()
        """
        Method for running the objective function in Optuna.

        Args:
            n_trials (int, optional): The number of trials for each process. None represents
                no limit in terms of the number of trials. Defaults to 100.
            storage (Optional[str], optional): Database URL. If this argument is set to None,
                in-memory storage is used, and the Study will not be persistent. Defaults to None.
            study_name (Optional[str], optional): Study name. If this argument is set to None,
                a unique name is generated automatically. Defaults to None.
            enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter
                values. Defaults to None.

        Returns:
            trials_dataset (pd.DataFrame): Result dataframe with trial params.
        """
        list_with_params = []

        def objective(trial) -> float:
self.cfg["model_params"] = sample_model_params(
5
2023-11-22 09:44:16+00:00
16k
HeliosZhao/Animate124
nerf/network_grid_taichi.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.max_level = None\n self.dmtet = opt.dmtet\n self.cuda_ray = opt.cuda_ray\n self.taichi_ray = opt.taichi_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor(\n [-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n self.glctx = None\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n ## NOTE TODO the cuda ray sampling for DNeRF is different, make sure to change\n # density grid\n density_grid = torch.zeros(\n [self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(\n self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n\n # load dmtet vertices\n if self.opt.dmtet:\n self.dmtet = DMTetGeometry(opt.tet_grid_size, opt.tet_mlp, opt).to(opt.device)\n if self.opt.h <= 2048 and self.opt.w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n if self.taichi_ray:\n from einops import rearrange\n from taichi_modules import RayMarcherTaichi\n from taichi_modules import VolumeRendererTaichi\n from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi\n from taichi_modules import raymarching_test as raymarching_test_taichi\n from taichi_modules import composite_test as composite_test_fw\n from taichi_modules import packbits as packbits_taichi\n self.rearrange = rearrange\n self.packbits_taichi = packbits_taichi\n self.ray_aabb_intersector = RayAABBIntersectorTaichi\n self.raymarching_test_taichi = raymarching_test_taichi\n self.composite_test_fw = composite_test_fw\n self.ray_marching = RayMarcherTaichi(\n batch_size=4096) # TODO: hard encoded batch size\n self.volume_render = VolumeRendererTaichi(\n batch_size=4096) # TODO: hard encoded batch size\n # density grid\n density_grid = torch.zeros(\n [self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(\n self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n if self.opt.density_activation == 'exp':\n self.density_activation = trunc_exp\n elif self.opt.density_activation == 'softplus':\n self.density_activation = F.softplus\n elif self.opt.density_activation == 'relu':\n self.density_activation = F.relu\n \n # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192\n def finite_difference_normal(self, x, epsilon=1e-2):\n # x: [N, 3]\n # 
ipdb.set_trace()\n dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n \n normal = torch.stack([\n 0.5 * (dx_pos - dx_neg) / epsilon, \n 0.5 * (dy_pos - dy_neg) / epsilon, \n 0.5 * (dz_pos - dz_neg) / epsilon\n ], dim=-1)\n\n return -normal\n \n def normal(self, x):\n normal = self.finite_difference_normal(x)\n normal = safe_normalize(normal)\n normal = torch.nan_to_num(normal)\n return normal\n\n @torch.no_grad()\n def density_blob(self, x):\n # x: [B, N, 3]\n\n d = (x ** 2).sum(-1)\n\n if self.opt.density_activation == 'exp':\n g = self.opt.blob_density * \\\n torch.exp(- d / (2 * self.opt.blob_radius ** 2))\n else:\n g = self.opt.blob_density * \\\n (1 - torch.sqrt(d) / self.opt.blob_radius)\n\n return g\n\n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not (self.cuda_ray or self.taichi_ray):\n return\n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=-1, S=128):\n from meshutils import decimate_mesh, clean_mesh, poisson_mesh_reconstruction\n if self.opt.dmtet:\n vertices, triangles = self.dmtet.get_verts_face()\n vertices = vertices.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n\n sigmas = np.zeros(\n [resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat(\n [xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.aabb_train.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(\n zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n logger.info(\n f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(\n vertices, triangles, remesh=True, remesh_size=0.01)\n\n # decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, 
triangles = decimate_mesh(\n vertices, triangles, decimate_target)\n\n v = torch.from_numpy(vertices).contiguous(\n ).float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(\n self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # [N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n logger.info(\n f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(\n uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if self.glctx is None:\n if h <= 2048 and w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(\n 0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(\n v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, 
f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n logger.info(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n\n logger.info(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n\n logger.info(\n f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n')\n\n logger.info(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n fp.write(\n f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n # nears.unsqueeze_(-1)\n # fars.unsqueeze_(-1)\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if self.training:\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n else:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n \n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / self.opt.num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, self.opt.num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if self.opt.upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1) ## 
confused with this, so the last point should be around relative distance or zero?\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n light_d = light_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1,3), ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n if normals is not None:\n normals = normals.view(N, -1, 3)\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n\n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n # ipdb.set_trace()\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n weights_sum = weights_sum.reshape(*prefix)\n\n if self.training:\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n normal_image = 
torch.sum(\n weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1]\n results['normal_image'] = normal_image\n \n results['image'] = image\n results['depth'] = depth\n results['weights'] = weights\n results['weights_sum'] = weights_sum\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: image: [B, N, 3], depth: [B, N]\n # ipdb.set_trace()\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if self.training:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n else:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n results = {}\n\n if self.training:\n xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n\n if light_d.shape[0] > 1:\n flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()\n light_d = light_d[flatten_rays]\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize)\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n raymarching.composite_rays(n_alive, n_step, 
rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n def get_sdf_albedo_for_init(self, points=None):\n output = self.density(self.dmtet.verts if points is None else points)\n sigma, albedo = output['sigma'], output['albedo']\n return sigma - self.density_thresh, albedo\n\n def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs):\n # mvp: [B, 4, 4]\n\n device = mvp.device\n campos = rays_o[:, 0, :] # only need one ray per batch\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3]\n\n results = {}\n\n verts, faces = self.dmtet.get_verts_face()\n\n # get normals\n i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]\n v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :]\n\n faces = faces.int()\n \n face_normals = torch.cross(v1 - v0, v2 - v0)\n face_normals = safe_normalize(face_normals)\n \n vn = torch.zeros_like(verts)\n vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals)\n\n vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device))\n\n # rasterization\n verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), \n mvp.permute(0,2,1)).float() # [B, N, 4]\n rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w))\n \n alpha, _ = dr.interpolate(torch.ones_like(verts[:, :1]).unsqueeze(0), rast, faces) # [B, H, W, 1]\n xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3]\n normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces)\n normal = safe_normalize(normal)\n\n xyzs = xyzs.view(-1, 3)\n mask = (alpha > 0).view(-1).detach()\n\n # do the lighting here since we have normal from mesh now.\n albedo = torch.zeros_like(xyzs, dtype=torch.float32)\n if mask.any():\n masked_albedo = self.density(xyzs[mask])['albedo']\n albedo[mask] = masked_albedo.float()\n albedo = albedo.view(-1, h, w, 3)\n\n if shading == 'albedo':\n color = albedo\n elif shading == 'textureless':\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = lambertian.unsqueeze(-1).repeat(1, 1, 1, 3)\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = albedo * lambertian.unsqueeze(-1)\n\n color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n alpha = dr.antialias(alpha, rast, verts_clip, 
faces).clamp(0, 1) # [B, H, W, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n if torch.is_tensor(bg_color) and len(bg_color.shape) > 1:\n bg_color = bg_color.view(-1, h, w, 3)\n \n depth = rast[:, :, :, [2]] # [B, H, W]\n color = color + (1 - alpha) * bg_color\n\n results['depth'] = depth \n results['image'] = color\n results['weights_sum'] = alpha.squeeze(-1)\n\n normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n results['normal_image'] = normal_image\n \n # regularizations\n if self.training:\n if self.opt.lambda_mesh_normal > 0:\n results['loss_normal'] = normal_consistency(\n face_normals, faces)\n if self.opt.lambda_mesh_lap > 0:\n results['loss_lap'] = laplacian_smooth_loss(verts, faces)\n\n return results\n\n def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n exp_step_factor = kwargs.get('exp_step_factor', 0.)\n MAX_SAMPLES = 1024\n NEAR_DISTANCE = 0.01\n center = torch.zeros(1, 3)\n half_size = torch.ones(1, 3)\n _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1)\n hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE\n\n # TODO: should sample different light_d for each batch... but taichi end doesn't have a flatten_ray implemented currently...\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES)\n dirs = safe_normalize(dirs)\n # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image 
= torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = hits_t[:, 0, 0]\n step = 0\n \n min_samples = 1 if exp_step_factor == 0 else 4\n\n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n # n_step = max(min(N // n_alive, 8), 1)\n n_step = max(min(N // n_alive, 64), min_samples)\n\n xyzs, dirs, deltas, ts, N_eff_samples = \\\n self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive,\n self.density_bitfield, self.cascade,\n self.bound, exp_step_factor,\n self.grid_size, MAX_SAMPLES, n_step)\n\n xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c')\n dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c')\n dirs = safe_normalize(dirs)\n valid_mask = ~torch.all(dirs == 0, dim=1)\n if valid_mask.sum() == 0:\n break\n\n sigmas = torch.zeros(len(xyzs), device=device)\n rgbs = torch.zeros(len(xyzs), 3, device=device)\n normals = torch.zeros(len(xyzs), 3, device=device)\n\n sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading)\n rgbs[valid_mask] = _rgbs.float()\n sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step)\n rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step)\n if normals is not None:\n normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step)\n\n self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive,\n kwargs.get('T_threshold', 1e-4), N_eff_samples,\n weights_sum, depth, image)\n\n rays_alive = rays_alive[rays_alive >= 0]\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not (self.cuda_ray or self.taichi_ray):\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = 
sigmas\n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n if self.cuda_ray:\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n elif self.taichi_ray:\n self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield)\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}')\n\n\n def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: pred_rgb: [B, N, 3]\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n if self.dmtet:\n results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs)\n elif self.cuda_ray:\n results = self.run_cuda(rays_o, rays_d, **kwargs)\n elif self.taichi_ray:\n results = self.run_taichi(rays_o, rays_d, **kwargs)\n else:\n if staged:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = self.run(rays_o, rays_d, **kwargs)\n\n return results\n\n def init_tet_from_nerf(self, reset_scale=True):\n sdf = self.get_sdf_from_nerf(reset_scale=reset_scale)\n self.dmtet.init_tet_from_sdf(sdf)\n logger.info(f'init dmtet from NeRF Done ...')\n\n\n @torch.no_grad()\n def get_sdf_from_nerf(self, reset_scale=True):\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh)\n else:\n density_thresh = self.density_thresh\n\n if reset_scale:\n # init scale\n sigma = self.density(self.dmtet.verts)[\n 'sigma'] # verts covers [-1, 1] now\n mask = sigma > density_thresh\n valid_verts = self.dmtet.verts[mask]\n tet_scale = valid_verts.abs().amax(dim=0) + 1e-1\n self.dmtet.reset_tet_scale(tet_scale)\n sdf = (self.density(self.dmtet.verts)[\n 'sigma'] - density_thresh).clamp(-1, 1)\n return sdf" }, { "identifier": "get_encoder", "path": "encoding.py", "snippet": "def get_encoder(encoding, input_dim=3, \n multires=6, \n degree=4,\n num_levels=16, level_dim=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=2048, align_corners=False, interpolation='linear',\n **kwargs):\n\n if encoding == 'None':\n return lambda x, **kwargs: x, input_dim\n \n elif encoding == 'frequency_torch':\n encoder = FreqEncoder_torch(input_dim=input_dim, max_freq_log2=multires-1, N_freqs=multires, log_sampling=True)\n\n elif encoding == 'frequency': # CUDA implementation, faster than torch.\n from freqencoder import FreqEncoder\n encoder = FreqEncoder(input_dim=input_dim, degree=multires)\n\n elif encoding == 'sphere_harmonics':\n from shencoder import SHEncoder\n encoder = 
SHEncoder(input_dim=input_dim, degree=degree)\n\n elif encoding == 'hashgrid':\n from gridencoder import GridEncoder\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='hash', align_corners=align_corners, interpolation=interpolation)\n \n elif encoding == 'tiledgrid':\n from gridencoder import GridEncoder\n # encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners, interpolation=interpolation)\n encoder = GridEncoder(input_dim=input_dim, num_levels=num_levels, level_dim=level_dim, base_resolution=base_resolution, log2_hashmap_size=log2_hashmap_size, desired_resolution=desired_resolution, gridtype='tiled', align_corners=align_corners)\n \n elif encoding == 'hashgrid_taichi':\n from taichi_modules.hash_encoder import HashEncoderTaichi\n encoder = HashEncoderTaichi(batch_size=4096) #TODO: hard encoded batch size\n\n\n else:\n raise NotImplementedError('Unknown encoding mode, choose from [None, frequency, sphere_harmonics, hashgrid, tiledgrid]')\n\n return encoder, encoder.output_dim" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F

import numpy as np

from activation import trunc_exp
from .renderer import NeRFRenderer
from encoding import get_encoder
from .utils import safe_normalize
from tqdm import tqdm
13,213
class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden,
                                 self.dim_out if l == num_layers - 1 else self.dim_hidden,
                                 bias=bias))

        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)
            if l != self.num_layers - 1:
                x = F.relu(x, inplace=True)
        return x

    def reset_parameters(self):
        @torch.no_grad()
        def weight_init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.zeros_(m.bias)

        self.apply(weight_init)
class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden,
                                 self.dim_out if l == num_layers - 1 else self.dim_hidden,
                                 bias=bias))

        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)
            if l != self.num_layers - 1:
                x = F.relu(x, inplace=True)
        return x

    def reset_parameters(self):
        @torch.no_grad()
        def weight_init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.zeros_(m.bias)

        self.apply(weight_init)
class NeRFNetwork(NeRFRenderer):
1
2023-11-23 10:34:08+00:00
16k
alexzhou907/DreamPropeller
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
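The extract_into_tensor helper in the util snippet above gathers one schedule coefficient per batch element and reshapes it so it broadcasts over an image-shaped tensor. A minimal, self-contained sketch of that behaviour is given here; the helper body is copied from the snippet, while the toy schedule length, timestep indices, and latent shape are illustrative assumptions.

import torch

# Copied from the extern/ldm_zero123/modules/diffusionmodules/util.py snippet above.
def extract_into_tensor(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

# Illustrative toy inputs (assumptions): a 1000-step schedule, a batch of 4 timesteps,
# and a latent batch shaped (4, 4, 64, 64).
alphas_cumprod = torch.linspace(0.9999, 0.0047, 1000)
t = torch.tensor([0, 10, 500, 999])
coeff = extract_into_tensor(alphas_cumprod, t, (4, 4, 64, 64))
print(coeff.shape)  # torch.Size([4, 1, 1, 1]) -- broadcastable against the latent batch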
import itertools

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial

from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
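The diffusion utilities imported above (make_beta_schedule, extract_into_tensor) are combined in the code below into the forward-noising step used by q_sample and stochastic_encode: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise. The following is a minimal sketch under assumed toy shapes; only the linear schedule construction and the noising formula mirror the code in this file.

import torch

def linear_alphas_cumprod(n_timestep=1000, linear_start=1e-4, linear_end=2e-2):
    # Same "linear" schedule as make_beta_schedule, followed by the cumulative
    # product that register_schedule stores as alphas_cumprod.
    betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
    return torch.cumprod(1.0 - betas, dim=0)

def q_sample(x0, t, alphas_cumprod, noise):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    a_bar = alphas_cumprod[t].reshape(-1, 1, 1, 1).to(x0.dtype)
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

# Toy check (shapes are assumptions): two latents noised at different timesteps.
x0 = torch.randn(2, 4, 32, 32)
t = torch.tensor([10, 900])
x_t = q_sample(x0, t, linear_alphas_cumprod(), torch.randn_like(x0))
print(x_t.shape)  # torch.Size([2, 4, 32, 32])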
11983
else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
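Both the sampler snippet in the context list and the code excerpt above rely on the same classifier-free guidance recipe: conditional and unconditional inputs are stacked into one batch, denoised in a single forward pass, and the two noise estimates are recombined as e_t_uncond + scale * (e_t - e_t_uncond). A stripped-down sketch of just that step follows; the stand-in noise predictor and tensor shapes are assumptions, while the recombination mirrors p_sample_ddim above.

import torch

def guided_eps(apply_model, x, t, cond, uncond, scale):
    # Run the denoiser once on the stacked batch, as p_sample_ddim does.
    x_in = torch.cat([x, x])
    t_in = torch.cat([t, t])
    c_in = torch.cat([uncond, cond])
    e_t_uncond, e_t = apply_model(x_in, t_in, c_in).chunk(2)
    # Classifier-free guidance: move the estimate away from the unconditional one.
    return e_t_uncond + scale * (e_t - e_t_uncond)

# Stand-in noise predictor for a quick shape check (assumption, not the real UNet).
fake_model = lambda x, t, c: x + c[:, :, :1, :1]
x = torch.randn(2, 4, 8, 8)
t = torch.zeros(2, dtype=torch.long)
cond = torch.randn(2, 4, 8, 8)
eps = guided_eps(fake_model, x, t, cond, torch.zeros_like(cond), scale=3.0)
print(eps.shape)  # torch.Size([2, 4, 8, 8])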
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
16
2023-11-27 23:39:49+00:00
16k
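The code field of the row above ends inside a latent-diffusion sampler: p_sample asks p_mean_variance for the posterior mean and log-variance, then adds Gaussian noise scaled by exp(0.5 * log-variance), suppressing the noise at t == 0 through a nonzero_mask. The lines below are a minimal, self-contained sketch of that ancestral-sampling update, with made-up tensor shapes for illustration only; they are not the repository's actual implementation.

import torch

def ancestral_sample_step(model_mean, model_log_variance, t, temperature=1.0):
    """One DDPM ancestral step: x_{t-1} = mean + sigma * noise, with noise dropped at t == 0."""
    b = model_mean.shape[0]
    noise = torch.randn_like(model_mean) * temperature
    # 0 where t == 0 (no noise at the final step), 1 elsewhere; broadcastable over the latent dims.
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

# Toy usage with hypothetical shapes: a batch of 2 latents of size 4x8x8.
mean = torch.zeros(2, 4, 8, 8)
logvar = torch.full((2, 4, 8, 8), -2.0)
t = torch.tensor([5, 0])  # the second sample is at its final timestep, so it receives no noise
x_prev = ancestral_sample_step(mean, logvar, t)
print(x_prev.shape)  # torch.Size([2, 4, 8, 8])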
CineMingle/CineMingle
Movie_Data_Capture.py
[ { "identifier": "get_data_from_json", "path": "scraper.py", "snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解析JSON返回元数据\n # :param file_number: 影片名称\n # :param open_cc: 简繁转换器\n # :param specified_source: 指定的媒体数据源\n # :param specified_url: 指定的数据查询地址, 目前未使用\n # :return 给定影片名称的具体信息\n \n try:\n actor_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_actor.xml'))\n info_mapping_data = etree.parse(str(Path.home() / '.local' / 'share' / 'mdc' / 'mapping_info.xml'))\n except:\n actor_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n info_mapping_data = etree.fromstring(\"<html></html>\", etree.HTMLParser())\n\n conf = config.getInstance()\n # default fetch order list, from the beginning to the end\n sources = conf.sources()\n\n # TODO 准备参数\n # - 清理 ADC_function, webcrawler\n proxies: dict = None\n config_proxy = conf.proxy()\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n\n # javdb website logic\n # javdb have suffix\n javdb_sites = conf.javdb_sites().split(',')\n for i in javdb_sites:\n javdb_sites[javdb_sites.index(i)] = \"javdb\" + i\n javdb_sites.append(\"javdb\")\n # 不加载过期的cookie,javdb登录界面显示为7天免登录,故假定cookie有效期为7天\n has_valid_cookie = False\n for cj in javdb_sites:\n javdb_site = cj\n cookie_json = javdb_site + '.json'\n cookies_dict, cookies_filepath = load_cookies(cookie_json)\n if isinstance(cookies_dict, dict) and isinstance(cookies_filepath, str):\n cdays = file_modification_days(cookies_filepath)\n if cdays < 7:\n javdb_cookies = cookies_dict\n has_valid_cookie = True\n break\n elif cdays != 9999:\n print(\n f'[!]Cookies file {cookies_filepath} was updated {cdays} days ago, it will not be used for HTTP requests.')\n if not has_valid_cookie:\n # get real random site from javdb_sites, because random is not really random when the seed value is known\n # 已经是没有这些随机数了\n # javdb_site = secrets.choice(javdb_sites)\n javdb_site = None\n javdb_cookies = None\n\n ca_cert = None\n if conf.cacert_file():\n ca_cert = conf.cacert_file()\n\n json_data = search(file_number, sources, proxies=proxies, verify=ca_cert,\n dbsite=javdb_site, dbcookies=javdb_cookies,\n morestoryline=conf.is_storyline(),\n specifiedSource=specified_source, specifiedUrl=specified_url,\n debug = conf.debug())\n # Return if data not found in all sources\n if not json_data:\n print('[-]Movie Number not found!')\n return None\n\n # 增加number严格判断,避免提交任何number,总是返回\"本橋実来 ADZ335\",这种返回number不一致的数据源故障\n # 目前选用number命名规则是javdb.com Domain Creation Date: 2013-06-19T18:34:27Z\n # 然而也可以跟进关注其它命名规则例如airav.wiki Domain Creation Date: 2019-08-28T07:18:42.0Z\n # 如果将来javdb.com命名规则下不同Studio出现同名碰撞导致无法区分,可考虑更换规则,更新相应的number分析和抓取代码。\n if str(json_data.get('number')).upper() != file_number.upper():\n try:\n if json_data.get('allow_number_change'):\n pass\n except:\n print('[-]Movie number has changed! 
[{}]->[{}]'.format(file_number, str(json_data.get('number'))))\n return None\n\n # ================================================网站规则添加结束================================================\n\n if json_data.get('title') == '':\n print('[-]Movie Number or Title not found!')\n return None\n\n title = json_data.get('title')\n actor_list = str(json_data.get('actor')).strip(\"[ ]\").replace(\"'\", '').split(',') # 字符串转列表\n actor_list = [actor.strip() for actor in actor_list] # 去除空白\n director = json_data.get('director')\n release = json_data.get('release')\n number = json_data.get('number')\n studio = json_data.get('studio')\n source = json_data.get('source')\n runtime = json_data.get('runtime')\n outline = json_data.get('outline')\n label = json_data.get('label')\n series = json_data.get('series')\n year = json_data.get('year')\n\n if json_data.get('cover_small'):\n cover_small = json_data.get('cover_small')\n else:\n cover_small = ''\n\n if json_data.get('trailer'):\n trailer = json_data.get('trailer')\n else:\n trailer = ''\n\n if json_data.get('extrafanart'):\n extrafanart = json_data.get('extrafanart')\n else:\n extrafanart = ''\n\n imagecut = json_data.get('imagecut')\n tag = str(json_data.get('tag')).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '').split(',') # 字符串转列表 @\n while 'XXXX' in tag:\n tag.remove('XXXX')\n while 'xxx' in tag:\n tag.remove('xxx')\n if json_data['source'] =='pissplay': # pissplay actor为英文名,不用去除空格\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '')\n else:\n actor = str(actor_list).strip(\"[ ]\").replace(\"'\", '').replace(\" \", '')\n\n # if imagecut == '3':\n # DownloadFileWithFilename()\n\n # ====================处理异常字符====================== #\\/:*?\"<>|\n actor = special_characters_replacement(actor)\n actor_list = [special_characters_replacement(a) for a in actor_list]\n title = special_characters_replacement(title)\n label = special_characters_replacement(label)\n outline = special_characters_replacement(outline)\n series = special_characters_replacement(series)\n studio = special_characters_replacement(studio)\n director = special_characters_replacement(director)\n tag = [special_characters_replacement(t) for t in tag]\n release = release.replace('/', '-')\n tmpArr = cover_small.split(',')\n if len(tmpArr) > 0:\n cover_small = tmpArr[0].strip('\\\"').strip('\\'')\n # ====================处理异常字符 END================== #\\/:*?\"<>|\n\n # 返回处理后的json_data\n json_data['title'] = title\n json_data['original_title'] = title\n json_data['actor'] = actor\n json_data['release'] = release\n json_data['cover_small'] = cover_small\n json_data['tag'] = tag\n json_data['year'] = year\n json_data['actor_list'] = actor_list\n json_data['trailer'] = trailer\n json_data['extrafanart'] = extrafanart\n json_data['label'] = label\n json_data['outline'] = outline\n json_data['series'] = series\n json_data['studio'] = studio\n json_data['director'] = director\n\n if conf.is_translate():\n translate_values = conf.translate_values().split(\",\")\n for translate_value in translate_values:\n if json_data[translate_value] == \"\":\n continue\n if translate_value == \"title\":\n title_dict = json.loads(\n (Path.home() / '.local' / 'share' / 'mdc' / 'c_number.json').read_text(encoding=\"utf-8\"))\n try:\n json_data[translate_value] = title_dict[number]\n continue\n except:\n pass\n if conf.get_translate_engine() == \"azure\":\n t = translate(\n json_data[translate_value],\n target_language=\"zh-Hans\",\n engine=conf.get_translate_engine(),\n key=conf.get_translate_key(),\n )\n 
else:\n if len(json_data[translate_value]):\n if type(json_data[translate_value]) == str:\n json_data[translate_value] = special_characters_replacement(json_data[translate_value])\n json_data[translate_value] = translate(json_data[translate_value])\n else:\n for i in range(len(json_data[translate_value])):\n json_data[translate_value][i] = special_characters_replacement(\n json_data[translate_value][i])\n list_in_str = \",\".join(json_data[translate_value])\n json_data[translate_value] = translate(list_in_str).split(',')\n\n if open_cc:\n cc_vars = conf.cc_convert_vars().split(\",\")\n ccm = conf.cc_convert_mode()\n\n def convert_list(mapping_data, language, vars):\n total = []\n for i in vars:\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")) != 0:\n i = mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=f\",{i},\")[0]\n total.append(i)\n return total\n\n def convert(mapping_data, language, vars):\n if len(mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)) != 0:\n return mapping_data.xpath('a[contains(@keyword, $name)]/@' + language, name=vars)[0]\n else:\n raise IndexError('keyword not found')\n\n for cc in cc_vars:\n if json_data[cc] == \"\" or len(json_data[cc]) == 0:\n continue\n if cc == \"actor\":\n try:\n if ccm == 1:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_cn\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_cn\", json_data['actor'])\n elif ccm == 2:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"zh_tw\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"zh_tw\", json_data['actor'])\n elif ccm == 3:\n json_data['actor_list'] = convert_list(actor_mapping_data, \"jp\", json_data['actor_list'])\n json_data['actor'] = convert(actor_mapping_data, \"jp\", json_data['actor'])\n except:\n json_data['actor_list'] = [open_cc.convert(aa) for aa in json_data['actor_list']]\n json_data['actor'] = open_cc.convert(json_data['actor'])\n elif cc == \"tag\":\n try:\n if ccm == 1:\n json_data[cc] = convert_list(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert_list(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert_list(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_list(\"删除\", json_data[cc])\n except:\n json_data[cc] = [open_cc.convert(t) for t in json_data[cc]]\n else:\n try:\n if ccm == 1:\n json_data[cc] = convert(info_mapping_data, \"zh_cn\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 2:\n json_data[cc] = convert(info_mapping_data, \"zh_tw\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n elif ccm == 3:\n json_data[cc] = convert(info_mapping_data, \"jp\", json_data[cc])\n json_data[cc] = delete_all_elements_in_str(\"删除\", json_data[cc])\n except IndexError:\n json_data[cc] = open_cc.convert(json_data[cc])\n except:\n pass\n\n naming_rule = \"\"\n original_naming_rule = \"\"\n for i in conf.naming_rule().split(\"+\"):\n if i not in json_data:\n naming_rule += i.strip(\"'\").strip('\"')\n original_naming_rule += i.strip(\"'\").strip('\"')\n else:\n item = json_data.get(i)\n naming_rule += item if type(item) is not list else \"&\".join(item)\n # 
PATCH:处理[title]存在翻译的情况,后续NFO文件的original_name只会直接沿用naming_rule,这导致original_name非原始名\n # 理应在翻译处处理 naming_rule和original_naming_rule\n if i == 'title':\n item = json_data.get('original_title')\n original_naming_rule += item if type(item) is not list else \"&\".join(item)\n\n json_data['naming_rule'] = naming_rule\n json_data['original_naming_rule'] = original_naming_rule\n return json_data" }, { "identifier": "file_modification_days", "path": "ADC_function.py", "snippet": "def file_modification_days(filename: str) -> int:\n \"\"\"\n 文件修改时间距此时的天数\n \"\"\"\n mfile = Path(filename)\n if not mfile.is_file():\n return 9999\n mtime = int(mfile.stat().st_mtime)\n now = int(time.time())\n days = int((now - mtime) / (24 * 60 * 60))\n if days < 0:\n return 9999\n return days" }, { "identifier": "get_html", "path": "ADC_function.py", "snippet": "def get_html(url, cookies: dict = None, ua: str = None, return_type: str = None, encoding: str = None, json_headers=None):\n \"\"\"\n 网页请求核心函数\n \"\"\"\n verify = config.getInstance().cacert_file()\n config_proxy = config.getInstance().proxy()\n errors = \"\"\n\n headers = {\"User-Agent\": ua or G_USER_AGENT} # noqa\n if json_headers is not None:\n headers.update(json_headers)\n\n for i in range(config_proxy.retry):\n try:\n if config_proxy.enable:\n proxies = config_proxy.proxies()\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, proxies=proxies,\n verify=verify,\n cookies=cookies)\n else:\n result = requests.get(str(url), headers=headers, timeout=config_proxy.timeout, cookies=cookies)\n\n if return_type == \"object\":\n return result\n elif return_type == \"content\":\n return result.content\n else:\n result.encoding = encoding or result.apparent_encoding\n return result.text\n except Exception as e:\n print(\"[-]Connect retry {}/{}\".format(i + 1, config_proxy.retry))\n errors = str(e)\n if \"getaddrinfo failed\" in errors:\n print(\"[-]Connect Failed! Please Check your proxy config\")\n debug = config.getInstance().debug()\n if debug:\n print(\"[-]\" + errors)\n else:\n print(\"[-]\" + errors)\n print('[-]Connect Failed! 
Please check your Proxy or Network!')\n raise Exception('Connect Failed')" }, { "identifier": "parallel_download_files", "path": "ADC_function.py", "snippet": "def parallel_download_files(dn_list: typing.Iterable[typing.Sequence], parallel: int = 0, json_headers=None):\n \"\"\"\n download files in parallel 多线程下载文件\n\n 用法示例: 2线程同时下载两个不同文件,并保存到不同路径,路径目录可未创建,但需要具备对目标目录和文件的写权限\n parallel_download_files([\n ('https://site1/img/p1.jpg', 'C:/temp/img/p1.jpg'),\n ('https://site2/cover/n1.xml', 'C:/tmp/cover/n1.xml')\n ])\n\n :dn_list: 可以是 tuple或者list: ((url1, save_fullpath1),(url2, save_fullpath2),) fullpath可以是str或Path\n :parallel: 并行下载的线程池线程数,为0则由函数自己决定\n \"\"\"\n mp_args = []\n for url, fullpath in dn_list:\n if url and isinstance(url, str) and url.startswith('http') \\\n and fullpath and isinstance(fullpath, (str, Path)) and len(str(fullpath)):\n fullpath = Path(fullpath)\n fullpath.parent.mkdir(parents=True, exist_ok=True)\n mp_args.append((url, fullpath, json_headers))\n if not len(mp_args):\n return []\n if not isinstance(parallel, int) or parallel not in range(1, 200):\n parallel = min(5, len(mp_args))\n with ThreadPoolExecutor(parallel) as pool:\n results = list(pool.map(download_one_file, mp_args))\n return results" }, { "identifier": "get_number", "path": "number_parser.py", "snippet": "def get_number(debug: bool, file_path: str) -> str:\n \"\"\"\n 从文件路径中提取番号 from number_parser import get_number\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"/Users/Guest/AV_Data_Capture/[脸肿字幕组][PoRO]牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」[720p][x264_aac].mp4\")\n '牝教師4~穢された教壇~ 「生意気ドジっ娘女教師・美結~高飛車ハメ堕ち2濁金」'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"C:¥Users¥Guest¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"./snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \".¥snis-829-C.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829.mp4\")\n 'snis-829'\n >>> get_number(False, \"snis-829-C.mp4\")\n 'snis-829'\n \"\"\"\n filepath = os.path.basename(file_path)\n # debug True 和 False 两块代码块合并,原因是此模块及函数只涉及字符串计算,没有IO操作,debug on时输出导致异常信息即可\n try:\n file_number = get_number_by_dict(filepath)\n if file_number:\n return file_number\n elif '字幕组' in filepath or 'SUB' in filepath.upper() or re.match(r'[\\u30a0-\\u30ff]+', filepath):\n filepath = G_spat.sub(\"\", filepath)\n filepath = re.sub(\"\\[.*?\\]\",\"\",filepath)\n filepath = filepath.replace(\".chs\", \"\").replace(\".cht\", \"\")\n file_number = str(re.findall(r'(.+?)\\.', filepath)).strip(\" [']\")\n return file_number\n elif '-' in filepath or '_' in filepath: # 普通提取番号 主要处理包含减号-和_的番号\n filepath = G_spat.sub(\"\", filepath)\n filename = str(re.sub(\"\\[\\d{4}-\\d{1,2}-\\d{1,2}\\] - \", \"\", filepath)) # 去除文件名中时间\n lower_check = filename.lower()\n if 'fc2' in lower_check:\n filename = lower_check.replace('--', '-').replace('_', '-').upper()\n filename = re.sub(\"[-_]cd\\d{1,2}\", \"\", filename, flags=re.IGNORECASE)\n if not re.search(\"-|_\", filename): # 去掉-CD1之后再无-的情况,例如n1012-CD1.wmv\n return str(re.search(r'\\w+', filename[:filename.find('.')], re.A).group())\n file_number = os.path.splitext(filename)\n print(file_number)\n filename = re.search(r'[\\w\\-_]+', filename, re.A)\n if filename:\n file_number = str(filename.group())\n 
else:\n file_number = file_number[0]\n file_number = re.sub(\"(-|_)c$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)uc$\", \"\", file_number, flags=re.IGNORECASE)\n file_number = re.sub(\"(-|_)u$\", \"\", file_number, flags=re.IGNORECASE)\n if re.search(\"\\d+ch$\", file_number, flags=re.I):\n file_number = file_number[:-2]\n return file_number.upper()\n else: # 提取不含减号-的番号,FANZA CID\n # 欧美番号匹配规则\n oumei = re.search(r'[a-zA-Z]+\\.\\d{2}\\.\\d{2}\\.\\d{2}', filepath)\n if oumei:\n return oumei.group()\n try:\n return str(\n re.findall(r'(.+?)\\.',\n str(re.search('([^<>/\\\\\\\\|:\"\"\\\\*\\\\?]+)\\\\.\\\\w+$', filepath).group()))).strip(\n \"['']\").replace('_', '-')\n except:\n return str(re.search(r'(.+?)\\.', filepath)[0])\n except Exception as e:\n if debug:\n print(f'[-]Number Parser exception: {e} [{file_path}]')\n return None" }, { "identifier": "core_main", "path": "core.py", "snippet": "def core_main(movie_path, number_th, oCC, specified_source=None, specified_url=None):\n conf = config.getInstance()\n # =======================================================================初始化所需变量\n multi_part = False\n part = ''\n leak_word = ''\n c_word = ''\n cn_sub = False\n liuchu = False\n hack = False\n hack_word = ''\n _4k = False\n\n # 下面被注释的变量不需要\n # rootpath = os.getcwd\n number = number_th\n json_data = get_data_from_json(number, oCC, specified_source, specified_url) # 定义番号\n\n # Return if blank dict returned (data not found)\n if not json_data:\n moveFailedFolder(movie_path)\n return\n\n if json_data[\"number\"] != number:\n # fix issue #119\n # the root cause is we normalize the search id\n # print_files() will use the normalized id from website,\n # but paste_file_to_folder() still use the input raw search id\n # so the solution is: use the normalized search id\n number = json_data[\"number\"]\n imagecut = json_data.get('imagecut')\n tag = json_data.get('tag')\n # =======================================================================判断-C,-CD后缀\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n multi_part = True\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n\n # 判断是否无码\n unce = json_data.get('无码')\n uncensored = int(unce) if isinstance(unce, bool) else int(is_uncensored(number))\n\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n liuchu = '流出'\n leak = True\n leak_word = '-无码流出' # 流出影片后缀\n else:\n leak = False\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n if '4k'.upper() in str(movie_path).upper() or '4k' in movie_path:\n _4k = True\n\n # 判断是否4k\n if '4K' in tag:\n tag.remove('4K') # 从tag中移除'4K'\n\n # 判断是否为无码破解\n if '无码破解' in tag:\n tag.remove('无码破解') # 从tag中移除'无码破解'\n\n # try:\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n\n # 调试模式检测\n if conf.debug():\n debug_print(json_data)\n\n # 创建文件夹\n # path = create_folder(rootpath + '/' + conf.success_folder(), json_data.get('location_rule'), json_data)\n\n cover = json_data.get('cover')\n ext = image_ext(cover)\n\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = 
f\"{number}{leak_word}{c_word}{hack_word}-fanart{ext}\"\n poster_path = f\"{number}{leak_word}{c_word}{hack_word}-poster{ext}\"\n thumb_path = f\"{number}{leak_word}{c_word}{hack_word}-thumb{ext}\"\n\n # main_mode\n # 1: 刮削模式 / Scraping mode\n # 2: 整理模式 / Organizing mode\n # 3:不改变路径刮削\n if conf.main_mode() == 1:\n # 创建文件夹\n path = create_folder(json_data)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, thumb_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 移动电影\n paste_file_to_folder(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_status = move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n if move_status:\n cn_sub = True\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, thumb_path), cn_sub, leak, uncensored,\n hack, _4k)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path, tag,\n json_data.get('actor_list'), liuchu, uncensored, hack, hack_word\n , _4k, fanart_path, poster_path, thumb_path)\n\n elif conf.main_mode() == 2:\n # 创建文件夹\n path = create_folder(json_data)\n # 移动文件\n paste_file_to_folder_mode2(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n # Move subtitles\n if(conf.check_subtitles()):\n move_subtitles(movie_path, path, multi_part, number, part, leak_word, c_word, hack_word)\n\n elif conf.main_mode() == 3:\n path = str(Path(movie_path).parent)\n if multi_part == 1:\n number += part # 这时number会被附加上CD1后缀\n\n # 检查小封面, 如果image cut为3,则下载小封面\n if imagecut == 3:\n if 'headers' in json_data:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path, json_data)\n else:\n small_cover_check(path, poster_path, json_data.get('cover_small'), movie_path)\n\n # creatFolder会返回番号路径\n if 'headers' in json_data:\n image_download(cover, fanart_path, thumb_path, path, movie_path, json_data)\n else:\n image_download(cover, fanart_path, thumb_path, path, movie_path)\n\n if not multi_part or part.lower() == '-cd1':\n try:\n # 下载预告片\n if 
conf.is_trailer() and json_data.get('trailer'):\n trailer_download(json_data.get('trailer'), leak_word, c_word, hack_word, number, path, movie_path)\n\n # 下载剧照 data, path, filepath\n if conf.is_extrafanart() and json_data.get('extrafanart'):\n if 'headers' in json_data:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path, json_data)\n else:\n extrafanart_download(json_data.get('extrafanart'), path, number, movie_path)\n\n # 下载演员头像 KODI .actors 目录位置\n if conf.download_actor_photo_for_kodi():\n actor_photo_download(json_data.get('actor_photo'), path, number)\n except:\n pass\n\n # 裁剪图\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n\n # 添加水印\n if conf.is_watermark():\n add_mark(os.path.join(path, poster_path), os.path.join(path, fanart_path), cn_sub, leak, uncensored, hack,\n _4k)\n\n # 兼容Jellyfin封面图文件名规则\n if multi_part and conf.jellyfin_multi_part_fanart():\n linkImage(path, number_th, part, leak_word, c_word, hack_word, ext)\n\n # 最后输出.nfo元数据文件,以完成.nfo文件创建作为任务成功标志\n print_files(path, leak_word, c_word, json_data.get('naming_rule'), part, cn_sub, json_data, movie_path,\n tag, json_data.get('actor_list'), liuchu, uncensored, hack, hack_word, _4k, fanart_path, poster_path,\n thumb_path)" }, { "identifier": "core_main_no_net_op", "path": "core.py", "snippet": "def core_main_no_net_op(movie_path, number):\n conf = config.getInstance()\n part = ''\n leak_word = ''\n leak = False\n c_word = ''\n cn_sub = False\n hack = False\n hack_word = ''\n _4k = False\n imagecut = 1\n multi = False\n part = ''\n path = str(Path(movie_path).parent)\n\n if re.search('[-_]CD\\d+', movie_path, re.IGNORECASE):\n part = re.findall('[-_]CD\\d+', movie_path, re.IGNORECASE)[0].upper()\n multi = True\n if re.search(r'[-_]C(\\.\\w+$|-\\w+)|\\d+ch(\\.\\w+$|-\\w+)', movie_path,\n re.I) or '中文' in movie_path or '字幕' in movie_path or \".chs\" in movie_path or '.cht' in movie_path:\n cn_sub = True\n c_word = '-C' # 中文字幕影片后缀\n uncensored = True if is_uncensored(number) else 0\n if '流出' in movie_path or 'uncensored' in movie_path.lower():\n leak_word = '-无码流出' # 无码流出影片后缀\n leak = True\n\n if 'hack'.upper() in str(movie_path).upper() or '破解' in movie_path:\n hack = True\n hack_word = \"-hack\"\n\n # try:\n\n # props = get_video_properties(movie_path) # 判断是否为4K视频\n # if props['width'] >= 4096 or props['height'] >= 2160:\n # _4k = True\n # except:\n # pass\n prestr = f\"{number}{leak_word}{c_word}{hack_word}\"\n\n full_nfo = Path(path) / f\"{prestr}{part}.nfo\"\n if full_nfo.is_file():\n if full_nfo.read_text(encoding='utf-8').find(r'<tag>无码</tag>') >= 0:\n uncensored = True\n try:\n nfo_xml = etree.parse(full_nfo)\n nfo_fanart_path = nfo_xml.xpath('//fanart/text()')[0]\n ext = Path(nfo_fanart_path).suffix\n except:\n return\n else:\n return\n fanart_path = f\"fanart{ext}\"\n poster_path = f\"poster{ext}\"\n thumb_path = f\"thumb{ext}\"\n if config.getInstance().image_naming_with_number():\n fanart_path = f\"{prestr}-fanart{ext}\"\n poster_path = f\"{prestr}-poster{ext}\"\n thumb_path = f\"{prestr}-thumb{ext}\"\n full_fanart_path = os.path.join(path, fanart_path)\n full_poster_path = os.path.join(path, poster_path)\n full_thumb_path = os.path.join(path, thumb_path)\n\n if not all(os.path.isfile(f) for f in (full_fanart_path, full_thumb_path)):\n return\n\n cutImage(imagecut, path, fanart_path, poster_path, bool(conf.face_uncensored_only() and not uncensored))\n if conf.is_watermark():\n add_mark(full_poster_path, full_thumb_path, cn_sub, leak, 
uncensored, hack, _4k)\n\n if multi and conf.jellyfin_multi_part_fanart():\n linkImage(path, number, part, leak_word, c_word, hack_word, ext)" }, { "identifier": "moveFailedFolder", "path": "core.py", "snippet": "def moveFailedFolder(filepath):\n conf = config.getInstance()\n failed_folder = conf.failed_folder()\n link_mode = conf.link_mode()\n # 模式3或软连接,改为维护一个失败列表,启动扫描时加载用于排除该路径,以免反复处理\n # 原先的创建软连接到失败目录,并不直观,不方便找到失败文件位置,不如直接记录该文件路径\n if conf.main_mode() == 3 or link_mode:\n ftxt = os.path.abspath(os.path.join(failed_folder, 'failed_list.txt'))\n print(\"[-]Add to Failed List file, see '%s'\" % ftxt)\n with open(ftxt, 'a', encoding='utf-8') as flt:\n flt.write(f'{filepath}\\n')\n elif conf.failed_move() and not link_mode:\n failed_name = os.path.join(failed_folder, os.path.basename(filepath))\n mtxt = os.path.abspath(os.path.join(failed_folder, 'where_was_i_before_being_moved.txt'))\n print(\"'[-]Move to Failed output folder, see '%s'\" % mtxt)\n with open(mtxt, 'a', encoding='utf-8') as wwibbmt:\n tmstr = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n wwibbmt.write(f'{tmstr} FROM[{filepath}]TO[{failed_name}]\\n')\n try:\n if os.path.exists(failed_name):\n print('[-]File Exists while moving to FailedFolder')\n return\n shutil.move(filepath, failed_name)\n except:\n print('[-]File Moving to FailedFolder unsuccessful!')" }, { "identifier": "debug_print", "path": "core.py", "snippet": "def debug_print(data: json):\n try:\n print(\"[+] ------- DEBUG INFO -------\")\n for i, v in data.items():\n if i == 'outline':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'characters')\n continue\n if i == 'actor_photo' or i == 'year':\n continue\n if i == 'extrafanart':\n print('[+] -', \"%-19s\" % i, ':', len(v), 'links')\n continue\n print(f'[+] - {i:<{cn_space(i, 19)}} : {v}')\n\n print(\"[+] ------- DEBUG INFO -------\")\n except:\n pass" } ]
import argparse import json import os import random import re import sys import time import shutil import typing import urllib3 import signal import platform import config from datetime import datetime, timedelta from lxml import etree from pathlib import Path from opencc import OpenCC from scraper import get_data_from_json from ADC_function import file_modification_days, get_html, parallel_download_files from number_parser import get_number from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print
13287
# 100MB的日志文件能缩小到3.7MB。 return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # 新增失败文件列表跳过处理,及.nfo修改天数跳过处理,提示跳过视频总数,调试模式(-g)下详细被跳过文件,跳过小广告 def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # 检查去重并写回,但是不改变failed_list.txt内条目的先后次序,重复的只保留最后的 fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found 
for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue
def check_update(local_version): """ Check for updates by comparing the local version of the application with the latest version available on GitHub. It fetches the latest release information from GitHub and compares the version numbers. If a new version is available, it prints out the update information. :param local_version: The current local version of the application. """ htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") data = json.loads(htmlcode) remote = int(data["tag_name"].replace(".", "")) local_version = int(local_version.replace(".", "")) if local_version < remote: print("[*]" + ("* New update " + str(data["tag_name"]) + " *").center(54)) print("[*]" + "↓ Download ↓".center(54)) print("[*]https://github.com/CineMingle/CineMingle/releases") print("[*]======================================================") def argparse_function(ver: str) -> typing.Tuple[str, str, str, str, bool, bool, str, str]: """ Parses command-line arguments and returns the parsed values. It sets up the argument parser with various options for the application and returns the parsed arguments and their values. It also loads configuration from a config file. :param ver: The version of the application, used for the version argument. :return: A tuple containing various parsed arguments and flags. """ conf = config.getInstance() parser = argparse.ArgumentParser(epilog=f"Load Config file '{conf.ini_path}'.") parser.add_argument("file", default='', nargs='?', help="Single Movie file path.") parser.add_argument("-p", "--path", default='movies', nargs='?', help="Analysis folder path.") parser.add_argument("-m", "--main-mode", default='', nargs='?', help="Main mode. 1:Scraping 2:Organizing 3:Scraping in analysis folder") parser.add_argument("-n", "--number", default='', nargs='?', help="Custom file number of single movie file.") # parser.add_argument("-C", "--config", default='config.ini', nargs='?', help="The config file Path.") parser.add_argument("-L", "--link-mode", default='', nargs='?', help="Create movie file link. 0:moving movie file, do not create link 1:soft link 2:try hard link first") default_logdir = str(Path.home() / '.mlogs') parser.add_argument("-o", "--log-dir", dest='logdir', default=default_logdir, nargs='?', help=f"""Duplicate stdout and stderr to logfiles in logging folder, default on. default folder for current user: '{default_logdir}'. Change default folder to an empty file, or use --log-dir= to turn log off.""") parser.add_argument("-q", "--regex-query", dest='regexstr', default='', nargs='?', help="python re module regex filepath filtering.") parser.add_argument("-d", "--nfo-skip-days", dest='days', default='', nargs='?', help="Override nfo_skip_days value in config.") parser.add_argument("-c", "--stop-counter", dest='cnt', default='', nargs='?', help="Override stop_counter value in config.") parser.add_argument("-R", "--rerun-delay", dest='delaytm', default='', nargs='?', help="Delay (eg. 1h10m30s or 60 (second)) time and rerun, until all movies proceed. 
Note: stop_counter value in config or -c must none zero.") parser.add_argument("-i", "--ignore-failed-list", action="store_true", help="Ignore failed list '{}'".format( os.path.join(os.path.abspath(conf.failed_folder()), 'failed_list.txt'))) parser.add_argument("-a", "--auto-exit", action="store_true", help="Auto exit after program complete") parser.add_argument("-g", "--debug", action="store_true", help="Turn on debug mode to generate diagnostic log for issue report.") parser.add_argument("-N", "--no-network-operation", action="store_true", help="No network query, do not get metadata, for cover cropping purposes, only takes effect when main mode is 3.") parser.add_argument("-w", "--website", dest='site', default='', nargs='?', help="Override [priority]website= in config.") parser.add_argument("-D", "--download-images", dest='dnimg', action="store_true", help="Override [common]download_only_missing_images=0 force invoke image downloading.") parser.add_argument("-C", "--config-override", dest='cfgcmd', action='append', nargs=1, help="Common use config override. Grammar: section:key=value[;[section:]key=value] eg. 'de:s=1' or 'debug_mode:switch=1' override[debug_mode]switch=1 Note:this parameters can be used multiple times") parser.add_argument("-z", "--zero-operation", dest='zero_op', action="store_true", help="""Only show job list of files and numbers, and **NO** actual operation is performed. It may help you correct wrong numbers before real job.""") parser.add_argument("-v", "--version", action="version", version=ver) parser.add_argument("-s", "--search", default='', nargs='?', help="Search number") parser.add_argument("-ss", "--specified-source", default='', nargs='?', help="specified Source.") parser.add_argument("-su", "--specified-url", default='', nargs='?', help="specified Url.") args = parser.parse_args() def set_natural_number_or_none(sk, value): if isinstance(value, str) and value.isnumeric() and int(value) >= 0: conf.set_override(f'{sk}={value}') def set_str_or_none(sk, value): if isinstance(value, str) and len(value): conf.set_override(f'{sk}={value}') def set_bool_or_none(sk, value): if isinstance(value, bool) and value: conf.set_override(f'{sk}=1') set_natural_number_or_none("common:main_mode", args.main_mode) set_natural_number_or_none("common:link_mode", args.link_mode) set_str_or_none("common:source_folder", args.path) set_bool_or_none("common:auto_exit", args.auto_exit) set_natural_number_or_none("common:nfo_skip_days", args.days) set_natural_number_or_none("advenced_sleep:stop_counter", args.cnt) set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list) set_str_or_none("advenced_sleep:rerun_delay", args.delaytm) set_str_or_none("priority:website", args.site) if isinstance(args.dnimg, bool) and args.dnimg: conf.set_override("common:download_only_missing_images=0") set_bool_or_none("debug_mode:switch", args.debug) if isinstance(args.cfgcmd, list): for cmd in args.cfgcmd: conf.set_override(cmd[0]) no_net_op = False if conf.main_mode() == 3: no_net_op = args.no_network_operation if no_net_op: conf.set_override("advenced_sleep:stop_counter=0;advenced_sleep:rerun_delay=0s;face:aways_imagecut=1") return args.file, args.number, args.logdir, args.regexstr, args.zero_op, no_net_op, args.search, args.specified_source, args.specified_url class OutLogger(object): def __init__(self, logfile) -> None: self.term = sys.stdout self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def __del__(self): self.close() def __enter__(self): pass def 
__exit__(self, *args): self.close() def write(self, msg): self.term.write(msg) self.log.write(msg) def flush(self): if 'flush' in dir(self.term): self.term.flush() if 'flush' in dir(self.log): self.log.flush() if 'fileno' in dir(self.log): os.fsync(self.log.fileno()) def close(self): if self.term is not None: sys.stdout = self.term self.term = None if self.log is not None: self.log.close() self.log = None class ErrLogger(OutLogger): def __init__(self, logfile) -> None: self.term = sys.stderr self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def close(self): if self.term is not None: sys.stderr = self.term self.term = None if self.log is not None: self.log.close() self.log = None def dupe_stdout_to_logfile(logdir: str): """ Duplicates the standard output (stdout) and standard error (stderr) to log files. This function creates log files in the specified directory and redirects stdout and stderr to these files for logging purposes. :param logdir: The directory where log files will be created and saved. """ if not isinstance(logdir, str) or len(logdir) == 0: return log_dir = Path(logdir) if not log_dir.exists(): try: log_dir.mkdir(parents=True, exist_ok=True) except: pass if not log_dir.is_dir(): return # Tips for disabling logs by change directory to a same name empty regular file abslog_dir = log_dir.resolve() log_tmstr = datetime.now().strftime("%Y%m%dT%H%M%S") logfile = abslog_dir / f'mdc_{log_tmstr}.txt' errlog = abslog_dir / f'mdc_{log_tmstr}_err.txt' sys.stdout = OutLogger(logfile) sys.stderr = ErrLogger(errlog) def close_logfile(logdir: str): """ Closes the log files and restores standard output and error streams. This function is typically called at the end of the application to ensure that log files are properly closed. :param logdir: The directory where log files are saved. 
""" if not isinstance(logdir, str) or len(logdir) == 0 or not os.path.isdir(logdir): return # 日志关闭前保存日志路径 filepath = None try: filepath = sys.stdout.filepath except: pass sys.stdout.close() sys.stderr.close() log_dir = Path(logdir).resolve() if isinstance(filepath, Path): print(f"Log file '{filepath}' saved.") assert (filepath.parent.samefile(log_dir)) # 清理空文件 for f in log_dir.glob(r'*_err.txt'): if f.stat().st_size == 0: try: f.unlink(missing_ok=True) except: pass # 合并日志 只检测日志目录内的文本日志,忽略子目录。三天前的日志,按日合并为单个日志,三个月前的日志, # 按月合并为单个月志,去年及以前的月志,今年4月以后将之按年合并为年志 # 测试步骤: """ LOGDIR=/tmp/mlog mkdir -p $LOGDIR for f in {2016..2020}{01..12}{01..28};do;echo $f>$LOGDIR/mdc_${f}T235959.txt;done for f in {01..09}{01..28};do;echo 2021$f>$LOGDIR/mdc_2021${f}T235959.txt;done for f in {00..23};do;echo 20211001T$f>$LOGDIR/mdc_20211001T${f}5959.txt;done echo "$(ls -1 $LOGDIR|wc -l) files in $LOGDIR" # 1932 files in /tmp/mlog mdc -zgic1 -d0 -m3 -o $LOGDIR # python3 ./Movie_Data_Capture.py -zgic1 -o $LOGDIR ls $LOGDIR # rm -rf $LOGDIR """ today = datetime.today() # 第一步,合并到日。3天前的日志,文件名是同一天的合并为一份日志 for i in range(1): txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}T\d{6}$', f.stem, re.A)] if not txts or not len(txts): break e = [f for f in txts if '_err' in f.stem] txts.sort() tmstr_3_days_ago = (today.replace(hour=0) - timedelta(days=3)).strftime("%Y%m%dT99") deadline_day = f'mdc_{tmstr_3_days_ago}' day_merge = [f for f in txts if f.stem < deadline_day] if not day_merge or not len(day_merge): break cutday = len('T235959.txt') # cut length mdc_20201201|T235959.txt for f in day_merge: try: day_file_name = str(f)[:-cutday] + '.txt' # mdc_20201201.txt with open(day_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第二步,合并到月 for i in range(1): # 利用1次循环的break跳到第二步,避免大块if缩进或者使用goto语法 txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}$', f.stem, re.A)] if not txts or not len(txts): break txts.sort() tmstr_3_month_ago = (today.replace(day=1) - timedelta(days=3 * 30)).strftime("%Y%m32") deadline_month = f'mdc_{tmstr_3_month_ago}' month_merge = [f for f in txts if f.stem < deadline_month] if not month_merge or not len(month_merge): break tomonth = len('01.txt') # cut length mdc_202012|01.txt for f in month_merge: try: month_file_name = str(f)[:-tomonth] + '.txt' # mdc_202012.txt with open(month_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第三步,月合并到年 for i in range(1): if today.month < 4: break mons = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{6}$', f.stem, re.A)] if not mons or not len(mons): break mons.sort() deadline_year = f'mdc_{today.year - 1}13' year_merge = [f for f in mons if f.stem < deadline_year] if not year_merge or not len(year_merge): break toyear = len('12.txt') # cut length mdc_2020|12.txt for f in year_merge: try: year_file_name = str(f)[:-toyear] + '.txt' # mdc_2020.txt with open(year_file_name, 'a', encoding='utf-8') as y: y.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第四步,压缩年志 如果有压缩需求,请自行手工压缩,或者使用外部脚本来定时完成。推荐nongnu的lzip,对于 # 这种粒度的文本日志,压缩比是目前最好的。lzip -9的运行参数下,日志压缩比要高于xz -9,而且内存占用更少, # 多核利用率更高(plzip多线程版本),解压速度更快。压缩后的大小差不多是未压缩时的2.4%到3.7%左右, # 100MB的日志文件能缩小到3.7MB。 return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). 
It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # 新增失败文件列表跳过处理,及.nfo修改天数跳过处理,提示跳过视频总数,调试模式(-g)下详细被跳过文件,跳过小广告 def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # 检查去重并写回,但是不改变failed_list.txt内条目的先后次序,重复的只保留最后的 fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo 
which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue
number = get_number(False, f.stem)
4
2023-11-25 03:16:13+00:00
16k
abdulhaim/LMRL-Gym
llm_rl_scripts/chess/mc_returns/train_full_games_mc_returns.py
[ { "identifier": "Text", "path": "LLM_RL/environment.py", "snippet": "class Text:\n text: str\n is_action: bool" }, { "identifier": "text_env_eval", "path": "LLM_RL/environment.py", "snippet": "def text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True, \n) -> Tuple[List[List[InteractionTransition]], Dict[str, Any]]:\n interactions, rewards, dones, eps_lengths = [], [], [], []\n for _ in tqdm(range((n_rollouts+(bsize-1))//bsize), disable=not verbose):\n actual_bsize = min(n_rollouts-len(interactions), bsize)\n npad = bsize - actual_bsize\n interaction_batch = interact_environment(\n env, \n policy, \n initial_text_history=initial_text_history, \n env_seed=[None]*actual_bsize if seed_generator is None else [next(seed_generator) for _ in range(actual_bsize)], \n env_options=[env_options]*actual_bsize, \n bsize=actual_bsize,\n npad=npad,\n )\n \n for interaction in interaction_batch:\n interactions.append(interaction)\n rewards.append(sum(map(lambda x: x.reward, interaction)))\n dones.append(interaction[-1].done)\n eps_lengths.append(len(interaction))\n if interaction_callback is not None:\n interaction_callback(interaction)\n \n rewards = np.asarray(rewards, dtype=np.float32)\n dones = np.asarray(dones, dtype=np.float32)\n results_summary = dict(\n reward=dict(\n mean=np.mean(rewards), \n std=np.std(rewards), \n min=np.min(rewards), \n max=np.max(rewards), \n ), \n done=dict(\n mean=np.mean(dones), \n std=np.std(dones), \n min=np.min(dones), \n max=np.max(dones), \n ), \n length=dict(\n mean=np.mean(eps_lengths),\n std=np.std(eps_lengths),\n min=np.min(eps_lengths),\n max=np.max(eps_lengths),\n ),\n )\n \n return interactions, results_summary" }, { "identifier": "TextTrajectory", "path": "LLM_RL/environment.py", "snippet": "class TextTrajectory:\n text_history: TextHistory\n reward: Tuple[float, ...]\n done: bool\n\n def __post_init__(self):\n assert len(self.reward) == len(self.text_history), \"reward is needed for each text\"\n assert all([r == 0.0 for r, t in zip(self.reward, self.text_history) if not t.is_action]), \"reward for non-actions texts should be 0.0\"" }, { "identifier": "TextTrajectoryChain", "path": "LLM_RL/environment.py", "snippet": "class TextTrajectoryChain:\n text_trajectory: TextTrajectory\n next: Optional[TextTrajectoryChain]" }, { "identifier": "TokenTrajectoryChain", "path": "LLM_RL/environment.py", "snippet": "class TokenTrajectoryChain:\n token_trajectory: TokenTrajectory\n next: Optional[TokenTrajectoryChain]\n\n def __post_init__(self):\n curr, dones = self, []\n while curr.next is not None:\n dones.append(curr.token_trajectory.done)\n curr = curr.next\n assert not np.any(dones[:-1]), 'token trajectory chain can only be done at the end'\n \n def to_list(self) -> List[TokenTrajectory]:\n curr, l = self, []\n while curr is not None:\n l.append(curr.token_trajectory)\n curr = curr.next\n return l\n\n @classmethod\n def from_text_trajectory_chain(\n cls, \n text_trajectory_chain: TextTrajectoryChain, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> 
TokenTrajectoryChain:\n return TokenTrajectoryChain(\n TokenTrajectory.from_text_trajectory(\n text_trajectory_chain.text_trajectory, \n tokenizer, \n token_process=token_process, \n ), \n cls.from_text_trajectory_chain(\n text_trajectory_chain.next, \n tokenizer, \n token_process=token_process, \n ) if text_trajectory_chain.next is not None else None, \n )" }, { "identifier": "MCData", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCData(NamedTuple):\n input_ids: np.ndarray # [t]\n should_take_action: np.ndarray # [t-1]\n returns: np.ndarray # [t-1]\n\n @staticmethod\n def block(\n data: List[MCData], \n blocking_strategy: BlockingStrategy, \n tokenizer: PreTrainedTokenizerBase, \n ) -> Dict[str, np.ndarray]:\n return dict(\n input_ids=block_sequences(\n list(map(lambda x: x.input_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ), \n should_take_action=block_sequences(\n list(map(lambda x: x.should_take_action, data)), \n False, \n dtype=np.bool_, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n returns=block_sequences(\n list(map(lambda x: x.returns, data)), \n 0.0, \n dtype=np.float32, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n )\n \n @classmethod\n def from_token_trajectory_chain(\n cls, \n token_trajectory_chain: TokenTrajectoryChain, \n gamma: float, \n ):\n filtered_rewards_chain = []\n should_take_action_chain = []\n for token_trajectory in token_trajectory_chain.to_list():\n should_take_action = token_trajectory.is_action[1:]\n rewards = token_trajectory.reward[1:]\n filtered_rewards = rewards[should_take_action]\n filtered_rewards_chain.append(filtered_rewards)\n should_take_action_chain.append(should_take_action)\n filtered_rewards_chain = np.concatenate(filtered_rewards_chain, axis=0)\n should_take_action_chain = np.concatenate(should_take_action_chain, axis=0)\n \n rtgs_sequence = get_rtg(filtered_rewards_chain, gamma=gamma)\n \n should_take_action = token_trajectory_chain.token_trajectory.is_action[1:]\n returns = np.zeros_like(should_take_action, dtype=np.float32)\n returns[should_take_action] = rtgs_sequence[:should_take_action.sum()]\n return cls(\n input_ids=token_trajectory_chain.token_trajectory.tokens, \n should_take_action=should_take_action, \n returns=returns, \n )" }, { "identifier": "GPT2ValuePolicy", "path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py", "snippet": "class GPT2ValuePolicy(ValueRLPolicy):\n def __init__(\n self, \n inference: ValueRLInference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n self.inference = inference\n self.prng_key = prng_key\n self.generation_config = generation_config\n self.blocking_strategy = blocking_strategy\n self.in_str_process = in_str_process\n self.out_str_process = out_str_process\n self.input_token_process = input_token_process\n self.target_token_process = target_token_process\n if self.in_str_process is None:\n self.in_str_process = lambda x: x\n if self.out_str_process is 
None:\n self.out_str_process = lambda x: x\n self.trace = trace\n \n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n if done is None:\n done = [False]*len(text_history)\n # force eos_token for done sequences\n eos_token = self.inference.tokenizer.eos_token\n if self.generation_config is not None and self.generation_config.eos_token_id is not None:\n eos_token = self.inference.tokenizer.decode(self.generation_config.eos_token_id)\n if eos_token is None:\n eos_token = self.inference.tokenizer.pad_token\n if eos_token is None:\n eos_token = ''\n \n raw_input_strs = [\n eos_token if d else self.in_str_process(text_history_to_str(item)) \\\n for item, d in zip(text_history, done)\n ]\n\n new_key = None\n if self.prng_key is not None:\n self.prng_key, new_key = jax.random.split(self.prng_key)\n model_outputs = self.inference.generate_from_str(\n input_strs=raw_input_strs, \n prng_key=new_key, \n blocking_strategy=self.blocking_strategy, \n generation_config=self.generation_config, \n input_token_process=self.input_token_process, \n target_token_process=self.target_token_process, \n trace=self.trace, \n )\n\n raw_output_strs = model_outputs.output_strs\n output_strs = [\n \"\" if d else self.out_str_process(strip_prompt_from_completion(raw_input_str, raw_output_str)) \\\n for raw_input_str, raw_output_str, d in zip(raw_input_strs, raw_output_strs, done)\n ]\n\n return [\n None if d else text_history_item+(Text(output_str, True),) \\\n for text_history_item, output_str, d in zip(text_history, output_strs, done)\n ]\n \n def set_params(self, policy_params: PyTree) -> None:\n pi_beta_params, base_params, \\\n q1_head_params, q2_head_params = policy_params\n self.inference = self.inference.replace(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n )" }, { "identifier": "load_train_state_from_config", "path": "LLM_RL/heads/mlp_head.py", "snippet": "def load_train_state_from_config(\n model_config: MLPHeadConfig, \n model_dtype: Union[str, jnp.dtype], \n optim_getter: Callable[[PyTree], optax.GradientTransformation], \n mesh: Mesh, # should be shape (dp, mp)\n prng_key: jax.random.PRNGKeyArray, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[TrainState, MLPHead]:\n \n model = MLPHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # shard params\n params = freeze(shard_params_from_config(model, prng_key, params_dtype=params_dtype))\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n # shard train_state\n train_state = shard_train_state_from_params(model, params, optim_getter(params))\n\n return train_state, model" }, { "identifier": "MLPHeadConfig", "path": "LLM_RL/heads/mlp_head.py", "snippet": "class MLPHeadConfig(HeadConfig):\n def __init__(\n self, \n input_dim: int, \n hidden_dim: int, \n output_dim: int, \n use_bias: bool=True, \n unpadded_output_dim: Optional[int]=None, \n layer1_initializer_range: Optional[int]=None, \n layer1_bias_init: Optional[float]=None, \n layer2_initializer_range: Optional[int]=None, \n layer2_bias_init: Optional[float]=None, \n mesh: Optional[jax.sharding.Mesh]=None, \n ) -> None:\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n self.use_bias = use_bias\n self.layer1_initializer_range = 
layer1_initializer_range\n self.layer1_bias_init = layer1_bias_init\n self.layer2_initializer_range = layer2_initializer_range\n self.layer2_bias_init = layer2_bias_init\n self.mesh = mesh\n self.unpadded_output_dim = unpadded_output_dim\n if self.unpadded_output_dim is None:\n self.unpadded_output_dim = self.output_dim\n super().__init__()\n \n @staticmethod\n def get_partition_rules():\n return [\n (re.escape(\"['dense1']['kernel']\"), PS(\"fsdp\", \"mp\")), \n (re.escape(\"['dense1']['bias']\"), PS(\"mp\")), \n (re.escape(\"['dense2']['kernel']\"), PS(\"mp\", \"fsdp\")), \n (re.escape(\"['dense2']['bias']\"), PS()), \n ]\n\n def to_dict(self) -> Dict[str, Any]:\n if self.mesh is None:\n return super().to_dict()\n else:\n new_conf = MLPHeadConfig(**self.__dict__)\n new_conf.mesh = None\n return new_conf.to_dict()" }, { "identifier": "GPT2MCTrain", "path": "LLM_RL/algorithms/mc_returns/gpt2/interface.py", "snippet": "class GPT2MCTrain(MCTrain):\n @classmethod\n def load_train(\n cls, \n base_train_state: TrainState, \n q_head_train_state: TrainState, \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n detach_q: bool, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n base_train_state_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_train_state)\n q_head_train_state_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q_head_train_state)\n\n @partial(\n pjit, \n donate_argnums=(0, 1), \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q_head_train_state_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q_head_train_state_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _step(\n base_train_state: TrainState, \n q_head_train_state: TrainState, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n train: bool=True, \n ) -> Tuple[TrainState, Optional[PyTree], TrainState, TrainState, TrainState, PyTree, PyTree, jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n returns = with_named_sharding_constraint(returns, mesh, PS(('dp', 'fsdp'), None))\n\n # define loss function\n\n def grad_loss(base_params: PyTree, q_head_params: PyTree, prng_key: jax.random.PRNGKeyArray):\n \n # get base hidden states\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = base_model(\n input_ids=input_ids, \n 
attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q_head_output = q_head_model.apply(\n {'params': q_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n # stop gradients\n if detach_q:\n q_head_output = jax.lax.stop_gradient(q_head_output)\n\n q = jnp.take_along_axis(q_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q_logits = q_head_output[:, :-1, :].astype(jnp.float32)\n\n loss, info = loss_fn(\n q, \n q_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n returns, \n )\n return loss, info\n\n # take loss\n (loss, info), (base_grads, q_head_grads) = jax.value_and_grad(grad_loss, has_aux=True, argnums=(0, 1))(\n base_train_state.params, \n q_head_train_state.params, \n prng_key, \n )\n # assert shard gradients\n base_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n base_grads, \n base_train_state_partition_spec.params, \n )\n q_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n q_head_grads, \n q_head_train_state_partition_spec.params, \n )\n # update params and optim state\n base_train_state = base_train_state.apply_gradients(grads=base_grads)\n q_head_train_state = q_head_train_state.apply_gradients(grads=q_head_grads)\n\n return base_train_state, q_head_train_state, loss, info\n\n return cls(\n base_train_state=base_train_state, \n q_head_train_state=q_head_train_state, \n base_model=base_model, \n q_head_model=q_head_model, \n tokenizer=tokenizer, \n _step=_step, \n )" }, { "identifier": "GPT2MCInference", "path": "LLM_RL/algorithms/mc_returns/gpt2/interface.py", "snippet": "class GPT2MCInference(MCInference):\n @classmethod\n def load_inference(\n cls, \n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q_head_params: PyTree, \n pi_beta_model: Optional[FlaxPreTrainedModel], \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n beta: float=0.0, \n dp_shard_logits: bool=True, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n\n value_inference = GPT2ValueRLInference.load_inference(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q_head_params, \n q2_head_params=None, \n v_head_params=None, \n pi_beta_model=pi_beta_model, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=None, \n tokenizer=tokenizer, \n beta=beta, \n dp_shard_logits=dp_shard_logits, \n )\n\n base_params_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_params)\n q_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q_head_params)\n\n @partial(\n pjit, \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n 
NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _eval_loss(\n base_params: TrainState, \n q_head_params: TrainState, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n train: bool=True, \n ):\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n returns = with_named_sharding_constraint(returns, mesh, PS(('dp', 'fsdp'), None))\n\n # get base hidden states\n \n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q_head_output = q_head_model.apply(\n {'params': q_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n q = jnp.take_along_axis(q_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q_logits = q_head_output[:, :-1, :].astype(jnp.float32)\n\n loss, info = loss_fn(\n q, \n q_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n returns, \n )\n\n return loss, info\n \n return cls(\n pi_beta_params=value_inference.pi_beta_params, \n base_params=value_inference.base_params, \n q1_head_params=value_inference.q1_head_params, \n q2_head_params=value_inference.q2_head_params, \n v_head_params=value_inference.v_head_params, \n pi_beta_model=value_inference.pi_beta_model, \n base_model=value_inference.base_model, \n q_head_model=value_inference.q_head_model, \n v_head_model=value_inference.v_head_model, \n tokenizer=value_inference.tokenizer, \n _generate=value_inference._generate, \n _forward=value_inference._forward, \n _eval_loss=_eval_loss, \n )" }, { "identifier": "FenChessHistoryEnv", "path": "llm_rl_scripts/chess/env/env.py", "snippet": "class FenChessHistoryEnv(TextEnv):\n def __init__(self, max_moves=400, from_position=None, random_opponent=False):\n super().__init__()\n self.chess_env = ChessEnv(fen=True, from_position=from_position, random_opponent=random_opponent)\n self.from_position = from_position\n self.max_moves = max_moves\n self.from_position = from_position\n # self.initial_history = initial_history\n\n def reset(self, seed: Optional[int] = None, options: Optional[Dict] = None):\n self.init_state, _ = self.chess_env.reset()\n self.num_moves_made = 0\n return (Text(preprocess_state_og(self.init_state), False),)\n\n def step(self, text_history: TextHistory):\n assert text_history[-1].is_action\n action = text_history[-1].text\n action = postprocess_move(action)\n st, reward, done, opp_mv = self.chess_env.step(action) \n new_state = Text(preprocess_state_og(st), False)\n self.num_moves_made += 1\n if self.num_moves_made > self.max_moves:\n done = 1\n return (new_state,), reward, done\n \n def copy(self):\n return FenChessHistoryEnv( self.max_moves, 
self.from_position)" }, { "identifier": "mc_loss", "path": "LLM_RL/algorithms/mc_returns/base_interface.py", "snippet": "def mc_loss(\n q: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q_logits: jax.Array, # [batch, time-1, vocab] output is masked; shift x[:-1]\n token_ids: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n attention_mask: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n should_take_action: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n returns: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n *, \n cql_weight: Union[float, jax.Array], \n) -> Tuple[jnp.ndarray, Any]:\n # should be an action in the batch\n mask = should_take_action.astype(jnp.float32) * attention_mask\n n = mask.sum()\n q_query_indicators = get_query_indicators(should_take_action.reshape(-1))\n \n # extract selected values\n qsa_selected = (q_query_indicators * q.reshape(-1)).sum(axis=1)\n returns_selected = (q_query_indicators * returns.reshape(-1)).sum(axis=1)\n\n # get masks for selected values\n a_mask = (q_query_indicators.sum(axis=1) > 0).astype(jnp.float32)\n\n # compute q loss\n q_loss = (optax.l2_loss(qsa_selected, jax.lax.stop_gradient(returns_selected)) * a_mask).sum() / n\n\n # compute cql loss on both q heads\n q_cql_loss = optax.softmax_cross_entropy_with_integer_labels(q_logits, token_ids)\n q_cql_loss = (mask * q_cql_loss).sum() / n\n \n loss = q_loss + cql_weight * q_cql_loss\n\n logs = dict(\n losses=dict(\n total_loss=loss, \n q_loss=q_loss, \n q_cql_loss=q_cql_loss, \n ), \n q=get_tensor_stats(qsa_selected, mask=a_mask, n=n), \n returns=get_tensor_stats(returns_selected, mask=a_mask, n=n), \n )\n\n return loss, logs" }, { "identifier": "eval_loss", "path": "LLM_RL/algorithms/mc_returns/train.py", "snippet": "def eval_loss(\n inference: MCInference, \n dataset: Union[Seq2SeqDataset, Seq2SeqIterableDataset], \n prng_key: Optional[KeyArray], \n bsize: int, \n eval_batches: Optional[int], \n) -> Dict[str, Any]:\n # setup evaluator loop state\n eval_logs = []\n\n # eval on batches\n prng_key, new_prng = jax.random.split(prng_key) if prng_key is not None else (None, None)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for i, batch in tqdm(enumerate(d)):\n # conditionally terminate early\n if eval_batches is not None and i >= eval_batches:\n break\n\n # get eval logs\n _, info = inference.eval_loss(**batch)\n eval_logs.append(info)\n \n # gather and postproc eval logs\n eval_logs = pull_logs(combine_logs(eval_logs))\n return eval_logs" }, { "identifier": "train_loop", "path": "LLM_RL/algorithms/mc_returns/train.py", "snippet": "def train_loop(\n trainer: MCTrain, \n inference: Union[ValueRLInference, MCInference], \n evaluator: Optional[Callable[[Inference], Tuple[float, Dict[str, Any]]]], \n dataset: Union[Seq2SeqDataset, Seq2SeqIterableDataset], \n prng_key: KeyArray, \n save_dir: Optional[str], \n epochs: int, \n max_steps: Optional[int], \n bsize: int, \n log_every: int, \n eval_every_steps: Optional[int], \n eval_every_epochs: Optional[int], \n eval_at_beginning: bool, \n eval_at_end: bool, \n save_every_steps: Optional[int], \n save_every_epochs: Optional[int], \n save_at_beginning: bool, \n save_at_end: bool, \n save_best: bool, \n max_checkpoints: Optional[int], \n save_train_state: bool, \n save_dtype: jnp.dtype, \n use_wandb: bool, \n wandb_project: Optional[str], \n wandb_run_name: Optional[str], \n wandb_config: Optional[Dict[str, Any]], \n is_main_process: Optional[bool]=None, \n **loop_state: 
Dict[Hashable, Any], \n) -> Tuple[Train, Inference]:\n assert (not use_wandb) or (use_wandb and wandb_project is not None)\n if is_main_process is None:\n is_main_process = jax.process_index() == 0\n \n # initalize wandb\n wandb_id = loop_state.get('wandb_id', None)\n if use_wandb and is_main_process:\n if wandb_id is None:\n wandb_id = wandb.util.generate_id()\n wandb.init(\n project=wandb_project, \n id=wandb_id, \n name=wandb_run_name, \n config=wandb_config, \n reinit=True, \n resume=\"allow\", \n )\n\n # initalize training loop state\n train_logs = []\n best_perf = loop_state.get('best_perf', float('inf'))\n saved_checkpoints = loop_state.get('saved_checkpoints', deque([]))\n step = 0\n steps_per_epoch = len(dataset) // bsize if isinstance(dataset, Dataset) else None\n if 'steps_per_epoch' in loop_state:\n assert steps_per_epoch == loop_state['steps_per_epoch'], 'loop_state steps_per_epoch does not match dataset steps_per_epoch'\n epoch = -1\n\n def _save(\n name: str, \n add_to_queue: bool, \n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal saved_checkpoints\n print(f'saving checkpoint {name} ...')\n # conditionally delete old checkpoints\n if add_to_queue and is_main_process:\n if (max_checkpoints is not None) and (len(saved_checkpoints) >= max_checkpoints):\n delete(saved_checkpoints.popleft(), recursive=True)\n curr_save_dir = os.path.join(save_dir, name)\n if is_main_process:\n create_path(curr_save_dir)\n dump_state(\n base_model=trainer.base_model, \n q_head_model=trainer.q_head_model, \n base_train_state=trainer.base_train_state, \n q_head_train_state=trainer.q_head_train_state, \n save_dir=curr_save_dir, \n save_train_state=save_train_state, \n enable_save=is_main_process, \n save_dtype=save_dtype, \n **loop_state, \n )\n if add_to_queue and is_main_process:\n saved_checkpoints.append(curr_save_dir)\n print('saved.')\n \n def _inference_update():\n nonlocal inference\n inference = inference.replace(\n base_params=trainer.base_train_state.params, \n q1_head_params=trainer.q_head_train_state.params, \n )\n \n def _eval(\n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal best_perf\n # get eval logs\n _inference_update()\n eval_perf, eval_logs = evaluator(inference)\n\n # publish eval logs\n eval_logs = pull_logs(label_logs(eval_logs, 'eval', {'step': step+1, 'epoch': epoch}))\n log(eval_logs, use_wandb and is_main_process)\n\n # conditionally save best model and optimizer state\n if save_dir is not None and save_best and eval_perf < best_perf:\n print('new best model!')\n best_perf = eval_perf\n _save(\n name='best', \n add_to_queue=False, \n **{**loop_state, 'best_perf': best_perf}, \n )\n \n # begin evaluation\n if evaluator is not None and eval_at_beginning:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save initial checkpoint\n if save_dir is not None and save_at_beginning:\n _save(\n name='initial', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # begin training loop\n for epoch in tqdm(range(epochs)):\n prng_key, new_prng = jax.random.split(prng_key)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for batch in tqdm(d, total=steps_per_epoch):\n \n # step model and get training logs\n prng_key, new_prng = jax.random.split(prng_key)\n if 'step' in 
loop_state and step < loop_state['step']:\n step += 1\n continue\n trainer, _, info = trainer.step(\n **batch, \n prng_key=new_prng, \n train=True, \n )\n train_logs.append(info)\n \n # publish training logs and clear logs\n if (step + 1) % log_every == 0:\n logs = combine_logs(train_logs)\n logs = pull_logs(label_logs(logs, 'train', {'step': step+1, 'epoch': epoch}))\n log(logs, use_wandb and is_main_process)\n train_logs = []\n \n # begin evaluation\n if evaluator is not None and eval_every_steps is not None and (step + 1) % eval_every_steps == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_steps is not None and (step + 1) % save_every_steps == 0:\n _save(\n name=f'step_{step+1}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n step += 1\n\n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_every_epochs is not None and (epoch + 1) % eval_every_epochs == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_epochs is not None and (epoch + 1) % save_every_epochs == 0:\n _save(\n name=f'epoch_{epoch}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_at_end:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save final checkpoint\n if save_dir is not None and save_at_end:\n _save(\n name='last', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n # stop wandb\n if use_wandb and is_main_process:\n wandb.finish()\n _inference_update()\n return trainer, inference" }, { "identifier": "MCData", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCData(NamedTuple):\n input_ids: np.ndarray # [t]\n should_take_action: np.ndarray # [t-1]\n returns: np.ndarray # [t-1]\n\n @staticmethod\n def block(\n data: List[MCData], \n blocking_strategy: BlockingStrategy, \n tokenizer: PreTrainedTokenizerBase, \n ) -> Dict[str, np.ndarray]:\n return dict(\n input_ids=block_sequences(\n list(map(lambda x: x.input_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ), \n should_take_action=block_sequences(\n list(map(lambda x: x.should_take_action, data)), \n False, \n dtype=np.bool_, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n returns=block_sequences(\n list(map(lambda x: x.returns, data)), \n 0.0, \n dtype=np.float32, \n 
blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n )\n \n @classmethod\n def from_token_trajectory_chain(\n cls, \n token_trajectory_chain: TokenTrajectoryChain, \n gamma: float, \n ):\n filtered_rewards_chain = []\n should_take_action_chain = []\n for token_trajectory in token_trajectory_chain.to_list():\n should_take_action = token_trajectory.is_action[1:]\n rewards = token_trajectory.reward[1:]\n filtered_rewards = rewards[should_take_action]\n filtered_rewards_chain.append(filtered_rewards)\n should_take_action_chain.append(should_take_action)\n filtered_rewards_chain = np.concatenate(filtered_rewards_chain, axis=0)\n should_take_action_chain = np.concatenate(should_take_action_chain, axis=0)\n \n rtgs_sequence = get_rtg(filtered_rewards_chain, gamma=gamma)\n \n should_take_action = token_trajectory_chain.token_trajectory.is_action[1:]\n returns = np.zeros_like(should_take_action, dtype=np.float32)\n returns[should_take_action] = rtgs_sequence[:should_take_action.sum()]\n return cls(\n input_ids=token_trajectory_chain.token_trajectory.tokens, \n should_take_action=should_take_action, \n returns=returns, \n )" }, { "identifier": "MCIterableDataset", "path": "LLM_RL/algorithms/mc_returns/data.py", "snippet": "class MCIterableDataset(IterableDataset):\n def __init__(self, mc_data: Iterable[Dict[str, np.ndarray]]):\n self.mc_data = mc_data\n \n def __iter__(self):\n return _MCIteratorDataset(iter(self.mc_data))\n \n @classmethod\n def from_mc_data_iterable(\n cls, \n mc_data: Iterable[MCData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> MCIterableDataset:\n \n class _TokensIterable(Iterable):\n def _tokens_generator(self):\n for item in mc_data:\n yield jax.tree_util.tree_map(lambda x: x[0], MCData.block([item], blocking_strategy, tokenizer))\n\n def __iter__(self):\n return self._tokens_generator()\n\n return cls(_TokensIterable())" } ]
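Several of the snippets above (MCData.from_token_trajectory_chain and mc_loss in particular) revolve around the Monte Carlo return-to-go that the Q head is regressed onto. The repo's get_rtg helper is referenced but not included in this context list, so the sketch below only shows the standard definition it presumably implements, rtg[t] = r[t] + gamma * rtg[t+1]; treat it as an assumption rather than the project's exact code.

import numpy as np

def returns_to_go(rewards: np.ndarray, gamma: float) -> np.ndarray:
    # Accumulate the discounted return backwards over the reward sequence.
    rtg = np.zeros_like(rewards, dtype=np.float32)
    running = 0.0
    for t in range(len(rewards) - 1, -1, -1):
        running = rewards[t] + gamma * running
        rtg[t] = running
    return rtg

rewards = np.array([0.0, 0.0, 1.0], dtype=np.float32)
print(returns_to_go(rewards, gamma=0.99))  # [0.9801  0.99  1.0]

MCData.from_token_trajectory_chain then scatters these per-action values back onto the action token positions (returns[should_take_action] = rtgs_sequence[...]), leaving zeros everywhere else.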
from typing import Optional
from JaxSeq.bucket_manager import open_with_bucket as open
from JaxSeq.utils import convert_path, load_mesh, setup_experiment_save
from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask
from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode
from transformers.generation import GenerationConfig
from jaxtyping import PyTree
from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain
from LLM_RL.algorithms.mc_returns.data import MCData
from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy
from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config
from LLM_RL.heads.mlp_head import MLPHeadConfig
from LLM_RL.algorithms.mc_returns.gpt2.interface import GPT2MCTrain, GPT2MCInference
from functools import partial
from JaxSeq.logs import pull_logs
from transformers import GPT2TokenizerFast
from llm_rl_scripts.chess.env.env import FenChessHistoryEnv
from JaxSeq.shard_model import copy_sharded_pytree
from LLM_RL.algorithms.mc_returns.base_interface import mc_loss
from LLM_RL.algorithms.mc_returns.train import eval_loss, train_loop
from LLM_RL.algorithms.mc_returns.data import MCData, MCIterableDataset
import tyro
import jax
import jax.numpy as jnp
import os
import optax
import pickle as pkl
import re
import numpy as np
import json
12,253
tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)], [0, obj[-1]["reward"]], True) curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None) # curr_chain.next = curr_chain for traj in reversed(obj): # iterate through move history backwards except for last transition # embed() prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)], [0, traj["reward"]], False) curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(curr_chain, tokenizer) while token_trajectory_chain.next is not None: yield MCData.from_token_trajectory_chain(token_trajectory_chain, gamma=gamma) token_trajectory_chain = token_trajectory_chain.next mc_data = mc_data_generator(train_data_path) dataset = MCIterableDataset.from_mc_data_iterable(mc_data, tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, )) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy pi_beta_params = copy_sharded_pytree( model=base_model, pytree=base_train_state.params, ) q_prng_key = jax.random.PRNGKey(4) # embed() q_head_train_state, q_head = load_head_train_state_from_config( model_config=MLPHeadConfig( input_dim=base_model.config.n_embd, hidden_dim=base_model.config.n_embd, output_dim=base_model.config.vocab_size, use_bias=True, layer2_initializer_range=0.0, layer2_bias_init=0.0, ), model_dtype=jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=q_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: 
loop_state = pkl.load(f)
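The mc_data_generator in the cropped code above reads one full game per JSON line and rebuilds it back-to-front into a TextTrajectoryChain. The file format is not documented in this excerpt, but the field accesses (obj[-1]["state"], ["action"], ["reward"]) suggest each line is a list of state/action/reward transitions in move order; the sketch below mirrors that construction with made-up placeholder data and a throwaway ChainNode class standing in for the repo's TextTrajectory/TextTrajectoryChain.

import json
from typing import Optional

class ChainNode:
    # Minimal stand-in for TextTrajectoryChain, just to show the linking order.
    def __init__(self, state: str, action: str, reward: float, done: bool,
                 next: Optional["ChainNode"] = None):
        self.state, self.action, self.reward, self.done, self.next = state, action, reward, done, next

# Assumed per-line shape: a whole game as a list of transitions (placeholder strings, not real FEN data).
line = json.dumps([
    {"state": "<fen 0>", "action": "e2e4", "reward": 0.0},
    {"state": "<fen 1>", "action": "d2d4", "reward": 0.0},
    {"state": "<fen 2>", "action": "g1f3", "reward": 1.0},
])

obj = json.loads(line)
# Build the chain back-to-front, as the generator does: the last transition is terminal (done=True),
# and every earlier transition is prepended pointing at the chain built so far.
chain = ChainNode(obj[-1]["state"], obj[-1]["action"], obj[-1]["reward"], done=True)
for traj in reversed(obj):
    chain = ChainNode(traj["state"], traj["action"], traj["reward"], done=False, next=chain)

# Note: because the loop runs over every transition, the final move appears to be wrapped twice
# (once as the terminal node, once more by the loop), so the chain has len(obj) + 1 nodes.
node, depth = chain, 0
while node is not None:
    node, depth = node.next, depth + 1
print(depth)  # 4 for a 3-move game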
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_give_position_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.95, cql_weight: float=0.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=150, log_every: int=256, eval_every_steps: Optional[int]=100000, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=100000, save_every_epochs: Optional[int]=None, save_at_beginning: bool=True, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=150, policy_max_output_length: int=10, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=False, ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)], [0, obj[-1]["reward"]], True) curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None) # curr_chain.next = curr_chain for traj in reversed(obj): # iterate through move history backwards except for last transition # embed() prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)], [0, traj["reward"]], False) curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(curr_chain, tokenizer) while token_trajectory_chain.next is not None: yield MCData.from_token_trajectory_chain(token_trajectory_chain, gamma=gamma) token_trajectory_chain = token_trajectory_chain.next mc_data = mc_data_generator(train_data_path) dataset = MCIterableDataset.from_mc_data_iterable(mc_data, tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, )) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, 
weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy pi_beta_params = copy_sharded_pytree( model=base_model, pytree=base_train_state.params, ) q_prng_key = jax.random.PRNGKey(4) # embed() q_head_train_state, q_head = load_head_train_state_from_config( model_config=MLPHeadConfig( input_dim=base_model.config.n_embd, hidden_dim=base_model.config.n_embd, output_dim=base_model.config.vocab_size, use_bias=True, layer2_initializer_range=0.0, layer2_bias_init=0.0, ), model_dtype=jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=q_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f)
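The next_line recorded for this example, loss_fn = partial(mc_loss, cql_weight=cql_weight), wires up the loss whose definition is quoted in the context above: an L2 regression of the selected Q-values toward the Monte Carlo returns, plus a CQL-style cross-entropy penalty on the Q "logits". A deliberately simplified, single-sequence NumPy sketch of those two terms follows; it is not the repo's batched, sharded implementation, and the shapes, masking, and random inputs are illustrative only.

import numpy as np

def mc_loss_sketch(q_logits, token_ids, action_mask, returns, cql_weight):
    # q_logits: [T, vocab]; token_ids: [T]; action_mask: [T] (1 where the token is part of an action);
    # returns: [T] (return-to-go at action positions).
    q_taken = q_logits[np.arange(len(token_ids)), token_ids]  # Q(s, a) of the taken token per step
    n = action_mask.sum()

    # Regression of Q(s, a) toward the (stop-gradient) MC return; 0.5 * err^2 matches optax.l2_loss.
    q_loss = (0.5 * (q_taken - returns) ** 2 * action_mask).sum() / n

    # CQL term: cross-entropy of the Q head against the taken tokens, logsumexp(q) - q_taken.
    logZ = np.log(np.exp(q_logits).sum(axis=-1))
    cql_loss = ((logZ - q_taken) * action_mask).sum() / n

    return q_loss + cql_weight * cql_loss

T, vocab = 4, 8
rng = np.random.default_rng(0)
q_logits = rng.normal(size=(T, vocab)).astype(np.float32)
token_ids = rng.integers(0, vocab, size=T)
action_mask = np.array([0, 1, 1, 1], dtype=np.float32)
returns = np.array([0.0, 0.98, 0.99, 1.0], dtype=np.float32)
print(mc_loss_sketch(q_logits, token_ids, action_mask, returns, cql_weight=0.01))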
loss_fn = partial(mc_loss, cql_weight=cql_weight)
12
2023-11-21 00:16:42+00:00
16k
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Kernerl size of conv layers.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\r\n \"\"\"Construct an ConvolutionModule object.\r\n \"\"\"\r\n super(ConvolutionModule, self).__init__()\r\n # kernerl_size should be a odd number for 'SAME' padding\r\n assert (kernel_size - 1) % 2 == 0\r\n\r\n self.pointwise_conv1 = nn.Conv1d(\r\n channels,\r\n 2 * channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.depthwise_conv = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n groups=channels,\r\n bias=bias,\r\n )\r\n self.norm = nn.BatchNorm1d(channels)\r\n self.pointwise_conv2 = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Compute convolution module.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, channels).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, channels).\r\n\r\n \"\"\"\r\n # exchange the temporal dimension and the feature dimension\r\n x = x.transpose(1, 2)\r\n\r\n # GLU mechanism\r\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\r\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\r\n\r\n # 1D Depthwise Conv\r\n x = self.depthwise_conv(x)\r\n x = self.activation(self.norm(x))\r\n\r\n x = self.pointwise_conv2(x)\r\n\r\n return x.transpose(1, 2)\r" }, { "identifier": "EncoderLayer", "path": "src/clap_module/conformer/encoder_layer.py", "snippet": "class EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer module.\r\n\r\n Args:\r\n size (int): Input dimension.\r\n self_attn (torch.nn.Module): Self-attention module instance.\r\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance\r\n can be used as the argument.\r\n feed_forward (torch.nn.Module): Feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n conv_module (torch.nn.Module): Convolution module instance.\r\n `ConvlutionModule` instance can be used as the argument.\r\n dropout_rate (float): Dropout rate.\r\n normalize_before (bool): Whether to use layer_norm before the first block.\r\n concat_after (bool): Whether to concat attention layer's input and output.\r\n if True, additional linear will be applied.\r\n i.e. x -> x + linear(concat(x, att(x)))\r\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\r\n stochastic_depth_rate (float): Proability to skip this layer.\r\n During training, the layer may skip residual computation and return input\r\n as-is with given probability.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n size,\r\n self_attn,\r\n feed_forward,\r\n feed_forward_macaron,\r\n conv_module,\r\n dropout_rate,\r\n normalize_before=True,\r\n concat_after=False,\r\n stochastic_depth_rate=0.0,\r\n ):\r\n \"\"\"Construct an EncoderLayer object.\"\"\"\r\n super(EncoderLayer, self).__init__()\r\n self.self_attn = self_attn\r\n self.feed_forward = feed_forward\r\n self.feed_forward_macaron = feed_forward_macaron\r\n self.conv_module = conv_module\r\n self.norm_ff = LayerNorm(size) # for the FNN module\r\n self.norm_mha = LayerNorm(size) # for the MHA module\r\n if feed_forward_macaron is not None:\r\n self.norm_ff_macaron = LayerNorm(size)\r\n self.ff_scale = 0.5\r\n else:\r\n self.ff_scale = 1.0\r\n if self.conv_module is not None:\r\n self.norm_conv = LayerNorm(size) # for the CNN module\r\n self.norm_final = LayerNorm(size) # for the final output of the block\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.size = size\r\n self.normalize_before = normalize_before\r\n self.concat_after = concat_after\r\n if self.concat_after:\r\n self.concat_linear = nn.Linear(size + size, size)\r\n self.stochastic_depth_rate = stochastic_depth_rate\r\n\r\n def forward(self, x_input, mask, cache=None):\r\n \"\"\"Compute encoded features.\r\n\r\n Args:\r\n x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.\r\n - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].\r\n - w/o pos emb: Tensor (#batch, time, size).\r\n mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).\r\n cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, size).\r\n torch.Tensor: Mask tensor (#batch, 1, time).\r\n\r\n \"\"\"\r\n if isinstance(x_input, tuple):\r\n x, pos_emb = x_input[0], x_input[1]\r\n else:\r\n x, pos_emb = x_input, None\r\n\r\n skip_layer = False\r\n # with stochastic depth, residual connection `x + f(x)` becomes\r\n # `x <- x + 1 / (1 - p) * f(x)` at training time.\r\n stoch_layer_coeff = 1.0\r\n if self.training and self.stochastic_depth_rate > 0:\r\n skip_layer = torch.rand(1).item() < self.stochastic_depth_rate\r\n stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)\r\n\r\n if skip_layer:\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n return x, mask\r\n\r\n # whether to use macaron style\r\n if self.feed_forward_macaron is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward_macaron(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n\r\n # convolution module\r\n \"\"\"\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n \"\"\"\r\n\r\n # multi-headed self-attention module\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n if cache is None:\r\n x_q = x\r\n else:\r\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\r\n x_q = x[:, -1:, :]\r\n residual = residual[:, -1:, :]\r\n mask = None if 
mask is None else mask[:, -1:, :]\r\n\r\n if pos_emb is not None:\r\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\r\n else:\r\n x_att = self.self_attn(x_q, x, x, mask)\r\n\r\n if self.concat_after:\r\n x_concat = torch.cat((x, x_att), dim=-1)\r\n x = residual + stoch_layer_coeff * self.concat_linear(x_concat)\r\n else:\r\n x = residual + stoch_layer_coeff * self.dropout(x_att)\r\n if not self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n # convolution module\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n\r\n # feed forward module\r\n if self.feed_forward:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff(x)\r\n else:\r\n raise ValueError(\"not exit\")\r\n\r\n if self.conv_module is not None:\r\n x = self.norm_final(x)\r\n\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n\r\n return x, mask\r" }, { "identifier": "get_activation", "path": "src/clap_module/conformer/modules.py", "snippet": "def get_activation(act):\r\n \"\"\"Return activation function.\r\n \"\"\"\r\n # Lazy load to avoid unused import\r\n\r\n activation_funcs = {\r\n \"hardtanh\": torch.nn.Hardtanh,\r\n \"tanh\": torch.nn.Tanh,\r\n \"relu\": torch.nn.ReLU,\r\n \"selu\": torch.nn.SELU,\r\n \"swish\": Swish,\r\n }\r\n\r\n return activation_funcs[act]()\r" }, { "identifier": "VGG2L", "path": "src/clap_module/conformer/modules.py", "snippet": "class VGG2L(torch.nn.Module):\r\n \"\"\"VGG2L module for custom encoder.\r\n\r\n Args:\r\n idim: Input dimension.\r\n odim: Output dimension.\r\n pos_enc: Positional encoding class.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):\r\n \"\"\"Construct a VGG2L object.\"\"\"\r\n super().__init__()\r\n\r\n self.vgg2l = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((3, 2)),\r\n torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((2, 2)),\r\n )\r\n\r\n if pos_enc is not None:\r\n self.output = torch.nn.Sequential(\r\n torch.nn.Linear(128 * ((idim // 2) // 2), odim), pos_enc\r\n )\r\n else:\r\n self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)\r\n\r\n def forward(\r\n self, feats: torch.Tensor, feats_mask: torch.Tensor\r\n ) -> Union[\r\n Tuple[torch.Tensor, torch.Tensor],\r\n Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\r\n ]:\r\n \"\"\"Forward VGG2L bottleneck.\r\n\r\n Args:\r\n feats: Feature sequences. (B, F, D_feats)\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_output: VGG output sequences.\r\n (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))\r\n vgg_mask: Mask of VGG output sequences. 
(B, 1, sub(F))\r\n\r\n \"\"\"\r\n feats = feats.unsqueeze(1)\r\n vgg_output = self.vgg2l(feats)\r\n\r\n b, c, t, f = vgg_output.size()\r\n\r\n vgg_output = self.output(\r\n vgg_output.transpose(1, 2).contiguous().view(b, t, c * f)\r\n )\r\n\r\n if feats_mask is not None:\r\n vgg_mask = self.create_new_mask(feats_mask)\r\n else:\r\n vgg_mask = feats_mask\r\n\r\n return vgg_output, vgg_mask\r\n\r\n def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Create a subsampled mask of feature sequences.\r\n\r\n Args:\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))\r\n\r\n \"\"\"\r\n vgg1_t_len = feats_mask.size(2) - (feats_mask.size(2) % 3)\r\n vgg_mask = feats_mask[:, :, :vgg1_t_len][:, :, ::3]\r\n\r\n vgg2_t_len = vgg_mask.size(2) - (vgg_mask.size(2) % 2)\r\n vgg_mask = vgg_mask[:, :, :vgg2_t_len][:, :, ::2]\r\n\r\n return vgg_mask\r" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)\r\n\r\n if self.zero_triu:\r\n ones = torch.ones((x.size(2), x.size(3)))\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, time1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "MultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class MultiHeadedAttention(nn.Module):\r\n \"\"\"Multi-Head Attention layer.\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate):\r\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert n_feat % n_head == 0\r\n # We assume d_v always equals d_k\r\n self.d_k = n_feat // n_head\r\n self.h = n_head\r\n self.linear_q = nn.Linear(n_feat, n_feat)\r\n self.linear_k = nn.Linear(n_feat, n_feat)\r\n self.linear_v = nn.Linear(n_feat, n_feat)\r\n self.linear_out = nn.Linear(n_feat, n_feat)\r\n self.attn = None\r\n self.dropout = nn.Dropout(p=dropout_rate)\r\n\r\n def forward_qkv(self, query, key, value):\r\n \"\"\"Transform query, key and value.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n\r\n Returns:\r\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\r\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\r\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\r\n\r\n \"\"\"\r\n n_batch = query.size(0)\r\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\r\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\r\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\r\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\r\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\r\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\r\n\r\n return q, k, v\r\n\r\n def forward_attention(self, value, scores, mask):\r\n \"\"\"Compute attention context vector.\r\n\r\n Args:\r\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\r\n scores (torch.Tensor): Attention 
score (#batch, n_head, time1, time2).\r\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Transformed value (#batch, time1, d_model)\r\n weighted by the attention score (#batch, time1, time2).\r\n\r\n \"\"\"\r\n n_batch = value.size(0)\r\n if mask is not None:\r\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\r\n min_value = torch.finfo(scores.dtype).min\r\n scores = scores.masked_fill(mask, min_value)\r\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\r\n mask, 0.0\r\n ) # (batch, head, time1, time2)\r\n else:\r\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\r\n\r\n p_attn = self.dropout(self.attn)\r\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\r\n x = (\r\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\r\n ) # (batch, time1, d_model)\r\n\r\n return self.linear_out(x) # (batch, time1, d_model)\r\n\r\n def forward(self, query, key, value, mask):\r\n \"\"\"Compute scaled dot product attention.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\r\n time1 means the length of query vector.\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)[\r\n :, :, :, : x.size(-1) // 2 + 1\r\n ] # only keep the positions from 0 to time2\r\n\r\n if self.zero_triu:\r\n 
ones = torch.ones((x.size(2), x.size(3)), device=x.device)\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor\r\n (#batch, 2*time1-1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, 2*time1-1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "LegacyRelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\r\n \"\"\"Relative positional encoding module (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(\r\n d_model=d_model,\r\n dropout_rate=dropout_rate,\r\n max_len=max_len,\r\n reverse=True,\r\n )\r\n\r\n def forward(self, x):\r\n \"\"\"Compute positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n torch.Tensor: Positional embedding tensor (1, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[:, : x.size(1)]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "PositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\r\n \"\"\"Positional encoding.\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n reverse (bool): Whether to reverse the input position. Only for\r\n the class LegacyRelPositionalEncoding. 
We remove it in the current\r\n class RelPositionalEncoding.\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(PositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.reverse = reverse\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n self._register_load_state_dict_pre_hook(_pre_hook)\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n if self.pe.size(1) >= x.size(1):\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n pe = torch.zeros(x.size(1), self.d_model)\r\n if self.reverse:\r\n position = torch.arange(\r\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\r\n ).unsqueeze(1)\r\n else:\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale + self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "RelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\r\n \"\"\"Relative positional encoding module (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Construct an PositionalEncoding object.\r\n \"\"\"\r\n super(RelPositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n # self.pe contains both positive and negative parts\r\n # the length of self.pe is 2 * input_len - 1\r\n if self.pe.size(1) >= x.size(1) * 2 - 1:\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n # Suppose `i` means to the position of query vecotr and `j` means the\r\n # position of key vector. 
We use position relative positions when keys\r\n # are to the left (i>j) and negative relative positions otherwise (i<j).\r\n pe_positive = torch.zeros(x.size(1), self.d_model)\r\n pe_negative = torch.zeros(x.size(1), self.d_model)\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe_positive[:, 0::2] = torch.sin(position * div_term)\r\n pe_positive[:, 1::2] = torch.cos(position * div_term)\r\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\r\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\r\n\r\n # Reserve the order of positive indices and concat both positive and\r\n # negative indices. This is used to support the shifting trick\r\n # as in https://arxiv.org/abs/1901.02860\r\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\r\n pe_negative = pe_negative[1:].unsqueeze(0)\r\n pe = torch.cat([pe_positive, pe_negative], dim=1)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[\r\n :,\r\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\r\n ]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "ScaledPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\r\n \"\"\"Scaled positional encoding module.\r\n\r\n See Sec. 3.2 https://arxiv.org/abs/1809.08895\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\r\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\r\n\r\n def reset_parameters(self):\r\n \"\"\"Reset parameters.\"\"\"\r\n self.alpha.data = torch.tensor(1.0)\r\n\r\n def forward(self, x):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x + self.alpha * self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "LayerNorm", "path": "src/clap_module/conformer/modules.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\r\n \"\"\"Layer normalization module.\r\n\r\n Args:\r\n nout (int): Output dim size.\r\n dim (int): Dimension to be normalized.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, nout, dim=-1):\r\n \"\"\"Construct an LayerNorm object.\"\"\"\r\n super(LayerNorm, self).__init__(nout, eps=1e-12)\r\n self.dim = dim\r\n\r\n def forward(self, x):\r\n \"\"\"Apply layer normalization.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor.\r\n\r\n Returns:\r\n torch.Tensor: Normalized tensor.\r\n\r\n \"\"\"\r\n if self.dim == -1:\r\n return super(LayerNorm, self).forward(x)\r\n return (\r\n super(LayerNorm, self)\r\n .forward(x.transpose(self.dim, -1))\r\n .transpose(self.dim, -1)\r\n )\r" }, { "identifier": "Conv1dLinear", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\r\n 
\"\"\"Conv1D + Linear for Transformer block.\r\n\r\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize Conv1dLinear module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(Conv1dLinear, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x))\r" }, { "identifier": "MultiLayeredConv1d", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\r\n \"\"\"Multi-layered conv1d for Transformer block.\r\n\r\n This is a module of multi-leyered conv1d designed\r\n to replace positionwise feed-forward network\r\n in Transforner block, which is introduced in\r\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\r\n\r\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\r\n https://arxiv.org/pdf/1905.09263.pdf\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize MultiLayeredConv1d module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(MultiLayeredConv1d, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Conv1d(\r\n hidden_chans,\r\n in_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)\r" }, { "identifier": "PositionwiseFeedForward", "path": "src/clap_module/conformer/modules.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\r\n \"\"\"Positionwise feed forward layer.\r\n\r\n Args:\r\n idim (int): Input dimenstion.\r\n hidden_units (int): The number of hidden units.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\r\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = torch.nn.Linear(idim, hidden_units)\r\n self.w_2 = torch.nn.Linear(hidden_units, idim)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Forward 
function.\"\"\"\r\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\r" }, { "identifier": "repeat", "path": "src/clap_module/conformer/modules.py", "snippet": "def repeat(N, fn, layer_drop_rate=0.0):\r\n \"\"\"Repeat module N times.\r\n\r\n Args:\r\n N (int): Number of repeat time.\r\n fn (Callable): Function to generate module.\r\n layer_drop_rate (float): Probability of dropping out each fn (layer).\r\n\r\n Returns:\r\n MultiSequential: Repeated model instance.\r\n\r\n \"\"\"\r\n return MultiSequential(*[fn(n) for n in range(N)], layer_drop_rate=layer_drop_rate)\r" }, { "identifier": "Conv2dSubsampling", "path": "src/clap_module/conformer/sub_sampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\r\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n odim (int): Output dimension.\r\n dropout_rate (float): Dropout rate.\r\n pos_enc (torch.nn.Module): Custom position encoding layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\r\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\r\n super(Conv2dSubsampling, self).__init__()\r\n self.conv = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(odim, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n )\r\n self.out = torch.nn.Sequential(\r\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\r\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\r\n )\r\n\r\n def forward(self, x, x_mask):\r\n \"\"\"Subsample x.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, idim).\r\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\r\n\r\n Returns:\r\n torch.Tensor: Subsampled tensor (#batch, time', odim),\r\n where time' = time // 4.\r\n torch.Tensor: Subsampled mask (#batch, 1, time'),\r\n where time' = time // 4.\r\n\r\n \"\"\"\r\n x = x.unsqueeze(1) # (b, c, t, f)\r\n x = self.conv(x)\r\n b, c, t, f = x.size()\r\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\r\n if x_mask is None:\r\n return x, None\r\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get item.\r\n\r\n When reset_parameters() is called, if use_scaled_pos_enc is used,\r\n return the positioning encoding.\r\n\r\n \"\"\"\r\n if key != -1:\r\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\r\n return self.out[key]\r" }, { "identifier": "AttentionPool1d", "path": "src/clap_module/feature_fusion.py", "snippet": "class AttentionPool1d(nn.Module):\r\n def __init__(\r\n self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None\r\n ):\r\n super().__init__()\r\n self.positional_embedding = nn.Parameter(\r\n torch.randn(spacial_dim + 1, embed_dim) / embed_dim\r\n # torch.randn(spacial_dim, embed_dim) / embed_dim\r\n )\r\n self.k_proj = nn.Linear(embed_dim, embed_dim)\r\n self.q_proj = nn.Linear(embed_dim, embed_dim)\r\n self.v_proj = nn.Linear(embed_dim, embed_dim)\r\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\r\n self.num_heads = num_heads\r\n\r\n def forward(self, x):\r\n # import pdb; pdb.set_trace()\r\n x = x.permute(1, 0, 2) # B*L*D -> L*B*D; NCHW -> (HW)NC\r\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\r\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\r\n x, _ = F.multi_head_attention_forward(\r\n query=x,\r\n key=x,\r\n value=x,\r\n embed_dim_to_check=x.shape[-1],\r\n num_heads=self.num_heads,\r\n 
q_proj_weight=self.q_proj.weight,\r\n k_proj_weight=self.k_proj.weight,\r\n v_proj_weight=self.v_proj.weight,\r\n in_proj_weight=None,\r\n in_proj_bias=torch.cat(\r\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]\r\n ),\r\n bias_k=None,\r\n bias_v=None,\r\n add_zero_attn=False,\r\n dropout_p=0,\r\n out_proj_weight=self.c_proj.weight,\r\n out_proj_bias=self.c_proj.bias,\r\n use_separate_proj_weight=True,\r\n training=self.training,\r\n need_weights=False,\r\n )\r\n\r\n return x[0] # B*D\r" }, { "identifier": "DAF", "path": "src/clap_module/feature_fusion.py", "snippet": "class DAF(nn.Module):\r\n \"\"\"直接相加 DirectAddFuse\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(DAF, self).__init__()\r\n\r\n def forward(self, x, residual):\r\n return x + residual\r" }, { "identifier": "AFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class AFF(nn.Module):\r\n \"\"\"多特征融合 AFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(AFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported.'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xo = 2 * x * wei + 2 * residual * (1 - wei)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" }, { "identifier": "iAFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class iAFF(nn.Module):\r\n \"\"\"多特征融合 iAFF\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(iAFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n # 本地注意力\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 全局注意力\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # 第二次本地注意力\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n # 第二次全局注意力\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise f'the type is not supported'\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xi = x * wei + residual * (1 - wei)\r\n\r\n xl2 = self.local_att2(xi)\r\n xg2 = self.global_att(xi)\r\n xlg2 = xl2 + xg2\r\n wei2 = self.sigmoid(xlg2)\r\n xo = x * wei2 + residual * (1 - wei2)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" } ]
import logging
import torch
import math
from .convolution import ConvolutionModule
from .encoder_layer import EncoderLayer
from .modules import get_activation
from .modules import VGG2L
from .modules import (
    LegacyRelPositionMultiHeadedAttention,
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,
)
from .embedding import (
    LegacyRelPositionalEncoding,
    PositionalEncoding,
    RelPositionalEncoding,
    ScaledPositionalEncoding,
)
from .modules import LayerNorm
from .multi_layer_conv import (
    Conv1dLinear,
    MultiLayeredConv1d,
)
from .modules import (
    PositionwiseFeedForward,
)
from .modules import repeat
from .sub_sampling import Conv2dSubsampling
from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14271
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#           Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Encoder definition."""


class Encoder(torch.nn.Module):
    """Conformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernerl size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1. if not None, intermediate outputs are returned
            (which changes return type signature.)

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        ffn_layer_type="linear",
        ffn_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="relu",
        use_cnn_module=True,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
        max_seq_len=100,
        enable_fusion=False,
        fusion_type="",
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        self.max_seq_len = max_seq_len
        activation = get_activation(activation_type)

        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)

        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#           Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)

"""Encoder definition."""


class Encoder(torch.nn.Module):
    """Conformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernerl size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1. if not None, intermediate outputs are returned
            (which changes return type signature.)

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        ffn_layer_type="linear",
        ffn_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="relu",
        use_cnn_module=True,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
        max_seq_len=100,
        enable_fusion=False,
        fusion_type="",
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        self.max_seq_len = max_seq_len
        activation = get_activation(activation_type)

        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)

        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
torch.nn.LayerNorm(attention_dim),
11
2023-11-25 02:38:32+00:00
16k
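Note: the record above is a next-line completion example. Its cropped_code stops inside the encoder's input_layer == "linear" branch, immediately after torch.nn.Linear(idim, attention_dim), and the gold next_line is torch.nn.LayerNorm(attention_dim),. The sketch below shows how that branch is commonly completed in ESPnet-style Conformer encoders; the helper name build_linear_input_layer is hypothetical, only the LayerNorm call is confirmed by this record, and the Dropout and positional-encoding lines are assumptions based on the usual pattern, not content taken from the record.

import torch
from typing import Callable

def build_linear_input_layer(
    idim: int,
    attention_dim: int,
    dropout_rate: float,
    positional_dropout_rate: float,
    pos_enc_class: Callable[..., torch.nn.Module],
) -> torch.nn.Sequential:
    """Hypothetical helper (a sketch): only the LayerNorm line is confirmed by the
    record's gold next_line; the remaining lines are assumed from the usual
    ESPnet-style "linear" input layer."""
    return torch.nn.Sequential(
        torch.nn.Linear(idim, attention_dim),
        torch.nn.LayerNorm(attention_dim),  # matches the record's gold next_line
        torch.nn.Dropout(dropout_rate),  # assumed
        pos_enc_class(attention_dim, positional_dropout_rate),  # assumed
    )

# Example (assumed values): build_linear_input_layer(80, 256, 0.1, 0.1, PositionalEncoding)
# mirrors the embed stack that the record's cropped_code begins to build.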
facebookresearch/ExPLORe
train_finetuning_pixels.py
[ { "identifier": "DrQLearner", "path": "rlpd/agents/drq/drq_learner.py", "snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n temp_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n num_qs: int = 2,\n num_min_qs: Optional[int] = None,\n critic_dropout_rate: Optional[float] = None,\n critic_layer_norm: bool = False,\n target_entropy: Optional[float] = None,\n init_temperature: float = 1.0,\n backup_entropy: bool = True,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n bc_coeff: float = 0,\n ):\n \"\"\"\n An implementation of the version of Soft-Actor-Critic described in https://arxiv.org/abs/1812.05905\n \"\"\"\n\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n actions = action_space.sample()\n\n if target_entropy is None:\n target_entropy = -action_dim / 2\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=True,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = FrozenDict(actor_def.init(actor_key, observations)[\"params\"])\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n critic_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n dropout_rate=critic_dropout_rate,\n use_layer_norm=critic_layer_norm,\n )\n critic_cls = partial(StateActionValue, base_cls=critic_base_cls)\n critic_cls = partial(Ensemble, net_cls=critic_cls, num=num_qs)\n critic_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=critic_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n critic_params = FrozenDict(\n critic_def.init(critic_key, observations, actions)[\"params\"]\n )\n critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.adam(learning_rate=critic_lr),\n )\n target_critic = TrainState.create(\n apply_fn=critic_def.apply,\n params=critic_params,\n tx=optax.GradientTransformation(lambda _: None, lambda _: None),\n )\n\n temp_def = Temperature(init_temperature)\n temp_params = FrozenDict(temp_def.init(temp_key)[\"params\"])\n temp = TrainState.create(\n apply_fn=temp_def.apply,\n params=temp_params,\n tx=optax.adam(learning_rate=temp_lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = 
batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n actor=actor,\n critic=critic,\n target_critic=target_critic,\n temp=temp,\n target_entropy=target_entropy,\n tau=tau,\n discount=discount,\n num_qs=num_qs,\n num_min_qs=num_min_qs,\n backup_entropy=backup_entropy,\n data_augmentation_fn=data_augmentation_fn,\n bc_coeff=bc_coeff,\n )\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n new_agent = self\n\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n actor = _share_encoder(source=new_agent.critic, target=new_agent.actor)\n new_agent = new_agent.replace(actor=actor)\n\n rng, key = jax.random.split(new_agent.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n\n new_agent = new_agent.replace(rng=rng)\n\n return SACLearner.update(new_agent, batch, utd_ratio)" }, { "identifier": "PixelBCAgent", "path": "rlpd/agents/drq/bc.py", "snippet": "class PixelBCAgent(BCAgent):\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n actor_lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n encoder: str = \"d4pg\",\n ):\n assert encoder == \"d4pg\"\n action_dim = action_space.shape[-1]\n observations = observation_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key = jax.random.split(rng, 2)\n\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n actor_base_cls = partial(MLP, hidden_dims=hidden_dims, activate_final=True)\n actor_cls = partial(TanhNormal, base_cls=actor_base_cls, action_dim=action_dim)\n actor_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=actor_cls,\n latent_dim=latent_dim,\n stop_gradient=False,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n actor_params = FrozenDict(actor_def.init(actor_key, observations)[\"params\"])\n actor = TrainState.create(\n apply_fn=actor_def.apply,\n params=actor_params,\n tx=optax.adam(learning_rate=actor_lr),\n )\n\n return cls(\n rng=rng,\n actor=actor,\n )" }, { "identifier": "PixelRM", "path": "rlpd/agents/drq/rm.py", "snippet": "class PixelRM(struct.PyTreeNode):\n rng: PRNGKey\n r_net: TrainState\n m_net: TrainState\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n encoder: str = \"d4pg\",\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] 
= (),\n ):\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key = jax.random.split(rng)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n net_cls = partial(StateValue, base_cls=base_cls)\n ucb_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=net_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n r_params = FrozenDict(ucb_def.init(key, observations)[\"params\"])\n r_net = TrainState.create(\n apply_fn=ucb_def.apply,\n params=r_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n m_params = FrozenDict(ucb_def.init(key, observations)[\"params\"])\n m_net = TrainState.create(\n apply_fn=ucb_def.apply,\n params=m_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n r_net=r_net,\n m_net=m_net,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n def _update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n def r_loss_fn(r_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n rs = self.r_net.apply_fn({\"params\": r_params}, batch[\"observations\"])\n\n loss = ((rs - batch[\"rewards\"]) ** 2.0).mean()\n return loss, {\"r_loss\": loss}\n\n grads, r_info = jax.grad(r_loss_fn, has_aux=True)(self.r_net.params)\n r_net = self.r_net.apply_gradients(grads=grads)\n\n def m_loss_fn(m_params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n ms = self.m_net.apply_fn({\"params\": m_params}, batch[\"observations\"])\n\n loss = optax.sigmoid_binary_cross_entropy(ms, batch[\"masks\"]).mean()\n return loss, {\"m_loss\": loss}\n\n grads, m_info = jax.grad(m_loss_fn, has_aux=True)(self.m_net.params)\n m_net = self.m_net.apply_gradients(grads=grads)\n\n return self.replace(r_net=r_net, m_net=m_net), {**r_info, **m_info}\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_self = self.replace(rng=rng)\n\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_self, info = new_self._update(mini_batch)\n\n return new_self, info\n\n @jax.jit\n def get_reward(self, batch):\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n rewards = self.r_net.apply_fn(\n {\"params\": self.r_net.params}, batch[\"observations\"]\n )\n return rewards\n\n @jax.jit\n def get_mask(self, batch):\n if \"pixels\" not in 
batch[\"next_observations\"]:\n batch = _unpack(batch)\n\n logits = self.m_net.apply_fn(\n {\"params\": self.m_net.params}, batch[\"observations\"]\n )\n return jax.nn.sigmoid(logits)" }, { "identifier": "PixelRND", "path": "rlpd/agents/drq/rnd.py", "snippet": "class PixelRND(struct.PyTreeNode):\n rng: PRNGKey\n net: TrainState\n frozen_net: TrainState\n coeff: float = struct.field(pytree_node=False)\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n coeff: float = 1.0,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n feature_dim: int = 256,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] = (),\n ):\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key1, key2 = jax.random.split(rng, 3)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n rnd_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n rnd_cls = partial(StateFeature, base_cls=rnd_base_cls, feature_dim=feature_dim)\n net_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=rnd_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n params = FrozenDict(net_def.init(key1, observations)[\"params\"])\n net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n frozen_params = FrozenDict(net_def.init(key2, observations)[\"params\"])\n frozen_net = TrainState.create(\n apply_fn=net_def.apply,\n params=frozen_params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n net=net,\n frozen_net=frozen_net,\n coeff=coeff,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n @jax.jit\n def update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n }\n )\n new_self = self.replace(rng=rng)\n\n def loss_fn(params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n feats = new_self.net.apply_fn({\"params\": params}, batch[\"observations\"])\n frozen_feats = new_self.frozen_net.apply_fn(\n {\"params\": new_self.frozen_net.params}, batch[\"observations\"]\n )\n\n loss = ((feats - frozen_feats) ** 2.0).mean()\n return loss, {\"rnd_loss\": loss}\n\n grads, info = jax.grad(loss_fn, has_aux=True)(new_self.net.params)\n net = new_self.net.apply_gradients(grads=grads)\n\n return new_self.replace(net=net), info\n\n @jax.jit\n 
def get_reward(self, batch):\n if \"pixels\" not in batch[\"next_observations\"]:\n batch = _unpack(batch)\n feats = self.net.apply_fn({\"params\": self.net.params}, batch[\"observations\"])\n frozen_feats = self.net.apply_fn(\n {\"params\": self.frozen_net.params}, batch[\"observations\"]\n )\n return jnp.mean((feats - frozen_feats) ** 2.0, axis=-1) * self.coeff" }, { "identifier": "MemoryEfficientReplayBuffer", "path": "rlpd/data/memory_efficient_replay_buffer.py", "snippet": "class MemoryEfficientReplayBuffer(ReplayBuffer):\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n capacity: int,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n ):\n self.pixel_keys = pixel_keys\n\n observation_space = copy.deepcopy(observation_space)\n self._num_stack = None\n for pixel_key in self.pixel_keys:\n pixel_obs_space = observation_space.spaces[pixel_key]\n if self._num_stack is None:\n self._num_stack = pixel_obs_space.shape[-1]\n else:\n assert self._num_stack == pixel_obs_space.shape[-1]\n self._unstacked_dim_size = pixel_obs_space.shape[-2]\n low = pixel_obs_space.low[..., 0]\n high = pixel_obs_space.high[..., 0]\n unstacked_pixel_obs_space = Box(\n low=low, high=high, dtype=pixel_obs_space.dtype\n )\n observation_space.spaces[pixel_key] = unstacked_pixel_obs_space\n\n next_observation_space_dict = copy.deepcopy(observation_space.spaces)\n for pixel_key in self.pixel_keys:\n next_observation_space_dict.pop(pixel_key)\n next_observation_space = gym.spaces.Dict(next_observation_space_dict)\n\n self._first = True\n self._is_correct_index = np.full(capacity, False, dtype=bool)\n\n super().__init__(\n observation_space,\n action_space,\n capacity,\n next_observation_space=next_observation_space,\n )\n\n def insert(self, data_dict: DatasetDict):\n if self._insert_index == 0 and self._capacity == len(self) and not self._first:\n indxs = np.arange(len(self) - self._num_stack, len(self))\n for indx in indxs:\n element = super().sample(1, indx=indx)\n self._is_correct_index[self._insert_index] = False\n super().insert(element)\n\n data_dict = data_dict.copy()\n data_dict[\"observations\"] = data_dict[\"observations\"].copy()\n data_dict[\"next_observations\"] = data_dict[\"next_observations\"].copy()\n\n obs_pixels = {}\n next_obs_pixels = {}\n for pixel_key in self.pixel_keys:\n obs_pixels[pixel_key] = data_dict[\"observations\"].pop(pixel_key)\n next_obs_pixels[pixel_key] = data_dict[\"next_observations\"].pop(pixel_key)\n\n if self._first:\n for i in range(self._num_stack):\n for pixel_key in self.pixel_keys:\n data_dict[\"observations\"][pixel_key] = obs_pixels[pixel_key][..., i]\n\n self._is_correct_index[self._insert_index] = False\n super().insert(data_dict)\n\n for pixel_key in self.pixel_keys:\n data_dict[\"observations\"][pixel_key] = next_obs_pixels[pixel_key][..., -1]\n\n self._first = data_dict[\"dones\"]\n\n self._is_correct_index[self._insert_index] = True\n super().insert(data_dict)\n\n for i in range(self._num_stack):\n indx = (self._insert_index + i) % len(self)\n self._is_correct_index[indx] = False\n\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n pack_obs_and_next_obs: bool = False,\n ) -> frozen_dict.FrozenDict:\n \"\"\"Samples from the replay buffer.\n\n Args:\n batch_size: Minibatch size.\n keys: Keys to sample.\n indx: Take indices instead of sampling.\n pack_obs_and_next_obs: whether to pack img and next_img into one image.\n It's useful when they have overlapping frames.\n\n 
Returns:\n A frozen dictionary.\n \"\"\"\n\n if indx is None:\n if hasattr(self.np_random, \"integers\"):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n for i in range(batch_size):\n while not self._is_correct_index[indx[i]]:\n if hasattr(self.np_random, \"integers\"):\n indx[i] = self.np_random.integers(len(self))\n else:\n indx[i] = self.np_random.randint(len(self))\n else:\n pass\n\n if keys is None:\n keys = self.dataset_dict.keys()\n else:\n assert \"observations\" in keys\n\n keys = list(keys)\n keys.remove(\"observations\")\n\n batch = super().sample(batch_size, keys, indx)\n batch = batch.unfreeze()\n\n obs_keys = self.dataset_dict[\"observations\"].keys()\n obs_keys = list(obs_keys)\n for pixel_key in self.pixel_keys:\n obs_keys.remove(pixel_key)\n\n batch[\"observations\"] = {}\n for k in obs_keys:\n batch[\"observations\"][k] = _sample(\n self.dataset_dict[\"observations\"][k], indx\n )\n\n for pixel_key in self.pixel_keys:\n obs_pixels = self.dataset_dict[\"observations\"][pixel_key]\n obs_pixels = np.lib.stride_tricks.sliding_window_view(\n obs_pixels, self._num_stack + 1, axis=0\n )\n obs_pixels = obs_pixels[indx - self._num_stack]\n\n if pack_obs_and_next_obs:\n batch[\"observations\"][pixel_key] = obs_pixels\n else:\n batch[\"observations\"][pixel_key] = obs_pixels[..., :-1]\n if \"next_observations\" in keys:\n batch[\"next_observations\"][pixel_key] = obs_pixels[..., 1:]\n\n return frozen_dict.freeze(batch)" }, { "identifier": "ReplayBuffer", "path": "rlpd/data/replay_buffer.py", "snippet": "class ReplayBuffer(Dataset):\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n capacity: int,\n next_observation_space: Optional[gym.Space] = None,\n ):\n if next_observation_space is None:\n next_observation_space = observation_space\n\n observation_data = _init_replay_dict(observation_space, capacity)\n next_observation_data = _init_replay_dict(next_observation_space, capacity)\n dataset_dict = dict(\n observations=observation_data,\n next_observations=next_observation_data,\n actions=np.empty((capacity, *action_space.shape), dtype=action_space.dtype),\n rewards=np.empty((capacity,), dtype=np.float32),\n masks=np.empty((capacity,), dtype=np.float32),\n dones=np.empty((capacity,), dtype=np.float32),\n )\n\n super().__init__(dataset_dict)\n\n self._size = 0\n self._capacity = capacity\n self._insert_index = 0\n\n def __len__(self) -> int:\n return self._size\n\n def insert(self, data_dict: DatasetDict):\n _insert_recursively(self.dataset_dict, data_dict, self._insert_index)\n\n self._insert_index = (self._insert_index + 1) % self._capacity\n self._size = min(self._size + 1, self._capacity)\n\n def insert_batch(self, data_dict: DatasetDict):\n first_key = list(data_dict.keys())[0]\n batch_size = data_dict[first_key].shape[0]\n\n if self._insert_index + batch_size > self._capacity:\n self._insert_index = 0\n self._size = max(self._size, self._insert_index + batch_size)\n _insert_recursively_batch(\n self.dataset_dict, data_dict, self._insert_index, batch_size\n )\n\n def get_iterator(self, queue_size: int = 2, sample_args: dict = {}):\n # See https://flax.readthedocs.io/en/latest/_modules/flax/jax_utils.html#prefetch_to_device\n # queue_size = 2 should be ok for one GPU.\n\n queue = collections.deque()\n\n def enqueue(n):\n for _ in range(n):\n data = self.sample(**sample_args)\n queue.append(jax.device_put(data))\n\n enqueue(queue_size)\n while queue:\n yield 
queue.popleft()\n enqueue(1)" }, { "identifier": "evaluate", "path": "rlpd/evaluation.py", "snippet": "def evaluate(agent, env: gym.Env, num_episodes: int) -> Dict[str, float]:\n\n trajs = []\n cum_returns = []\n cum_lengths = []\n for i in range(num_episodes):\n observation, done = env.reset(), False\n traj = [observation]\n cum_return = 0\n cum_length = 0\n while not done:\n action = agent.eval_actions(observation)\n observation, reward, done, _ = env.step(action)\n cum_return += reward\n cum_length += 1\n traj.append(observation)\n cum_returns.append(cum_return)\n cum_lengths.append(cum_length)\n trajs.append({\"observation\": np.stack(traj, axis=0)})\n return {\"return\": np.mean(cum_returns), \"length\": np.mean(cum_lengths)}, trajs" }, { "identifier": "wrap_pixels", "path": "rlpd/wrappers/pixels.py", "snippet": "def wrap_pixels(\n env: gym.Env,\n action_repeat: int,\n image_size: int = 84,\n num_stack: Optional[int] = 3,\n camera_id: int = 0,\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n) -> gym.Env:\n if action_repeat > 1:\n env = RepeatAction(env, action_repeat)\n\n env = UniversalSeed(env)\n env = gym.wrappers.RescaleAction(env, -1, 1)\n\n env = PixelObservationWrapper(\n env,\n pixels_only=True,\n render_kwargs={\n \"pixels\": {\n \"height\": image_size,\n \"width\": image_size,\n \"camera_id\": camera_id,\n }\n },\n pixel_keys=pixel_keys,\n )\n\n if num_stack is not None:\n env = FrameStack(env, num_stack=num_stack)\n\n env = gym.wrappers.ClipAction(env)\n\n return env, pixel_keys" }, { "identifier": "PixelICVF", "path": "rlpd/agents/drq/icvf.py", "snippet": "class PixelICVF(struct.PyTreeNode):\n rng: PRNGKey\n net: TrainState\n target_net: TrainState\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n action_space: gym.Space,\n lr: float = 3e-4,\n cnn_features: Sequence[int] = (32, 32, 32, 32),\n cnn_filters: Sequence[int] = (3, 3, 3, 3),\n cnn_strides: Sequence[int] = (2, 1, 1, 1),\n cnn_padding: str = \"VALID\",\n latent_dim: int = 50,\n feature_dim: int = 256,\n encoder: str = \"d4pg\",\n hidden_dims: Sequence[int] = (256, 256),\n pixel_keys: Tuple[str, ...] = (\"pixels\",),\n depth_keys: Tuple[str, ...] 
= (),\n **kwargs,\n ):\n print(\"Got additional kwargs: \", kwargs)\n\n observations = observation_space.sample()\n actions = action_space.sample()\n\n rng = jax.random.PRNGKey(seed)\n rng, key1, key2 = jax.random.split(rng, 3)\n\n if encoder == \"d4pg\":\n encoder_cls = partial(\n D4PGEncoder,\n features=cnn_features,\n filters=cnn_filters,\n strides=cnn_strides,\n padding=cnn_padding,\n )\n else:\n raise NotImplementedError\n rnd_base_cls = partial(\n MLP,\n hidden_dims=hidden_dims,\n activate_final=True,\n )\n rnd_cls = partial(ICVF, base_cls=rnd_base_cls, feature_dim=feature_dim)\n net_def = PixelMultiplexer(\n encoder_cls=encoder_cls,\n network_cls=rnd_cls,\n latent_dim=latent_dim,\n pixel_keys=pixel_keys,\n depth_keys=depth_keys,\n )\n params = FrozenDict(net_def.init(key1, observations)[\"params\"])\n net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n target_net = TrainState.create(\n apply_fn=net_def.apply,\n params=params,\n tx=optax.adam(learning_rate=lr),\n )\n\n def data_augmentation_fn(rng, observations):\n for pixel_key, depth_key in zip_longest(pixel_keys, depth_keys):\n key, rng = jax.random.split(rng)\n observations = batched_random_crop(key, observations, pixel_key)\n if depth_key is not None:\n observations = batched_random_crop(key, observations, depth_key)\n return observations\n\n return cls(\n rng=rng,\n net=net,\n target_net=target_net,\n data_augmentation_fn=data_augmentation_fn,\n )\n\n def _update(self, batch: DatasetDict) -> Tuple[struct.PyTreeNode, Dict[str, float]]:\n def loss_fn(params) -> Tuple[jnp.ndarray, Dict[str, float]]:\n def get_v(params, s, g, z):\n phi = self.net.apply_fn({\"params\": params}, s)[\"phi\"]\n psi = self.net.apply_fn({\"params\": params}, g)[\"psi\"]\n T = self.net.apply_fn({\"params\": params}, z)[\"T\"]\n phi_T = apply_layernorm(phi * T)\n psi_T = apply_layernorm(psi * T)\n return -1 * optax.safe_norm(phi_T - psi_T, 1e-3, axis=-1)\n\n V = get_v(\n params, batch[\"observations\"], batch[\"goals\"], batch[\"desired_goals\"]\n )\n nV = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"goals\"],\n batch[\"desired_goals\"],\n )\n target_V = batch[\"rewards\"] + 0.99 * batch[\"masks\"] * nV\n\n V_z = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"desired_goals\"],\n batch[\"desired_goals\"],\n )\n nV_z = get_v(\n self.target_net.params,\n batch[\"next_observations\"],\n batch[\"desired_goals\"],\n batch[\"desired_goals\"],\n )\n adv = batch[\"desired_rewards\"] + 0.99 * batch[\"desired_masks\"] * nV_z - V_z\n\n def expectile_fn(adv, loss, expectile):\n weight = jnp.where(adv >= 0, expectile, 1 - expectile)\n return weight * loss\n\n def masked_mean(x, mask):\n mask = (mask > 0).astype(jnp.float32)\n return jnp.sum(x * mask) / (1e-5 + jnp.sum(mask))\n\n loss = expectile_fn(adv, jnp.square(V - target_V), 0.9).mean()\n return loss, {\n \"icvf_loss\": loss,\n \"V_success\": masked_mean(V, 1.0 - batch[\"masks\"]),\n \"V_failure\": masked_mean(V, batch[\"masks\"]),\n }\n\n grads, info = jax.grad(loss_fn, has_aux=True)(self.net.params)\n net = self.net.apply_gradients(grads=grads)\n target_params = optax.incremental_update(\n self.net.params, self.target_net.params, 0.005\n )\n target_net = self.target_net.replace(params=target_params)\n return self.replace(net=net, target_net=target_net), info\n\n @partial(jax.jit, static_argnames=\"utd_ratio\")\n def update(self, batch: DatasetDict, utd_ratio: int):\n\n # if \"pixels\" not in 
batch[\"next_observations\"]:\n # batch = _unpack(batch)\n\n rng, key = jax.random.split(self.rng)\n observations = self.data_augmentation_fn(key, batch[\"observations\"])\n rng, key = jax.random.split(rng)\n next_observations = self.data_augmentation_fn(key, batch[\"next_observations\"])\n goals = self.data_augmentation_fn(key, batch[\"goals\"])\n desired_goals = self.data_augmentation_fn(key, batch[\"desired_goals\"])\n\n batch = batch.copy(\n add_or_replace={\n \"observations\": observations,\n \"next_observations\": next_observations,\n \"goals\": goals,\n \"desired_goals\": desired_goals,\n }\n )\n new_self = self.replace(rng=rng)\n\n for i in range(utd_ratio):\n\n def slice(x):\n assert x.shape[0] % utd_ratio == 0\n batch_size = x.shape[0] // utd_ratio\n return x[batch_size * i : batch_size * (i + 1)]\n\n mini_batch = jax.tree_util.tree_map(slice, batch)\n new_self, info = new_self._update(mini_batch)\n\n return new_self, info" }, { "identifier": "gc_dataset", "path": "rlpd/gc_dataset.py", "snippet": "class GCDataset:\nclass GCSDataset(GCDataset):\n def get_default_config():\n def __post_init__(self):\n def sample_goals(self, indx, p_randomgoal=None, p_trajgoal=None, p_currgoal=None):\n def sample(self, batch_size: int, indx=None):\n def get_default_config():\n def sample(self, batch_size: int, indx=None):" }, { "identifier": "Dataset", "path": "rlpd/data/dataset.py", "snippet": "class Dataset(object):\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n self.dataset_dict = dataset_dict\n self.dataset_len = _check_lengths(dataset_dict)\n\n # Seeding similar to OpenAI Gym:\n # https://github.com/openai/gym/blob/master/gym/spaces/space.py#L46\n self._np_random = None\n self._seed = None\n if seed is not None:\n self.seed(seed)\n\n @property\n def np_random(self) -> np.random.RandomState:\n if self._np_random is None:\n self.seed()\n return self._np_random\n\n def seed(self, seed: Optional[int] = None) -> list:\n self._np_random, self._seed = seeding.np_random(seed)\n return [self._seed]\n\n def __len__(self) -> int:\n return self.dataset_len\n\n def get_iter(self, batch_size):\n for i in range(len(self) // batch_size):\n indx = np.arange(i * batch_size, (i + 1) * batch_size)\n indx = np.clip(indx, a_min=0, a_max=len(self) - 1)\n batch = dict()\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n yield frozen_dict.freeze(batch)\n\n def sample(\n self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None,\n ) -> frozen_dict.FrozenDict:\n if indx is None:\n if hasattr(self.np_random, \"integers\"):\n indx = self.np_random.integers(len(self), size=batch_size)\n else:\n indx = self.np_random.randint(len(self), size=batch_size)\n\n batch = dict()\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n for k in keys:\n if isinstance(self.dataset_dict[k], dict):\n batch[k] = _sample(self.dataset_dict[k], indx)\n else:\n batch[k] = self.dataset_dict[k][indx]\n\n return frozen_dict.freeze(batch)\n\n def sample_jax(self, batch_size: int, keys: Optional[Iterable[str]] = None):\n if not hasattr(self, \"rng\"):\n self.rng = jax.random.PRNGKey(self._seed or 42)\n\n if keys is None:\n keys = self.dataset_dict.keys()\n\n jax_dataset_dict = {k: self.dataset_dict[k] for k in keys}\n jax_dataset_dict = jax.device_put(jax_dataset_dict)\n\n @jax.jit\n def _sample_jax(rng):\n key, rng = 
jax.random.split(rng)\n indx = jax.random.randint(\n key, (batch_size,), minval=0, maxval=len(self)\n )\n return rng, jax.tree_map(\n lambda d: jnp.take(d, indx, axis=0), jax_dataset_dict\n )\n\n self._sample_jax = _sample_jax\n\n self.rng, sample = self._sample_jax(self.rng)\n return sample\n\n def split(self, ratio: float) -> Tuple[\"Dataset\", \"Dataset\"]:\n assert 0 < ratio and ratio < 1\n train_index = np.index_exp[: int(self.dataset_len * ratio)]\n test_index = np.index_exp[int(self.dataset_len * ratio) :]\n\n index = np.arange(len(self), dtype=np.int32)\n self.np_random.shuffle(index)\n train_index = index[: int(self.dataset_len * ratio)]\n test_index = index[int(self.dataset_len * ratio) :]\n\n train_dataset_dict = _subselect(self.dataset_dict, train_index)\n test_dataset_dict = _subselect(self.dataset_dict, test_index)\n return Dataset(train_dataset_dict), Dataset(test_dataset_dict)\n\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n episode_starts = [0]\n episode_ends = []\n\n episode_return = 0\n episode_returns = []\n\n for i in range(len(self)):\n episode_return += self.dataset_dict[\"rewards\"][i]\n\n if self.dataset_dict[\"dones\"][i]:\n episode_returns.append(episode_return)\n episode_ends.append(i + 1)\n if i + 1 < len(self):\n episode_starts.append(i + 1)\n episode_return = 0.0\n\n return episode_starts, episode_ends, episode_returns\n\n def filter_by_fn(self, fn):\n bool_indx = np.full((len(self),), False, dtype=bool)\n for i in range(len(self)):\n tran = {k: v[i] for k, v in self.dataset_dict.items()}\n bool_indx[i] = fn(tran)\n\n self.dataset_dict = _subselect(self.dataset_dict, bool_indx)\n self.dataset_len = _check_lengths(self.dataset_dict)\n\n def filter(\n self, take_top: Optional[float] = None, threshold: Optional[float] = None\n ):\n assert (take_top is None and threshold is not None) or (\n take_top is not None and threshold is None\n )\n\n (\n episode_starts,\n episode_ends,\n episode_returns,\n ) = self._trajectory_boundaries_and_returns()\n\n if take_top is not None:\n threshold = np.percentile(episode_returns, 100 - take_top)\n\n bool_indx = np.full((len(self),), False, dtype=bool)\n\n for i in range(len(episode_returns)):\n if episode_returns[i] >= threshold:\n bool_indx[episode_starts[i] : episode_ends[i]] = True\n\n self.dataset_dict = _subselect(self.dataset_dict, bool_indx)\n\n self.dataset_len = _check_lengths(self.dataset_dict)\n\n def normalize_returns(self, scaling: float = 1000):\n (_, _, episode_returns) = self._trajectory_boundaries_and_returns()\n self.dataset_dict[\"rewards\"] /= np.max(episode_returns) - np.min(\n episode_returns\n )\n self.dataset_dict[\"rewards\"] *= scaling" }, { "identifier": "COGDataset", "path": "rlpd/data/cog_datasets.py", "snippet": "class COGDataset(MemoryEfficientReplayBuffer):\n def __init__(\n self,\n env: gym.Env,\n dataset_path: str,\n capacity: int = 500_000,\n subsample_ratio: float = 1.0,\n pixel_keys: tuple = (\"pixels\",),\n np_rng = None,\n load_successes: bool = True,\n ):\n self.np_rng = np_rng\n super().__init__(\n env.observation_space,\n env.action_space,\n capacity=capacity,\n pixel_keys=pixel_keys\n )\n self.successful_offline_prior_trajs = []\n self.successful_offline_task_trajs = []\n \n self._load_data_from_dir(dataset_path, subsample_ratio)\n \n self.load_successes = load_successes\n if self.load_successes:\n self._load_successful_traj(dataset_path)\n\n def load_successful_traj(self):\n assert self.load_successes, \"did not load successful trajectories upon making 
this dataset\"\n prior_idx = self.np_rng.integers(len(self.successful_offline_prior_trajs))\n task_idx = self.np_rng.integers(len(self.successful_offline_task_trajs))\n prior_traj = self.successful_offline_prior_trajs[prior_idx]\n task_traj = self.successful_offline_task_trajs[task_idx]\n return prior_traj + task_traj\n \n def _load_data_from_dir(self, dataset_path, subsample_ratio=1.0):\n print(\"subsample ratio:\", subsample_ratio * subsample_ratio) # sub-sampled twice\n for f in os.listdir(dataset_path):\n full_path = os.path.join(dataset_path, f)\n if f.endswith('.npy'):\n print(\"*\"*20, \"\\nloading data from:\", full_path)\n data = np.load(full_path, allow_pickle=True)\n print(\"prior subsampling # trajs:\", len(data))\n data = self._subsample_data(data, subsample_ratio)\n self._load_data(data, subsample_ratio)\n print(\"post subsampling # trajs:\", len(self))\n \n def _subsample_data(self, data, r=1.0):\n assert 0 <= r <= 1\n n = len(data)\n idxs = self.np_rng.choice(n, size=int(n*r), replace=False)\n return data[idxs]\n\n def _load_data(self, data, subsample_ratio=1.0):\n cutoff = int(len(data) * subsample_ratio)\n for i, traj in enumerate(data):\n if i > cutoff:\n break\n trans = dict_to_list(traj)\n for tran in trans:\n data_dict = self._make_data_dict(tran)\n self.insert(data_dict)\n \n def _load_successful_traj(self, dataset_path):\n # load successful offline trajectories for visualizations / evaluation\n prior_data = np.load(os.path.join(dataset_path, 'successful', 'prior_success.npy'), allow_pickle=True)\n task_data = np.load(os.path.join(dataset_path, 'successful', 'task_success.npy'), allow_pickle=True)\n\n for traj in prior_data:\n trans = dict_to_list(traj)\n trans = [self._make_data_dict(tran) for tran in trans]\n self.successful_offline_prior_trajs.append(trans)\n\n for traj in task_data:\n trans = dict_to_list(traj)\n trans = [self._make_data_dict(tran) for tran in trans]\n self.successful_offline_task_trajs.append(trans)\n\n def _make_data_dict(self, tran):\n return dict(\n observations={\"pixels\": np.array(tran[\"observations\"][\"image\"])[..., None]},\n actions=np.array(tran[\"actions\"]),\n next_observations={\"pixels\": np.array(tran[\"next_observations\"][\"image\"])[..., None]},\n rewards=np.array(tran[\"rewards\"]),\n masks=1-np.array(tran[\"terminals\"], dtype=float),\n dones=np.array(tran[\"agent_infos\"][\"done\"])\n )" } ]
import os import numpy as np import tqdm import wandb import matplotlib.pyplot as plt import pickle import roboverse import types import jax import jax.numpy as jnp from absl import app, flags from flax.core import FrozenDict from ml_collections import config_flags from flax.core import frozen_dict from flax.training import checkpoints from rlpd.agents import DrQLearner, PixelRND, PixelRM, PixelBCAgent from rlpd.data import MemoryEfficientReplayBuffer, ReplayBuffer from rlpd.evaluation import evaluate from rlpd.wrappers import wrap_pixels from rlpd.agents.drq.icvf import PixelICVF from rlpd import gc_dataset from gym.wrappers import TimeLimit, FilterObservation, RecordEpisodeStatistics from rlpd.data import Dataset from rlpd.data.cog_datasets import COGDataset from functools import partial
13,032
"File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. [gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed)
""" Modified from https://github.com/ikostrikov/rlpd/blob/main/rlpd/train_finetuning_pixels.py Original lincense information: MIT License Copyright (c) 2022 Ilya Kostrikov, Philip J. Ball, Laura Smith Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #! /usr/bin/env python ### cog imports ### ### cog imports ### FLAGS = flags.FLAGS flags.DEFINE_string("project_name", "explore-cog", "wandb project name.") flags.DEFINE_string("env_name", "cheetah-run-v0", "Environment name.") flags.DEFINE_float( "dataset_subsample_ratio", 0.1, "Ratio of the dataset to subsample (done twice)" ) flags.DEFINE_bool("use_icvf", False, "Whether to use the icvf encoder") flags.DEFINE_float("offline_ratio", 0.5, "Offline ratio.") flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_integer("eval_episodes", 100, "Number of episodes used for evaluation.") flags.DEFINE_integer("log_interval", 1000, "Logging interval.") flags.DEFINE_integer("eval_interval", 5000, "Eval interval.") flags.DEFINE_integer("batch_size", 256, "Mini batch size.") flags.DEFINE_integer("max_steps", 500000, "Number of training steps.") flags.DEFINE_integer( "start_training", 5000, "Number of training steps to start training." ) flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.") flags.DEFINE_string("save_dir", "exp_data_cog", "Directory to save checkpoints.") flags.DEFINE_bool("checkpoint_model", False, "save model") flags.DEFINE_bool("checkpoint_buffer", False, "save replay buffer") flags.DEFINE_integer("utd_ratio", 1, "Update to data ratio.") flags.DEFINE_float("bc_pretrain_rollin", 0.0, "rollin coeff") flags.DEFINE_integer( "bc_pretrain_steps", 10000, "Pre-train BC policy for a number of steps on pure offline data", ) config_flags.DEFINE_config_file( "config", "configs/rlpd_pixels_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. 
[gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed)
ds = COGDataset(
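For orientation, the target line above begins a COGDataset construction, and the COGDataset snippet retrieved in this row's context exposes the signature (env, dataset_path, capacity=500_000, subsample_ratio=1.0, pixel_keys=("pixels",), np_rng=None, load_successes=True). The sketch below is one plausible way the call could be completed using variables already defined in the cropped code (env, pixel_keys, dataset_path, np_rng, FLAGS.dataset_subsample_ratio); it is an illustrative assumption, not the verified continuation from the source repository.

# Hypothetical completion of the target line, inferred from the COGDataset
# __init__ signature shown in this row's context; the real call may differ.
ds = COGDataset(
    env,                                       # wrapped roboverse training env from main()
    dataset_path,                              # os.path.join("data", env_name_alt) above
    capacity=500_000,                          # default taken from the snippet; assumed here
    subsample_ratio=FLAGS.dataset_subsample_ratio,
    pixel_keys=pixel_keys,
    np_rng=np_rng,
    load_successes=True,
)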
11
2023-11-19 21:28:52+00:00
16k
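That closes out the first row of the preview. As a rough illustration of how a row like this could be consumed, the sketch below assembles a prompt from the retrieved context snippets plus the cropped in-file prefix and checks a model's first generated line against next_line. The field names mirror this preview; the generate callable and the rows.jsonl path are placeholders rather than part of the dataset.

import json
from typing import Callable, Dict

def build_prompt(row: Dict) -> str:
    """Join the retrieved cross-file snippets with the in-file code prefix."""
    context_blocks = [c["snippet"] for c in row["context"]]
    return "\n\n".join(context_blocks) + "\n\n" + row["cropped_code"]

def first_line_match(row: Dict, generate: Callable[[str], str]) -> bool:
    """Exact-match the first non-empty generated line against the gold next_line."""
    completion = generate(build_prompt(row))
    predicted = next((line for line in completion.splitlines() if line.strip()), "")
    return predicted.strip() == row["next_line"].strip()

# Example usage (assumes rows were exported to JSONL and generate wraps some model):
# rows = [json.loads(line) for line in open("rows.jsonl")]
# accuracy = sum(first_line_match(r, generate) for r in rows) / len(rows)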
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_head(cfg):\n \"\"\"Build head.\"\"\"\n return HEADS.build(cfg)" }, { "identifier": "build_roi_extractor", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_roi_extractor(cfg):\n \"\"\"Build roi extractor.\"\"\"\n return ROI_EXTRACTORS.build(cfg)" }, { "identifier": "build_loss", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return LOSSES.build(cfg)" }, { "identifier": "StandardRoIHead", "path": "PointOBB/mmdet/models/roi_heads/standard_roi_head.py", "snippet": "class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler.\"\"\"\n self.bbox_assigner = None\n self.bbox_sampler = None\n if self.train_cfg:\n self.bbox_assigner = build_assigner(self.train_cfg.assigner)\n self.bbox_sampler = build_sampler(\n self.train_cfg.sampler, context=self)\n\n def init_bbox_head(self, bbox_roi_extractor, bbox_head):\n \"\"\"Initialize ``bbox_head``\"\"\"\n self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)\n self.bbox_head = build_head(bbox_head)\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize ``mask_head``\"\"\"\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = build_head(mask_head)\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n bbox_results = self._bbox_forward(x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask head\n if self.with_mask:\n mask_rois = rois[:100]\n mask_results = self._mask_forward(x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n ann_weight,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n 
gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = self.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n losses = dict()\n # bbox head forward and loss\n if self.with_bbox:\n bbox_results = self._bbox_forward_train(x, sampling_results,\n gt_bboxes, gt_labels,ann_weight, #add by fei\n img_metas)\n losses.update(bbox_results['loss_bbox'])\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(x, sampling_results,\n bbox_results['bbox_feats'],\n gt_masks, img_metas)\n losses.update(mask_results['loss_mask'])\n\n return losses\n\n def _bbox_forward(self, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, ann_weight,\n img_metas):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(x, rois)\n\n bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels,ann_weight, self.train_cfg) ## add by fei\n loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(loss_bbox=loss_bbox)\n return bbox_results\n\n def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n img_metas):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n if not self.share_roi_extractor:\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(x, pos_rois)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n\n mask_results = self._mask_forward(\n x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,\n self.train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)\n return mask_results\n\n def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n assert ((rois is not None) ^\n (pos_inds is not None and bbox_feats is not None))\n if rois is not None:\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n assert bbox_feats is not None\n mask_feats = 
bbox_feats[pos_inds]\n\n mask_pred = self.mask_head(mask_feats)\n mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)\n return mask_results\n\n async def async_simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = await self.async_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = await self.async_test_mask(\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=rescale,\n mask_test_cfg=self.test_cfg.get('mask'))\n return bbox_results, segm_results\n\n def simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head.num_classes)\n for i in range(len(det_bboxes))\n ]\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = self.simple_test_mask(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return list(zip(bbox_results, segm_results))\n\n def aug_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,\n proposal_list,\n self.test_cfg)\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n img_metas[0][0]['scale_factor'])\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(x, img_metas, det_bboxes,\n det_labels)\n return [(bbox_results, segm_results)]\n else:\n return [bbox_results]\n\n def onnx_export(self, x, proposals, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n det_bboxes, det_labels = self.bbox_onnx_export(\n x, img_metas, proposals, self.test_cfg, rescale=rescale)\n\n if not self.with_mask:\n return det_bboxes, det_labels\n else:\n segm_results = self.mask_onnx_export(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return det_bboxes, det_labels, segm_results\n\n def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):\n \"\"\"Export mask branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n det_bboxes (Tensor): Bboxes and corresponding scores.\n has shape [N, num_bboxes, 5].\n det_labels (Tensor): class labels of\n shape [N, num_bboxes].\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # image shapes of images in the batch\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n raise RuntimeError('[ONNX Error] Can not record MaskHead '\n 'as it has not been executed this time')\n batch_size = det_bboxes.size(0)\n # if det_bboxes is rescaled to the original image size, we need to\n 
# rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n batch_index = torch.arange(\n det_bboxes.size(0), device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n max_shape = img_metas[0]['img_shape_for_onnx']\n num_det = det_bboxes.shape[1]\n det_bboxes = det_bboxes.reshape(-1, 4)\n det_labels = det_labels.reshape(-1)\n segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,\n det_labels, self.test_cfg,\n max_shape)\n segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],\n max_shape[1])\n return segm_results\n\n def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,\n **kwargs):\n \"\"\"Export bbox branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (Tensor): Region proposals with\n batch dimension, has shape [N, num_bboxes, 5].\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n assert len(\n img_metas\n ) == 1, 'Only support one input image while in exporting to ONNX'\n img_shapes = img_metas[0]['img_shape_for_onnx']\n\n rois = proposals\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n bbox_pred.size(-1))\n det_bboxes, det_labels = self.bbox_head.onnx_export(\n rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)\n\n return det_bboxes, det_labels" }, { "identifier": "CascadeRoIHead", "path": "PointOBB/mmdet/models/roi_heads/cascade_roi_head.py", "snippet": "class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Cascade roi head including one bbox head and one mask head.\n\n https://arxiv.org/abs/1712.00726\n \"\"\"\n\n def __init__(self,\n num_stages,\n stage_loss_weights,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n shared_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n assert shared_head is None, \\\n 'Shared head is not supported in Cascade RCNN anymore'\n\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n super(CascadeRoIHead, self).__init__(\n bbox_roi_extractor=bbox_roi_extractor,\n bbox_head=bbox_head,\n mask_roi_extractor=mask_roi_extractor,\n mask_head=mask_head,\n shared_head=shared_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained,\n init_cfg=init_cfg)\n\n def init_bbox_head(self, 
bbox_roi_extractor, bbox_head):\n \"\"\"Initialize box head and box roi extractor.\n\n Args:\n bbox_roi_extractor (dict): Config of box roi extractor.\n bbox_head (dict): Config of box in box head.\n \"\"\"\n self.bbox_roi_extractor = ModuleList()\n self.bbox_head = ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(self.num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(self.num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))\n self.bbox_head.append(build_head(head))\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize mask head and mask roi extractor.\n\n Args:\n mask_roi_extractor (dict): Config of mask roi extractor.\n mask_head (dict): Config of mask in mask head.\n \"\"\"\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(self.num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n self.bbox_assigner = []\n self.bbox_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.bbox_assigner.append(\n build_assigner(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.bbox_sampler.append(\n build_sampler(rcnn_train_cfg.sampler, context=self))\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask heads\n if self.with_mask:\n mask_rois = rois[:100]\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def _bbox_forward(self, stage, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n bbox_roi_extractor = self.bbox_roi_extractor[stage]\n bbox_head = self.bbox_head[stage]\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,\n gt_labels, rcnn_train_cfg):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(stage, x, rois)\n bbox_targets = self.bbox_head[stage].get_targets(\n sampling_results, gt_bboxes, 
gt_labels, rcnn_train_cfg)\n loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(\n loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)\n return bbox_results\n\n def _mask_forward(self, stage, x, rois):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n mask_roi_extractor = self.mask_roi_extractor[stage]\n mask_head = self.mask_head[stage]\n mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n mask_pred = mask_head(mask_feats)\n\n mask_results = dict(mask_pred=mask_pred)\n return mask_results\n\n def _mask_forward_train(self,\n stage,\n x,\n sampling_results,\n gt_masks,\n rcnn_train_cfg,\n bbox_feats=None):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(stage, x, pos_rois)\n\n mask_targets = self.mask_head[stage].get_targets(\n sampling_results, gt_masks, rcnn_train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask)\n return mask_results\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n losses = dict()\n for i in range(self.num_stages):\n self.current_stage = i\n rcnn_train_cfg = self.train_cfg[i]\n lw = self.stage_loss_weights[i]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = self.bbox_assigner[i]\n bbox_sampler = self.bbox_sampler[i]\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n for j in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n gt_labels[j])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[j],\n gt_bboxes[j],\n gt_labels[j],\n feats=[lvl_feat[j][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_results = self._bbox_forward_train(i, x, sampling_results,\n gt_bboxes, gt_labels,\n rcnn_train_cfg)\n\n for name, value in bbox_results['loss_bbox'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(\n i, 
x, sampling_results, gt_masks, rcnn_train_cfg,\n bbox_results['bbox_feats'])\n for name, value in mask_results['loss_mask'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # refine bboxes\n if i < self.num_stages - 1:\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n # bbox_targets is a tuple\n roi_labels = bbox_results['bbox_targets'][0]\n with torch.no_grad():\n roi_labels = torch.where(\n roi_labels == self.bbox_head[i].num_classes,\n bbox_results['cls_score'][:, :-1].argmax(1),\n roi_labels)\n proposal_list = self.bbox_head[i].refine_bboxes(\n bbox_results['rois'], roi_labels,\n bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n return losses\n\n def simple_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n num_imgs = len(proposal_list)\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # \"ms\" in variable names means multi-stage\n ms_bbox_result = {}\n ms_segm_result = {}\n ms_scores = []\n rcnn_test_cfg = self.test_cfg\n\n rois = bbox2roi(proposal_list)\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n\n # split batch bbox prediction back to each image\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n num_proposals_per_img = tuple(\n len(proposals) for proposals in proposal_list)\n rois = rois.split(num_proposals_per_img, 0)\n cls_score = cls_score.split(num_proposals_per_img, 0)\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n else:\n bbox_pred = self.bbox_head[i].bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n ms_scores.append(cls_score)\n\n if i < self.num_stages - 1:\n bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]\n rois = torch.cat([\n self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],\n bbox_pred[j],\n img_metas[j])\n for j in range(num_imgs)\n ])\n\n # average scores of each image by stages\n cls_score = [\n sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n for i in range(num_imgs)\n ]\n\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(num_imgs):\n det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n rois[i],\n cls_score[i],\n bbox_pred[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n\n if torch.onnx.is_in_onnx_export():\n return det_bboxes, det_labels\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head[-1].num_classes)\n for i in range(num_imgs)\n ]\n ms_bbox_result['ensemble'] = bbox_results\n\n if self.with_mask:\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n mask_classes = self.mask_head[-1].num_classes\n segm_results = [[[] for _ in range(mask_classes)]\n for _ in range(num_imgs)]\n else:\n if rescale and not isinstance(scale_factors[0], float):\n scale_factors = [\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n for scale_factor in scale_factors\n ]\n _bboxes = [\n det_bboxes[i][:, :4] *\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\n for i in range(len(det_bboxes))\n ]\n mask_rois = bbox2roi(_bboxes)\n num_mask_rois_per_img = tuple(\n _bbox.size(0) for _bbox in _bboxes)\n aug_masks = []\n for i 
in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n mask_pred = mask_results['mask_pred']\n # split batch mask prediction back to each image\n mask_pred = mask_pred.split(num_mask_rois_per_img, 0)\n aug_masks.append(\n [m.sigmoid().cpu().numpy() for m in mask_pred])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(num_imgs):\n if det_bboxes[i].shape[0] == 0:\n segm_results.append(\n [[]\n for _ in range(self.mask_head[-1].num_classes)])\n else:\n aug_mask = [mask[i] for mask in aug_masks]\n merged_masks = merge_aug_masks(\n aug_mask, [[img_metas[i]]] * self.num_stages,\n rcnn_test_cfg)\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks, _bboxes[i], det_labels[i],\n rcnn_test_cfg, ori_shapes[i], scale_factors[i],\n rescale)\n segm_results.append(segm_result)\n ms_segm_result['ensemble'] = segm_results\n\n if self.with_mask:\n results = list(\n zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n else:\n results = ms_bbox_result['ensemble']\n\n return results\n\n def aug_test(self, features, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n rcnn_test_cfg = self.test_cfg\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(features, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction)\n # \"ms\" in variable names means multi-stage\n ms_scores = []\n\n rois = bbox2roi([proposals])\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n ms_scores.append(bbox_results['cls_score'])\n\n if i < self.num_stages - 1:\n bbox_label = bbox_results['cls_score'][:, :-1].argmax(\n dim=1)\n rois = self.bbox_head[i].regress_by_class(\n rois, bbox_label, bbox_results['bbox_pred'],\n img_meta[0])\n\n cls_score = sum(ms_scores) / float(len(ms_scores))\n bboxes, scores = self.bbox_head[-1].get_bboxes(\n rois,\n cls_score,\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n\n bbox_result = bbox2result(det_bboxes, det_labels,\n self.bbox_head[-1].num_classes)\n\n if self.with_mask:\n if det_bboxes.shape[0] == 0:\n segm_result = [[]\n for _ in range(self.mask_head[-1].num_classes)]\n else:\n aug_masks = []\n aug_img_metas = []\n for x, img_meta in zip(features, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction)\n mask_rois = bbox2roi([_bboxes])\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n aug_img_metas.append(img_meta)\n merged_masks = merge_aug_masks(aug_masks, 
aug_img_metas,\n self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n rcnn_test_cfg,\n ori_shape,\n scale_factor=1.0,\n rescale=False)\n return [(bbox_result, segm_result)]\n else:\n return [bbox_result]" }, { "identifier": "BBoxTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class BBoxTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False,\n **kwargs):\n \"\"\"Asynchronized test for box head without augmentation.\"\"\"\n rois = bbox2roi(proposals)\n roi_feats = self.bbox_roi_extractor(\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n if self.with_shared_head:\n roi_feats = self.shared_head(roi_feats)\n sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\n\n async with completed(\n __name__, 'bbox_head_forward',\n sleep_interval=sleep_interval):\n cls_score, bbox_pred = self.bbox_head(roi_feats)\n\n img_shape = img_metas[0]['img_shape']\n scale_factor = img_metas[0]['scale_factor']\n det_bboxes, det_labels = self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n return det_bboxes, det_labels\n\n def simple_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False):\n \"\"\"Test only det bboxes without augmentation.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (List[Tensor]): Region proposals.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n\n Returns:\n tuple[list[Tensor], list[Tensor]]: The first list contains\n the boxes of the corresponding image in a batch, each\n tensor has the shape (num_boxes, 5) and last dimension\n 5 represent (tl_x, tl_y, br_x, br_y, score). 
Each Tensor\n in the second list is the labels with shape (num_boxes, ).\n The length of both lists should be equal to batch_size.\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n max_size = max([proposal.size(0) for proposal in proposals])\n # padding to form a batch\n for i, proposal in enumerate(proposals):\n supplement = proposal.new_full(\n (max_size - proposal.size(0), proposal.size(1)), 0)\n proposals[i] = torch.cat((supplement, proposal), dim=0)\n rois = torch.stack(proposals, dim=0)\n\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n # remove padding, ignore batch_index when calculating mask\n supplement_mask = rois.abs()[..., 1:].sum(dim=-1) == 0\n cls_score[supplement_mask, :] = 0\n\n # bbox_pred would be None in some detector when with_reg is False,\n # e.g. Grid R-CNN.\n if bbox_pred is not None:\n # the bbox prediction of some detectors like SABL is not Tensor\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.reshape(batch_size,\n num_proposals_per_img,\n bbox_pred.size(-1))\n bbox_pred[supplement_mask, :] = 0\n else:\n # TODO: Looking forward to a better way\n # TODO move these special process to a corresponding head\n # For SABL\n bbox_preds = self.bbox_head.bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(len(proposals)):\n # remove padding\n supplement_mask = proposals[i].abs().sum(dim=-1) == 0\n for bbox in bbox_preds[i]:\n bbox[supplement_mask] = 0\n det_bbox, det_label = self.bbox_head.get_bboxes(\n rois[i],\n cls_score[i],\n bbox_preds[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n return det_bboxes, det_labels\n else:\n bbox_pred = None\n\n return self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shapes,\n scale_factors,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n\n def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n \"\"\"Test det bboxes with test time augmentation.\"\"\"\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n # TODO more flexible\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n rois = bbox2roi([proposals])\n bbox_results = self._bbox_forward(x, rois)\n bboxes, scores = self.bbox_head.get_bboxes(\n 
rois,\n bbox_results['cls_score'],\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n return det_bboxes, det_labels" }, { "identifier": "MaskTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class MaskTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False,\n mask_test_cfg=None):\n \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\n # image shape of the first image in the batch (only one)\n ori_shape = img_metas[0]['ori_shape']\n scale_factor = img_metas[0]['scale_factor']\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n if rescale:\n scale_factor = det_bboxes.new_tensor(scale_factor)\n _bboxes = (\n det_bboxes[:, :4] *\n scale_factor if rescale else det_bboxes)\n mask_rois = bbox2roi([_bboxes])\n mask_feats = self.mask_roi_extractor(\n x[:len(self.mask_roi_extractor.featmap_strides)],\n mask_rois)\n\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):\n sleep_interval = mask_test_cfg['async_sleep_interval']\n else:\n sleep_interval = 0.035\n async with completed(\n __name__,\n 'mask_head_forward',\n sleep_interval=sleep_interval):\n mask_pred = self.mask_head(mask_feats)\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\n scale_factor, rescale)\n return segm_result\n\n def simple_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False):\n \"\"\"Simple test for mask head without augmentation.\"\"\"\n # image shapes of images in the batch\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n for _ in range(len(det_bboxes))]\n return segm_results\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n\n # padding to form a batch\n max_size = max([bboxes.size(0) for bboxes in det_bboxes])\n for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):\n supplement_bbox = bbox.new_full(\n (max_size - bbox.size(0), bbox.size(1)), 0)\n supplement_label = label.new_full((max_size - label.size(0), ), 0)\n det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)\n det_labels[i] = torch.cat((supplement_label, label), dim=0)\n det_bboxes = torch.stack(det_bboxes, dim=0)\n det_labels = torch.stack(det_labels, dim=0)\n\n batch_size = det_bboxes.size(0)\n num_proposals_per_img = det_bboxes.shape[1]\n\n # if det_bboxes is rescaled to the original image size, we need to\n # rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n if rescale:\n scale_factors = det_bboxes.new_tensor(scale_factors)\n det_bboxes = det_bboxes * scale_factors.unsqueeze(1)\n\n batch_index = torch.arange(\n det_bboxes.size(0), 
device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n\n # Recover the batch dimension\n mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,\n *mask_pred.shape[1:])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(batch_size):\n mask_pred = mask_preds[i]\n det_bbox = det_bboxes[i]\n det_label = det_labels[i]\n\n # remove padding\n supplement_mask = det_bbox.abs().sum(dim=-1) != 0\n mask_pred = mask_pred[supplement_mask]\n det_bbox = det_bbox[supplement_mask]\n det_label = det_label[supplement_mask]\n\n if det_label.shape[0] == 0:\n segm_results.append([[]\n for _ in range(self.mask_head.num_classes)\n ])\n else:\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, det_bbox, det_label, self.test_cfg,\n ori_shapes[i], scale_factors[i], rescale)\n segm_results.append(segm_result)\n return segm_results\n\n def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n \"\"\"Test for mask head with test time augmentation.\"\"\"\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n aug_masks = []\n for x, img_meta in zip(feats, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n mask_rois = bbox2roi([_bboxes])\n mask_results = self._mask_forward(x, mask_rois)\n # convert to numpy array to save memory\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n scale_factor = det_bboxes.new_ones(4)\n segm_result = self.mask_head.get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n self.test_cfg,\n ori_shape,\n scale_factor=scale_factor,\n rescale=False)\n return segm_result" }, { "identifier": "obb2xyxy", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2xyxy(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to horizontal bounding boxes.\n\n Args:\n obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]\n \"\"\"\n if version == 'oc':\n results = obb2xyxy_oc(rbboxes)\n elif version == 'le135':\n results = obb2xyxy_le135(rbboxes)\n elif version == 'le90':\n results = obb2xyxy_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" }, { "identifier": "regularize_boxes", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def regularize_boxes(boxes,\n pattern: str = None,\n width_longer: bool = True,\n start_angle: float = -90) -> Tensor:\n \"\"\"Regularize rotated boxes.\n\n Due to the angle periodicity, one rotated box can be represented in\n many different (x, y, w, h, t). To make each rotated box unique,\n ``regularize_boxes`` will take the remainder of the angle divided by\n 180 degrees.\n\n However, after taking the remainder of the angle, there are still two\n representations for one rotate box. For example, (0, 0, 4, 5, 0.5) and\n (0, 0, 5, 4, 0.5 + pi/2) are the same areas in the image. 
To solve the\n problem, the code will swap edges w.r.t ``width_longer``:\n\n - width_longer=True: Make sure the width is longer than the height. If\n not, swap the width and height. The angle ranges in [start_angle,\n start_angle + 180). For the above example, the rotated box will be\n represented as (0, 0, 5, 4, 0.5 + pi/2).\n - width_longer=False: Make sure the angle is lower than\n start_angle+pi/2. If not, swap the width and height. The angle\n ranges in [start_angle, start_angle + 90). For the above example,\n the rotated box will be represented as (0, 0, 4, 5, 0.5).\n\n For convenience, three commonly used patterns are preset in\n ``regualrize_boxes``:\n\n - 'oc': OpenCV Definition. Has the same box representation as\n ``cv2.minAreaRect`` the angle ranges in [-90, 0). Equal to set\n width_longer=False and start_angle=-90.\n - 'le90': Long Edge Definition (90). the angle ranges in [-90, 90).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-90.\n - 'le135': Long Edge Definition (135). the angle ranges in [-45, 135).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-45.\n\n Args:\n pattern (str, Optional): Regularization pattern. Can only be 'oc',\n 'le90', or 'le135'. Defaults to None.\n width_longer (bool): Whether to make sure width is larger than\n height. Defaults to True.\n start_angle (float): The starting angle of the box angle\n represented in degrees. Defaults to -90.\n\n Returns:\n Tensor: Regularized box tensor.\n \"\"\"\n\n if pattern is not None:\n if pattern == 'oc':\n width_longer, start_angle = False, -90\n elif pattern == 'le90':\n width_longer, start_angle = True, -90\n elif pattern == 'le135':\n width_longer, start_angle = True, -45\n else:\n raise ValueError(\"pattern only can be 'oc', 'le90', and\"\n f\"'le135', but get {pattern}.\")\n start_angle = start_angle / 180 * np.pi\n\n x, y, w, h, t = boxes.unbind(dim=-1)\n if width_longer:\n # swap edge and angle if h >= w\n w_ = torch.where(w > h, w, h)\n h_ = torch.where(w > h, h, w)\n t = torch.where(w > h, t, t + np.pi / 2)\n t = ((t - start_angle) % np.pi) + start_angle\n else:\n # swap edge and angle if angle > pi/2\n t = ((t - start_angle) % np.pi)\n w_ = torch.where(t < np.pi / 2, w, h)\n h_ = torch.where(t < np.pi / 2, h, w)\n t = torch.where(t < np.pi / 2, t, t - np.pi / 2) + start_angle\n obb = torch.stack([x, y, w_, h_, t], dim=-1)\n return obb" }, { "identifier": "reduce_mean", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "obb2poly_np", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" } ]
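A minimal worked check of the 'le90' regularization described in the regularize_boxes docstring above, assuming only PyTorch and NumPy are available. The helper name regularize_le90 and the sample boxes are illustrative additions; the body simply inlines the width_longer=True branch shown in the snippet, so the two equivalent (x, y, w, h, angle) tuples from the docstring should map to one and the same regularized tensor.

import numpy as np
import torch

def regularize_le90(boxes: torch.Tensor) -> torch.Tensor:
    # Inlined 'le90' branch of regularize_boxes: width_longer=True, start_angle=-90 degrees.
    start_angle = -90 / 180 * np.pi
    x, y, w, h, t = boxes.unbind(dim=-1)
    w_ = torch.where(w > h, w, h)                  # keep the longer edge as the width
    h_ = torch.where(w > h, h, w)
    t = torch.where(w > h, t, t + np.pi / 2)       # rotate 90 degrees when edges are swapped
    t = ((t - start_angle) % np.pi) + start_angle  # wrap the angle into [-pi/2, pi/2)
    return torch.stack([x, y, w_, h_, t], dim=-1)

# The docstring's example: these two tuples describe the same rotated box,
# so they must regularize to a single unique representation.
a = torch.tensor([[0.0, 0.0, 4.0, 5.0, 0.5]])
b = torch.tensor([[0.0, 0.0, 5.0, 4.0, 0.5 + np.pi / 2]])
assert torch.allclose(regularize_le90(a), regularize_le90(b))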
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
14154
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1)
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1)
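A short usage sketch for the obb2cxcywh_le90 helper defined in the code above; the function body is copied from the file so the snippet runs stand-alone, while the sample box is made up for illustration. It shows that the helper returns the axis-aligned box enclosing an oriented box, with the angle zeroed out.

import math
import torch

def obb2cxcywh_le90(obboxes):
    # As in the file above: (cx, cy, w, h, theta) -> axis-aligned enclosing box (cx, cy, W, H, 0).
    center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1)
    Cos, Sin = torch.cos(theta), torch.sin(theta)
    x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin)
    y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos)
    bias = torch.cat([x_bias, y_bias], dim=-1)
    return torch.cat([center, bias * 2, torch.zeros_like(theta)], dim=-1)

# A 4x2 box centred at (10, 10) and rotated by 90 degrees is enclosed by a
# 2-wide, 4-tall axis-aligned box at the same centre.
box = torch.tensor([[10.0, 10.0, 4.0, 2.0, math.pi / 2]])
print(obb2cxcywh_le90(box))  # approximately [[10., 10., 2., 4., 0.]]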
@HEADS.register_module()
0
2023-11-20 07:50:12+00:00
16k
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_TOKEN)\n else:\n env_file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\"token.env\")\n load_dotenv(env_file_path)\n HUGGINGFACE_HUB_TOKEN = os.getenv('HUGGINGFACE_HUB_TOKEN')\n print(\"d2:\", HUGGINGFACE_HUB_TOKEN)\n login(token=HUGGINGFACE_HUB_TOKEN)\n os.environ[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGINGFACE_HUB_TOKEN" }, { "identifier": "HuggingfaceInference", "path": "src/finetune/huggingface_inference.py", "snippet": "class HuggingfaceInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,using_4bit_quantization=True,low_cpu_mem_usage=False):\n self.model = None\n self.tokenizer = None\n self.hg_model = None\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n self.bnb_config = None\n if using_4bit_quantization:\n self.bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n self.low_cpu_mem_usage = low_cpu_mem_usage\n def load_model(self):\n try:\n \n if self.model_path.split(os.sep)[-1].rfind(\"llama\") >=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n if not self.tokenizer.pad_token:\n if self.model_path.split(os.sep)[-1].lower().rfind(\"gpt2\")>=0:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n else:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.hg_model.resize_token_embeddings(len(self.tokenizer))\n\n except Exception as e:\n return -1, e\n self.model = pipeline(\n \"text-generation\",\n model=self.hg_model,\n tokenizer=self.tokenizer,\n max_new_tokens = self.max_new_tokens,\n temperature=self.temperature,\n top_p=self.top_p,top_k=self.top_k,do_sample=True,\n return_full_text=False,\n repetition_penalty=self.repetition_penalty,\n # return_dict_in_generate = True\n )\n return 0, \"\"\n def infer(self ,input):\n output = self.model(input)\n return output[0]['generated_text'] if output else None\n def free_memory(self):\n if self.hg_model:\n del self.hg_model\n self.hg_model = None\n if self.tokenizer:\n del self.tokenizer\n self.tokenizer = None\n if self.model:\n del 
self.model\n self.model = None" }, { "identifier": "LlamaCppInference", "path": "src/finetune/llama_cpp_inference.py", "snippet": "class LlamaCppInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,n_gpu_layers=35, n_ctx=4048,verbose=False):\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prefix1 = \"\"\n self.prefix2 = \"\"\n self.model = None\n\n def load_model(self):\n load_model_status = 0\n msg = None\n try:\n self.model = LlamaCpp(model_path=self.model_path, n_gpu_layers=35, n_ctx=4096,max_tokens=self.max_new_tokens, temperature=self.temperature,\n verbose=False, top_k=self.top_k, top_p=self.top_p,repeat_penalty=self.repetition_penalty)\n except Exception as e:\n load_model_status = -1\n msg = e\n return load_model_status, msg\n def infer(self ,input):\n return self.model(input)\n\n\n def free_memory(self):\n if self.model:\n del self.model\n self.model = None" }, { "identifier": "QAWithRAG", "path": "src/rag/qa_with_rag.py", "snippet": "class QAWithRAG():\n def __init__(self ,config: dict ={}):\n self.text_splitter = None\n self.embedding_function = None\n self.vectorstore = None\n self.retriever = None\n self.chat_llm = None\n\n self.chat_history =[]\n # self.persist_directory = \"./chroma_db\"\n self.persist_directory = None\n self.qa = None\n self.langchain_llm = None\n def free_memory(self):\n if self.chat_llm:\n self.chat_llm.free_memory()\n del self.chat_llm\n self.chat_llm = None\n if self.langchain_llm:\n del self.langchain_llm\n self.langchain_llm = None\n if self.qa:\n del self.qa\n self.qa = None\n\n\n def get_text_splitter(self ,chunk_size ,chunk_overlap ,separators):\n self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len,\n separators=separators)\n def load_embedding_model(self ,model_path=\"\"):\n self.embedding_function = HuggingFaceEmbeddings(model_name=model_path ,model_kwargs = {'device': 'cpu'})\n def load_chat_model(self ,model_path,using_4bit_quantization,low_cpu_mem_usage,\n max_new_tokens, temperature, top_k, top_p, repeat_penalty\n ):\n self.set_prompt_template(model_path)\n load_model_status = 0\n if model_path.split('.')[-1] == \"gguf\":\n self.chat_llm = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens, temperature=temperature,\n top_k=top_k, top_p=top_p, repetition_penalty=repeat_penalty)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = self.chat_llm.model\n else:\n self.chat_llm = HuggingfaceInference(model_path, max_new_tokens, temperature, top_p, top_k, repeat_penalty, using_4bit_quantization,low_cpu_mem_usage)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = HuggingFacePipeline(pipeline=self.chat_llm.model)\n\n return load_model_status, msg\n\n #\n def get_document_data(self ,doc_path):\n self.chat_history = []\n self.chat_history.clear()\n self.doc_ext = doc_path.split('.')[-1]\n if self.doc_ext == \"txt\":\n loader = TextLoader(doc_path, encoding='utf8')\n elif self.doc_ext == \"pdf\":\n loader = PyPDFLoader(doc_path)\n elif self.doc_ext == \"docx\":\n loader = Docx2txtLoader(doc_path)\n else:\n raise ValueError(f\"Unsupported format: {self.doc_ext}\")\n data = loader.load()\n return data\n def add_document_to_vector_store(self, doc_path ,search_top_k 
,search_score_threshold):\n data = self.get_document_data(doc_path)\n data = self.text_splitter.split_documents(data)\n try:\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n except InvalidDimensionException:\n Chroma().delete_collection()\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n self.set_retriever(search_top_k ,search_score_threshold)\n\n def set_retriever(self ,search_top_k ,score_threshold):\n self.retriever = self.vectorstore.as_retriever(search_type='similarity_score_threshold',\n search_kwargs={'k': search_top_k, \"score_threshold\": score_threshold})\n def set_prompt_template(self ,chat_model_path):\n\n if chat_model_path.lower().find(\"mistral\") >= 0 and chat_model_path.lower().find(\"instruct\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"llama\") >= 0 and chat_model_path.lower().find(\"chat\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"zephyr\") >= 0:\n prompt_template = \"\"\"<|user|>\\n Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: </s><|assistant|>\\n\"\"\"\n else:\n prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer:\"\"\"\n\n self.prompt_template = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n def generate(self, question):\n self.chat_history = []\n if self.retriever:\n\n chain_type_kwargs = {\"prompt\": self.prompt_template ,\"verbose\": False}\n self.qa = RetrievalQA.from_chain_type(llm=self.langchain_llm, chain_type=\"stuff\", retriever=self.retriever,\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs)\n result = self.qa({\"query\": question}, return_only_outputs=True)\n retrieved_txt_list = []\n if len(result['source_documents'] ) >0:\n if self.doc_ext == \"txt\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"pdf\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"docx\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n answer = result['result']\n else:\n answer = \"Sorry, I can't find any relevant information in document. 
\" + result['result']\n return answer, retrieved_txt_list\n else:\n return \"\", retrieved_txt_list" }, { "identifier": "read_yaml", "path": "src/utils/common.py", "snippet": "def read_yaml(yaml_path):\n with open(yaml_path) as f1:\n try:\n data = yaml.safe_load(f1)\n return data\n except yaml.YAMLError as e:\n raise ValueError(f'Error loading yaml file: {e}')" }, { "identifier": "get_first_row_from_dataset", "path": "src/utils/common.py", "snippet": "def get_first_row_from_dataset(dataset_path):\n if os.path.exists(os.path.join(dataset_path, \"dataset_dict.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_infos.json\")):\n dataset = datasets.load_dataset(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_info.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n else:\n raise ValueError(\n f'Invalid Dataset format {dataset_path}.')\n try:\n split_list = list(dataset.keys())\n except:\n split_list = [\"train\"]\n new_split_list= [\"\",\"\",\"\"]\n for split in split_list:\n if split.find(\"train\") >= 0:\n new_split_list[0] = split\n elif split.find(\"val\") >= 0:\n new_split_list[1] = split\n elif split.find(\"test\") >= 0:\n new_split_list[2] = split\n\n return dataset[new_split_list[0]][0],new_split_list" }, { "identifier": "get_runs_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_runs_model_names_from_dir(root_dir):\n\n run_names = os.listdir(root_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(root_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n model_bin_path = os.path.exists(\n os.path.join(root_dir,\n run_name, \"output_model\", run_output_model_name, \"ori\",\n \"pytorch_model.bin\"))\n if run_output_model_name.find(\"merged_\") >= 0 and model_bin_path:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n return runs_output_model" }, { "identifier": "get_hg_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_from_dir(root_dir):\n model_names = os.listdir(root_dir)\n model_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n return model_names" }, { "identifier": "get_hg_model_names_and_gguf_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_and_gguf_from_dir(hg_model_root_dir,runs_model_root_dir):\n output = []\n runs_gguf_files = glob.glob(os.path.join(runs_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"*.gguf\"), recursive=False)\n root_model_hg_dir0 = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"config.json\"),recursive=False)\n root_model_hg_dir1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"config.json\"), recursive=False)\n runs_hg_dir = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"config.json\"),recursive=False)\n runs_gguf_files.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_gguf_files.sort(key=lambda file: 
os.path.getmtime(file), reverse=True)\n root_model_gguf_files1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir0.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n runs_hg_dir.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n\n for file in runs_gguf_files:\n file_pos = file.find(\"runs\")\n output.append(file[file_pos:])\n for file in root_model_gguf_files:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_gguf_files1:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_hg_dir0:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in root_model_hg_dir1:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in runs_hg_dir:\n file_pos = file.find(\"runs\")+len(\"runs\")+1\n output.append(file[file_pos:])\n return output" }, { "identifier": "validate_model_path", "path": "src/utils/common.py", "snippet": "def validate_model_path(model_name):\n if not model_name:\n return False,\"\"\n home_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n base_model_config_path1 = os.path.join(home_dir, \"models\", model_name)\n base_model_config_path2 = os.path.join(base_model_config_path1, \"config.json\")\n run_model_config_path1 = os.path.join(home_dir, \"runs\", model_name)\n run_model_config_path2 = os.path.join(run_model_config_path1, \"config.json\")\n if os.path.exists(base_model_config_path1) and base_model_config_path1.endswith(\".gguf\"):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path1) and run_model_config_path1.endswith(\".gguf\") :\n return True,run_model_config_path1\n if os.path.exists(base_model_config_path2):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path2):\n return True,run_model_config_path1\n return False,\"\"" }, { "identifier": "get_runs_models", "path": "src/utils/common.py", "snippet": "def get_runs_models():\n training_runs_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'runs')\n run_names = os.listdir(training_runs_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file)))\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(training_runs_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n if run_output_model_name.find(\"merged_\") >= 0:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n runs_output_model = runs_output_model[::-1]\n return runs_output_model" }, { "identifier": "get_model_type", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_type(model_path):\n if model_path:\n if model_path.lower().find(\"mistral\") >= 0 and model_path.lower().find(\"instruct\") >= 0:\n model_type = \"mistral\"\n elif model_path.lower().find(\"llama\") >= 0 and model_path.lower().find(\"chat\") >= 0:\n model_type = \"llama2\"\n elif model_path.lower().find(\"zephyr\") >= 0:\n model_type = \"zephyr\"\n else:\n model_type = \"other model\"\n 
else:\n model_type = \"other model\"\n return model_type" }, { "identifier": "get_chat_history_prompt", "path": "src/utils/chat_prompts.py", "snippet": "def get_chat_history_prompt(chat_history,model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt = ','.join(chat_history[:-2])\n prompt = prompt + chat_history[-2]\n elif model_type == \"llama2\":\n prompt = format_chat_history_prompt_for_llama2_7b_chat(chat_history)\n elif model_type == \"zephyr\":\n prompt = format_chat_history_prompt_for_zephyr_7b_instruct(chat_history)\n elif model_type == \"mistral\":\n prompt = format_chat_history_prompt_for_mistral_7b_instruct(chat_history)\n return prompt" }, { "identifier": "get_model_prompt_template", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_prompt_template(model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n elif model_type == \"llama2\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n elif model_type == \"zephyr\":\n prompt_template = PromptTemplate.from_template(\n \"<|user|>\\n{question}</s><|assistant|>\\n\"\n )\n elif model_type == \"mistral\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n return prompt_template" }, { "identifier": "download_model", "path": "src/utils/download_model.py", "snippet": "class ModelDownloader:\n def __init__(self, max_retries=5):\n def sanitize_model_and_branch_names(self, model, branch):\n def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):\n def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):\n def get_single_file(self, url, output_folder, start_from_scratch=False):\n def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=4):\n def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):\n def check_model_files(self, model, branch, links, sha256, output_folder):" }, { "identifier": "QloraTrainer", "path": "src/finetune/qlora_trainer.py", "snippet": "class QloraTrainer(PeftTrainer):\n\n def __init__(self, config: dict):\n self.config = config\n self.tokenizer = None\n self.base_model = None\n self.merged_model = None\n self.dataset = None\n self.fused_model = None\n self.train_dataset = None\n self.val_dataset = None\n self.logging_callback = self.LoggingCallbacks()\n print(\"config:\",config)\n def load_dataset(self):\n if self.config[\"dataset\"][\"hg_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_infos.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset= datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_val_dataset\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_dict.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = 
datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_val_dataset\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"hg_dataset_dir\"]}.')\n else:\n\n if self.config[\"dataset\"][\"local_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_infos.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_val_set\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_dict.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_val_set\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"local_dataset_dir\"]}.')\n\n\n if self.config[\"dataset\"][\"max_length\"] == \"Model Max Length\":\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"mistral\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"zephyr\") >= 0:\n context_window = 1024*4\n else:\n context_window = self.tokenizer.model_max_length\n if self.tokenizer.model_max_length == int(1e30):\n context_window = 1024\n else:\n context_window = self.config[\"dataset\"][\"max_length\"]\n print(\"context_window:\",context_window)\n self.train_dataset = self.train_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n # padding=True\n ))\n if self.val_dataset:\n self.val_dataset = self.val_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n padding=True\n ))\n def generate_prompt(self,sample,eos_token):\n\n prompt = self.config[\"dataset\"][\"prefix1\"]+sample[self.config[\"dataset\"][\"datatset_col1\"]]+\\\n self.config[\"dataset\"][\"prefix2\"] + sample[self.config[\"dataset\"][\"datatset_col2\"]]+eos_token\n # print(\"prompt:\",prompt)\n return prompt\n\n def load_model(self):\n\n if self.config[\"model\"][\"fine_tuning_type\"] == \"QLoRA\":\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n elif self.config[\"model\"][\"fine_tuning_type\"] == \"LoRA\":\n bnb_config = None\n try:\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n else:\n self.tokenizer = 
AutoTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n except Exception as e:\n return -1,e\n if not self.tokenizer.pad_token:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n if self.config[\"training\"][\"gradient_checkpointing\"] and not self.config[\"model\"][\"base_model_name\"].rfind(\"phi\")>=0:\n # self.base_model.gradient_checkpointing_enable()\n self.base_model = prepare_model_for_kbit_training(self.base_model,use_gradient_checkpointing=True,gradient_checkpointing_kwargs={'use_reentrant':False})\n else:\n self.base_model = prepare_model_for_kbit_training(self.base_model, use_gradient_checkpointing=False,gradient_checkpointing_kwargs={'use_reentrant':False})\n if self.config[\"model\"][\"base_model_name\"].lower().rfind(\"llama\")>=0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"mistral\") >= 0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"zephyr\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"llama\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"falcon\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"falcon\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"gpt2\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"gpt2\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"phi\") >= 0:\n target_modules = [\"Wqkv\", \"out_proj\"]\n task_type = \"CAUSAL_LM\"\n else:\n raise ValueError(f'{self.config[\"model\"][\"base_model_name\"]} is not yet supported.')\n #T5,bart, task_type = \"SEQ_2_SEQ_LM\" ,AutoModelForSeq2SeqLM\n \n lora_config = LoraConfig(\n r=self.config[\"model\"][\"lora_r\"],\n lora_alpha=self.config[\"model\"][\"lora_alpha\"],\n target_modules=target_modules,\n lora_dropout=self.config[\"model\"][\"lora_dropout\"],\n bias=self.config[\"model\"][\"lora_bias\"],\n task_type=task_type,\n )\n self.fused_model = get_peft_model(self.base_model, lora_config)\n # self.fused_model.gradient_checkpointing = True\n return 0,\"\"\n def train(self):\n self.run_name = datetime.now().strftime(\"run_%Y-%m-%d_%H-%M-%S\")\n logging_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"tensorboard\")\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"output_model\", run_output_model_name + \"_adapter\")\n checkpoint_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name)\n self.trainer = transformers.Trainer(\n model=self.fused_model,\n train_dataset=self.train_dataset,\n eval_dataset= self.val_dataset if self.val_dataset else None,\n args=transformers.TrainingArguments(\n per_device_train_batch_size=self.config[\"training\"][\"batch_size\"],\n gradient_accumulation_steps=self.config[\"training\"][\"gradient_accumulation_steps\"],\n warmup_steps=self.config[\"training\"][\"warmup_steps\"],\n num_train_epochs=self.config[\"training\"][\"epochs\"],\n learning_rate=self.config[\"training\"][\"learning_rate\"],\n fp16=True,\n output_dir=checkpoint_dir,\n report_to=\"tensorboard\",\n optim=self.config[\"training\"][\"optimizer\"],\n 
lr_scheduler_type=self.config[\"training\"][\"lr_scheduler_type\"],\n load_best_model_at_end=True if self.val_dataset else False,\n save_strategy=\"steps\",\n save_steps = self.config[\"training\"][\"eval_steps\"],\n save_total_limit=1,\n evaluation_strategy=\"steps\" if self.val_dataset else \"no\",\n eval_steps=self.config[\"training\"][\"eval_steps\"], # eval interval\n per_device_eval_batch_size=1,\n # eval_steps=10, # eval interval\n logging_steps=100,#self.config[\"training\"][\"eval_steps\"]\n # run_name=self.run_name,\n logging_dir=logging_dir,\n ),\n\n callbacks=[self.logging_callback,transformers.EarlyStoppingCallback(early_stopping_patience=self.config[\"training\"][\"early_stopping_patience\"]) ] if self.config[\"training\"][\"early_stopping_patience\"]>0 else [self.logging_callback],\n data_collator=transformers.DataCollatorForLanguageModeling(self.tokenizer, mlm=False),\n\n )\n\n self.fused_model.config.use_cache = False # silence the warnings. Please re-enable for inference!\n try:\n self.trainer.train()\n except Exception as e:\n return -1,e\n # model_save_path = f\"{self.config['training']['output_dir']}/{self.config['model']['base_model_name']}_adapter\"\n self.trainer.save_model(output_model_dir)\n return 0,\"\"\n def merge_and_save(self):\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n else:\n base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_adapter_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\n run_output_model_name + \"_adapter\")\n\n model = PeftModel.from_pretrained(base_model, output_adapter_model_dir)\n\n merged_model = model.merge_and_unload()\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_merged_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\"merged_\"+run_output_model_name,\"ori\")\n merged_model.save_pretrained(output_merged_model_dir)\n self.tokenizer.save_pretrained(output_merged_model_dir)\n\n def _print_trainable_parameters(self, model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\n class LoggingCallbacks(transformers.TrainerCallback):\n # current_step = 0\n # max_steps = 0\n\n def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n pass\n\n def on_step_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n global TRAINING_STATUS\n if TRAINING_STATUS.status == 1:\n control.should_epoch_stop = True\n control.should_training_stop = True\n else:\n self.max_steps = state.max_steps\n self.current_step = state.global_step\n\n def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: 
transformers.TrainerControl, logs, **kwargs):\n pass\n\n def free_memroy(self):\n try:\n del self.fused_model\n del self.tokenizer\n del self.base_model\n del self.trainer\n torch.cuda.empty_cache()\n except Exception as e:\n print(\"Free memory error:\",e)" }, { "identifier": "TRAINING_STATUS", "path": "src/finetune/qlora_trainer.py", "snippet": "TRAINING_STATUS = TrainingStatus()" }, { "identifier": "download_model_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_model_wrapper(repo_id,local_model_root_dir, specific_file=None, return_links=False, check=False,progress = gr.Progress()):\n if repo_id.endswith(\".gguf\"):\n try:\n model_dir = os.path.join(local_model_root_dir, '/'.join(repo_id.split('/')[0:-1]))\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {repo_id.split('/')[-1]} to `{model_dir}/...`</span>\"\n hf_hub_download(repo_id='/'.join(repo_id.split('/')[0:-1]), filename=repo_id.split('/')[-1], local_dir=model_dir, resume_download=True,\n force_download=False)\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n else:\n if repo_id == \"\" or repo_id == \"None\":\n # return gr.update(value=\"Model's name is empty!\",visible=True)\n yield f\"Model's name is empty!\"\n else:\n model_dir = os.path.join(local_model_root_dir, repo_id)\n\n model_config_path = os.path.join(model_dir, \"config.json\")\n model_config_path1 = os.path.join(model_dir, \"pytorch_model.bin\")\n model_config_path2 = os.path.join(model_dir, \"model.safetensors\")\n if os.path.exists(model_config_path1) or os.path.exists(model_config_path2):\n yield '<span style=\"color:green\">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded.</span>'\n else:\n\n try:\n progress(0.0)\n # download_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"download-model.py\")\n # downloader = importlib.import_module(download_model_path).ModelDownloader()\n downloader = download_model.ModelDownloader()\n model, branch = downloader.sanitize_model_and_branch_names(repo_id, None)\n yield (\"Getting the download links from Hugging Face\")\n links, sha256, is_lora, is_llamacpp, link_file_size_list = downloader.get_download_links_from_huggingface(model,\n branch,\n text_only=False,\n specific_file=specific_file\n )\n if return_links:\n yield '\\n\\n'.join([f\"`{Path(link).name}`\" for link in links])\n yield (\"Getting the output folder\")\n # base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir\n base_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\n output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp,\n base_folder=base_folder)\n link_file_size_list = np.array(link_file_size_list)\n links = np.array(links)\n sorted_index = np.argsort(link_file_size_list)\n link_file_size_list = link_file_size_list[sorted_index]\n links = links[sorted_index]\n total_file_size = sum(link_file_size_list)\n copyed_file_size = 0\n for link, link_file_size in zip(links, link_file_size_list):\n model_file_name = link.split('/')[-1]\n if model_file_name.find(\"Pooling\")>=0:\n model_file_name = model_file_name+\"/config.json\"\n # yield (f\"Downloading file {model_file_name} to `{output_folder}/...`\")\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {model_file_name} to `{output_folder}/...`</span>\"\n hf_hub_download(repo_id=repo_id, 
filename=model_file_name, local_dir=model_dir, resume_download=True,\n force_download=False)\n copyed_file_size += link_file_size\n progress(copyed_file_size / total_file_size)\n # yield (\"Download successful!\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" }, { "identifier": "download_dataset_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_dataset_wrapper(repo_id,local_dataset_root_dir,progress = gr.Progress()):\n repo_id = repo_id.strip()\n if repo_id == \"\":\n yield \"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset's name is empty!</span>\"\n else:\n dataset_dir = os.path.join(local_dataset_root_dir, repo_id)\n # dataset_config_path1 = os.path.join(dataset_dir, \"config.json\")\n dataset_config_path1 = os.path.join(dataset_dir, \"dataset_infos.json\")\n dataset_config_path2 = os.path.join(dataset_dir, \"dataset_dict.json\")\n\n if os.path.exists(dataset_config_path1) or os.path.exists(dataset_config_path2):\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset has already been downloaded.</span>\"\n else:\n try:\n\n progress(0.3)\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading dataset to `{dataset_dir}/...`</span>\"\n datasets = load_dataset(repo_id)\n progress(0.8)\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n datasets.save_to_disk(dataset_dir)\n # datasets = load_from_disk(\"dddd\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" } ]
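A condensed illustration of how the get_model_type and get_model_prompt_template snippets above wrap a user question for different model families. It uses plain str.format instead of langchain's PromptTemplate; the function name wrap_question and the sample model path are this sketch's own, while the templates are the ones listed verbatim in the snippet.

def wrap_question(model_path: str, question: str) -> str:
    p = model_path.lower()
    if "mistral" in p and "instruct" in p:
        template = "<s>[INST] {question} [/INST]"   # mistral-instruct style
    elif "llama" in p and "chat" in p:
        template = "<s>[INST] {question} [/INST]"   # llama2-chat style
    elif "zephyr" in p:
        template = "<|user|>\n{question}</s><|assistant|>\n"
    else:
        template = "{question}"                     # fall back to the raw question
    return template.format(question=question)

print(wrap_question("models/zephyr-7b-beta", "What is RAG?"))
# <|user|>
# What is RAG?</s><|assistant|>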
import pandas as pd import math import numpy as np import gc import os,requests import subprocess,threading import time import gradio as gr import os import traceback import numpy as np import glob import shutil import torch import socket from src.utils.common import login_huggingface from src.finetune.huggingface_inference import HuggingfaceInference from src.finetune.llama_cpp_inference import LlamaCppInference from src.rag.qa_with_rag import QAWithRAG from src.utils.common import read_yaml,get_first_row_from_dataset,\ get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template from transformers.training_args import OptimizerNames from huggingface_hub import hf_hub_download from src.utils import download_model from pathlib import Path from src.finetune.qlora_trainer import QloraTrainer from src.finetune.qlora_trainer import TRAINING_STATUS from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper
12358
local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} --reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = 
get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 )
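The cropped code above probes port 6006 before spawning TensorBoard, so a second server is not launched when one is already running. A generic sketch of that probe-then-launch pattern follows; the function name ensure_tensorboard and the list-based Popen call are choices made here rather than the project's, while the --logdir and --reload_multifile flags mirror the command shown in the code above.

import socket
import subprocess

def ensure_tensorboard(logdir: str, host: str = "127.0.0.1", port: int = 6006) -> None:
    try:
        # If something already answers on the TensorBoard port, reuse it.
        with socket.create_connection((host, port), timeout=1):
            return
    except OSError:
        # Nothing is listening: start TensorBoard in the background.
        subprocess.Popen(
            ["tensorboard", "--logdir", logdir, "--reload_multifile", "True"],
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True,
        )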
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} 
--reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From 
Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 )
if validate_model_path(base_model_names[0])[0]:
9
2023-11-25 12:37:21+00:00
16k
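A minimal sketch of how a record like the one above could be consumed for next-line prediction follows. It assumes the rows are stored as JSONL with the field names used throughout this dump, and that `gold_snippet_index` points (0-based) at the `context` entry whose snippet is needed to complete `next_line`; the `records.jsonl` path, the helper names and the commented-out `generate` call are placeholders for illustration, not part of any shipped tooling.

import json


def build_prompt(record: dict) -> str:
    # Prepend the gold cross-file snippet (context entries carry
    # identifier/path/snippet keys), then the file's imports and the
    # cropped in-file code that ends right before the gold next line.
    gold = record["context"][record["gold_snippet_index"]]
    cross_file = f"# {gold['path']}\n{gold['snippet']}\n"
    return cross_file + record["import_statement"] + "\n" + record["cropped_code"]


def exact_match(prediction: str, record: dict) -> bool:
    # Whitespace-insensitive comparison against the gold next line.
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    with open("records.jsonl") as f:  # hypothetical file name
        for raw in f:
            record = json.loads(raw)
            prompt = build_prompt(record)
            # prediction = generate(prompt)  # placeholder for any code LLM's completion call
            # print(exact_match(prediction, record))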
danilonumeroso/conar
models/vkc_reasoner.py
[ { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n continue\n if name.endswith('_temporal') and 'index' not in name:\n tensor = tensor.transpose(1, 0)\n batch[name] = tensor\n return batch\n\n @staticmethod\n def get_masks(train, batch, continue_logits, enforced_mask):\n mask = continue_logits[batch.batch] > 0\n mask_cp = (continue_logits > 0.0).bool()\n mask_edges = mask[batch.edge_index[0]]\n if not train and enforced_mask is not None:\n enforced_mask_ids = enforced_mask[batch.batch]\n mask &= enforced_mask_ids\n mask_cp &= enforced_mask\n return mask_cp, mask, mask_edges\n\n def add_encoder(self, stage, name, loc, data_type, data_sample, bias):\n if name == 'adj': # we use edge indices\n return\n if data_type == Type.SCALAR or data_type == Type.MASK or data_type == Type.MASK_ONE:\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n self.encoders[stage][name] = nn.Linear(in_shape, self.latent_features, bias=bias)\n\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are 1-hot encoded on the edges\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.encoders[stage][name] = nn.ModuleList([\n nn.Linear(1, self.latent_features, bias=bias),\n nn.Linear(1, self.latent_features, bias=bias)\n ])\n\n def add_decoder(self, stage, name, loc, data_type, data_sample, bias):\n assert name != 'adj', 'Adjacency matrix should not be decoded'\n dec = None\n if loc == Location.NODE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.Linear(2*self.latent_features, 1, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.Linear(2*self.latent_features, in_shape, bias=bias)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are decoded from both node and edge information\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if loc == Location.GRAPH:\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n in_shape = data_sample.shape[-1] if data_type == Type.CATEGORICAL else 1\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n\n if loc == Location.EDGE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n if data_type == Type.POINTER:\n dec = nn.ModuleList([\n 
nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n assert dec is not None, breakpoint()\n self.decoders[stage][name] = dec\n\n\n\n\n def __init__(self,\n spec,\n data,\n latent_features,\n algo_processor,\n bias=True,\n use_TF=False,\n use_sinkhorn=True,\n L1_loss=False,\n xavier_on_scalars=True,\n global_termination_pool='max', #'predinet',\n get_attention=False,\n use_batch_norm=False,\n transferring=False,\n timeit=True,\n **kwargs):\n\n super().__init__()\n self.step_idx = 0\n self.latent_features = latent_features\n self.assert_checks = False\n self.timeit = timeit\n self.debug = False\n self.debug_epoch_threshold = 1e9\n self.L1_loss = L1_loss\n self.global_termination_pool = global_termination_pool\n self.next_step_pool = True\n self.processor = algo_processor\n self.triplet_reasoning = False\n if isinstance(self.processor.processors[0].processor, TripletMPNN):\n self.triplet_reasoning = True\n self.triplet_reductor = nn.Linear(2*latent_features, latent_features, bias=bias)\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.get_attention = get_attention\n self.lambda_mul = 1 # 0.0001\n self.transferring = transferring\n self.node_encoder = nn.Sequential(\n nn.Linear(2*latent_features, latent_features, bias=bias),\n )\n self.encoders = nn.ModuleDict({\n 'input': nn.ModuleDict({\n }),\n 'hint': nn.ModuleDict({\n }),\n })\n self.decoders = nn.ModuleDict({\n 'hint': nn.ModuleDict({\n }),\n 'output': nn.ModuleDict({\n })\n })\n for name, (stage, loc, datatype) in spec.items():\n if name == 'adj': # we use edge indices\n continue\n if stage == 'input':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'output':\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'hint':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n\n self.node_pointer_vec = nn.Parameter(torch.randn(latent_features))\n if xavier_on_scalars:\n assert False, \"NEEDS REFACTORING\"\n torch.nn.init.trunc_normal_(self.encoders['input']['edge_attr'].weight, std=1/torch.sqrt(torch.tensor(latent_features)))\n\n if global_termination_pool == 'attention':\n inp_dim = latent_features\n self.global_attn = GlobalAttentionPlusCoef(\n nn.Sequential(\n nn.Linear(inp_dim, latent_features, bias=bias),\n nn.LeakyReLU(),\n nn.Linear(latent_features, 1, bias=bias)\n ),\n nn=None)\n\n if global_termination_pool == 'predinet':\n lf = latent_features\n self.predinet = PrediNet(lf, 1, lf, lf, flatten_pooling=torch_geometric.nn.glob.global_max_pool)\n\n self.termination_network = nn.Sequential(\n nn.BatchNorm1d(latent_features) if use_batch_norm else nn.Identity(),\n nn.Linear(latent_features, 1, bias=bias),\n )\n\n def get_continue_logits(self, batch_ids, latent_nodes, sth_else=None):\n if self.global_termination_pool == 'mean':\n graph_latent = torch_geometric.nn.global_mean_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'max':\n graph_latent = torch_geometric.nn.global_max_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'attention':\n graph_latent, coef = self.global_attn(latent_nodes, batch_ids)\n if self.get_attention:\n 
self.attentions[self.step_idx] = coef.clone().detach()\n self.per_step_latent[self.step_idx] = sth_else\n\n if self.global_termination_pool == 'predinet':\n assert not torch.isnan(latent_nodes).any()\n graph_latent = self.predinet(latent_nodes, batch_ids)\n\n if self.get_attention:\n self.attentions[self.step_idx] = latent_nodes\n continue_logits = self.termination_network(graph_latent).view(-1)\n return continue_logits\n\n def zero_termination(self):\n self.true_positive = 0\n self.false_positive = 0\n self.false_negative = 0\n self.true_negative = 0\n\n def zero_steps(self):\n self.sum_of_processed_nodes = 0\n self.sum_of_processed_edges = 0\n self.step_idx = 0\n self.sum_of_steps = 0\n self.cnt = 0\n\n @staticmethod\n def convert_logits_to_outputs(spec,\n logits,\n fr,\n to,\n num_nodes,\n batch_ids,\n include_probabilities=True,\n dbg=False):\n outs = defaultdict(dict)\n\n for stage in logits.keys():\n for name in logits[stage].keys():\n if name not in logits[stage] or name not in spec:\n continue\n stage, loc, data_type = spec[name]\n assert stage != Stage.INPUT\n if data_type == Type.SOFT_POINTER:\n assert False, f\"Not yet added, please add {name}\"\n if data_type in [Type.CATEGORICAL]:\n indices = logits[stage][name].argmax(-1)\n outshape = logits[stage][name].shape[-1]\n outs[stage][name] = F.one_hot(indices, num_classes=outshape).float()\n if data_type == Type.MASK_ONE:\n _, amax = torch_scatter.scatter_max(logits[stage][name], batch_ids, dim=0)\n amax = amax.squeeze(-1)\n outs[stage][name] = torch.zeros_like(logits[stage][name])\n outs[stage][name][amax] = 1\n if data_type == Type.MASK:\n outs[stage][name] = (logits[stage][name] > 0).float()\n if data_type == Type.SCALAR:\n outs[stage][name] = logits[stage][name]\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n _, pointers = torch_scatter.scatter_max(pointer_logits, fr, dim_size=num_nodes)\n pointers = to[pointers]\n pointer_probabilities = torch_geometric.utils.softmax(pointer_logits, fr, num_nodes=num_nodes)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n pointers = pointer_logits.argmax(-1)\n pointer_probabilities = F.softmax(pointer_logits, dim=-1)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n return outs\n\n def set_initial_states(self, batch, init_last_latent=None):\n self.processor.zero_lstm(batch.num_nodes) # NO-OP if processor(s) don't use LSTM\n self.last_latent = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n if init_last_latent is not None:\n self.last_latent = init_last_latent\n self.last_latent_edges = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n self.last_continue_logits = torch.ones(batch.num_graphs, device=batch.edge_index.device)\n self.last_logits = defaultdict(dict)\n\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n if name not in self.decoders[stage]:\n continue\n if stage == Stage.OUTPUT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n if data_type == Type.SCALAR:\n 
self.last_logits[stage][name] = getattr(batch, name).unsqueeze(-1)\n if data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name).int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.last_logits[stage][name] = torch.full((batch.edge_index.shape[1], int(ptrs.max().item())+1), -1e9).to(batch.edge_index.device)\n self.last_logits[stage][name][torch.arange(ptrs.shape[0]), ptrs] = 1e9\n else:\n assert False, breakpoint()\n\n if stage == Stage.HINT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0].unsqueeze(-1)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n else:\n assert False, breakpoint()\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0, :].unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name)[0, :].int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.max_nodes_in_graph = int(ptrs.max().item())+1 # FIXME try another way to infer\n self.last_logits[stage][name] = torch.where(edge_one_hot_encode_pointers_edge(ptrs, batch, self.max_nodes_in_graph).bool(), 1e9, -1e9).to(batch.edge_index.device)\n else:\n assert False, breakpoint()\n\n self.all_hint_logits = []\n self.all_masks_graph = []\n\n def update_per_mask(self, before, after, mask=None):\n # NOTE: this does expansion of the mask, if you do\n # NOT use expansion, use torch.where\n if mask is None:\n mask = self.mask\n mask = mask.unsqueeze(-1).expand_as(before)\n return torch.where(mask, after, before)\n\n def update_state_dict(self, before, after):\n new_before = defaultdict(dict)\n for stage in after.keys():\n for name in after[stage].keys():\n _, loc, data_type = self.dataset_spec[name]\n if loc == Location.GRAPH:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_cp)\n if loc == Location.EDGE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL, 
Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_edges)\n else:\n assert False, \"Please implement\"\n if loc == Location.NODE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name])\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = torch.where(self.mask_edges, after[stage][name], before[stage][name])\n else:\n assert False, breakpoint()\n return new_before\n\n def update_states(self, batch, current_latent, edges_current_latent,\n logits, continue_logits):\n self.last_continue_logits = torch.where(self.mask_cp, continue_logits,\n self.last_continue_logits)\n self.last_latent = self.update_per_mask(self.last_latent, current_latent)\n self.last_latent_edges = self.update_per_mask(self.last_latent_edges, edges_current_latent, mask=self.mask_edges)\n self.last_logits = self.update_state_dict(self.last_logits, logits)\n self.all_hint_logits.append(self.last_logits['hint'])\n self.all_masks_graph.append(self.mask_cp)\n preds = type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)\n self.last_hint = preds['hint']\n self.last_output = preds['output']\n\n def prepare_initial_masks(self, batch):\n self.mask = torch.ones_like(batch.batch, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_cp = torch.ones(batch.num_graphs, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_edges = torch.ones_like(batch.edge_index[0], dtype=torch.bool, device=batch.edge_index.device)\n\n def loop_condition(self, termination, STEPS_SIZE):\n return (((not self.training and termination.any()) or\n (self.training and termination.any())) and\n self.step_idx+1 < STEPS_SIZE)\n\n def loop_body(self,\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=1000):\n\n current_latent, edges_current_latent, preds, continue_logits =\\\n self.forward(\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n first_n_processors=first_n_processors,\n )\n termination = continue_logits\n\n self.debug_batch = batch\n self.debug_hint_out_curr = hint_out_curr\n if self.timeit:\n st = time.time()\n self.update_states(batch, current_latent, edges_current_latent, preds, termination)\n if self.timeit:\n print(f'updating states: {time.time()-st}')\n\n def get_step_input(self, x_curr, batch):\n if self.training and self.use_TF or self.hardcode_outputs:\n return x_curr\n return type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)['hint']\n\n def encode_inputs(self, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.INPUT:\n continue\n if name not in self.encoders[stage]:\n continue\n data = getattr(batch, name)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n assert False, breakpoint() # we don't have it for now (B-F/MST), 
will figure out later\n if data_type != Type.CATEGORICAL:\n data = data.unsqueeze(-1)\n if loc == Location.EDGE:\n edge_fts += self.encoders[stage][name](data)\n if loc == Location.NODE:\n node_fts += self.encoders[stage][name](data)\n return node_fts, edge_fts\n\n def encode_hints(self, hints, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n graph_fts = torch.zeros(batch.num_graphs, self.latent_features, device=batch.edge_index.device)\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n if name not in self.encoders[stage]:\n continue\n hint = hints[name]\n if loc == Location.NODE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n node_fts = node_fts + self.encoders['hint'][name](hint)\n if loc == Location.EDGE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n edge_fts = edge_fts + self.encoders['hint'][name](hint)\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(hint, batch.edge_index)\n edge_fts = edge_fts + self.encoders['hint'][name](pred_gt_one_hot.unsqueeze(-1))\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers_edge(hint, batch, self.max_nodes_in_graph)\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n encoding = self.encoders['hint'][name][0](pred_gt_one_hot.unsqueeze(-1))\n encoding_2 = self.encoders['hint'][name][1](pred_gt_one_hot.unsqueeze(-1))\n encoding_sparse = SparseTensor(row=batch.edge_index[0], col=batch.edge_index[1], value=encoding)\n res_1 = encoding_sparse.mean(1)[batch.edge_index[0], batch.edge_index[1]-starts_edge]\n res_2 = encoding_2.mean(1)\n edge_fts += res_1 + res_2 # INPLACE\n if loc == Location.GRAPH and data_type in [Type.CATEGORICAL, Type.SCALAR, Type.MASK]:\n graph_fts = graph_fts + self.encoders['hint'][name](hint)\n return node_fts, edge_fts, graph_fts\n\n def get_input_output_hints(self, batch):\n hint_inp_curr = {}\n hint_out_curr = {}\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n hint_inp_curr[name] = getattr(batch, name)[self.step_idx]\n hint_out_curr[name] = getattr(batch, name)[self.step_idx+1]\n if 'mask' in data_type or data_type == Type.SCALAR:\n hint_inp_curr[name] = hint_inp_curr[name].unsqueeze(-1)\n hint_out_curr[name] = hint_out_curr[name].unsqueeze(-1)\n return hint_inp_curr, hint_out_curr\n\n def process(\n self,\n batch,\n EPSILON=0,\n enforced_mask=None,\n hardcode_outputs=False,\n debug=False,\n first_n_processors=1000,\n init_last_latent=None,\n **kwargs):\n\n SIZE, STEPS_SIZE = prepare_constants(batch)\n self.hardcode_outputs = hardcode_outputs\n\n # Pytorch Geometric batches along the node dimension, but we execute\n # along the temporal (step) dimension, hence we need to transpose\n # a few tensors. 
Done by `prepare_batch`.\n if self.assert_checks:\n check_edge_index_sorted(batch.edge_index)\n if self.epoch > self.debug_epoch_threshold:\n breakpoint()\n self.zero_steps()\n batch = type(self).prepare_batch(batch)\n # When we want to calculate last step metrics/accuracies\n # we need to take into account again different termination per graph\n # hence we save last step tensors (e.g. outputs) into their\n # corresponding tensor. The function below prepares these tensors\n # (all set to zeros, except masking for computation, which are ones)\n self.set_initial_states(batch, init_last_latent=init_last_latent)\n # Prepare masking tensors (each graph does at least 1 iteration of the algo)\n self.prepare_initial_masks(batch)\n # A flag if we had a wrong graph in the batch. Used for visualisation\n # of what went wrong\n self.wrong_flag = False\n assert self.mask_cp.all(), self.mask_cp\n if self.timeit:\n st = time.time()\n node_fts_inp, edge_fts_inp = self.encode_inputs(batch)\n if self.timeit:\n print(f'encoding inputs: {time.time()-st}')\n\n while True:\n hint_inp_curr, hint_out_curr = self.get_input_output_hints(batch)\n if not self.training:\n assert (self.last_continue_logits > 0).any() or True\n\n # Some algorithms output fewer values than they take\n # so if we reuse our last step outputs, they need to be fed back in.\n if self.timeit:\n st = time.time()\n hint_inp_curr = self.get_step_input(hint_inp_curr, batch)\n if self.timeit:\n print(f'getting step input : {time.time()-st}')\n st = time.time()\n node_fts_hint, edge_fts_hint, graph_fts = self.encode_hints(hint_inp_curr, batch)\n node_fts = node_fts_inp + node_fts_hint\n edge_fts = edge_fts_inp + edge_fts_hint\n if self.timeit:\n print(f'encoding hints: {time.time()-st}')\n\n true_termination = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n\n # Does one iteration of the algo and accumulates statistics\n self.loop_body(batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=first_n_processors)\n # And calculate what graphs would execute on the next step.\n self.mask_cp, self.mask, self.mask_edges = type(self).get_masks(self.training, batch, true_termination if self.training else self.last_continue_logits, enforced_mask)\n if not self.loop_condition(\n self.mask_cp,\n STEPS_SIZE):\n break\n assert self.mask_cp.any()\n self.step_idx += 1\n\n return self.all_hint_logits, self.last_logits, self.all_masks_graph\n\n def decode(self, batch, encoded_nodes, hidden, edge_fts, graph_fts):\n catted = torch.cat((encoded_nodes, hidden), dim=1)\n outs = defaultdict(dict)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n\n if loc == Location.NODE:\n\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name](catted)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n prod = self.decoders[stage][name][3](to.max(fr+edge)).squeeze(-1)\n if data_type in [Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION] and self.use_sinkhorn:\n prod = torch.maximum(prod, self.decoders[stage][name][3](fr.max(to+edge)).squeeze(-1))\n prod = sinkhorn_normalize(batch, prod, temperature=0.1, steps=10 if self.training else 60, add_noise=self.training)\n 
outs[stage][name] = prod\n\n if loc == Location.GRAPH:\n aggr_node_fts = torch_scatter.scatter_max(catted, batch.batch, dim=0)[0]\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name][0](aggr_node_fts) + self.decoders[stage][name][1](graph_fts)\n else:\n assert False\n\n if loc == Location.EDGE:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n if data_type in (Type.CATEGORICAL, Type.MASK, Type.SCALAR):\n outs[stage][name] = fr + to + edge\n elif data_type == Type.POINTER:\n pred = fr + to + edge\n pred_2 = self.decoders[stage][name][3](catted)\n ebatch = batch.edge_index_batch\n st = batch.ptr[ebatch]\n en = batch.ptr[ebatch+1]\n dense_pred_2, mask_pred_2 = tg_utils.to_dense_batch(pred_2, batch=batch.batch)\n edge_pred_2 = dense_pred_2[ebatch]\n mask_edge_pred_2 = mask_pred_2[ebatch]\n probs_logits = self.decoders[stage][name][4](torch.maximum(pred[:, None, :], edge_pred_2)).squeeze(-1)\n probs_logits[~mask_edge_pred_2] = -1e9\n outs[stage][name] = probs_logits\n else:\n assert False\n\n return outs\n\n def encode_nodes(self, current_input, last_latent):\n return torch.cat((current_input, last_latent), dim=1)\n\n def forward(self, batch, node_fts, edge_fts, graph_fts, first_n_processors=1000):\n if torch.isnan(node_fts).any():\n breakpoint()\n assert not torch.isnan(self.last_latent).any()\n assert not torch.isnan(node_fts).any()\n if self.timeit:\n st = time.time()\n if self.timeit:\n print(f'projecting nodes: {time.time()-st}')\n\n if self.timeit:\n st = time.time()\n edge_index = batch.edge_index\n hidden, edges_hidden = self.processor(node_fts, edge_fts, graph_fts, edge_index, self.last_latent, self.last_latent_edges, first_n_processors=first_n_processors, batch=batch)\n if self.timeit:\n print(f'message passing: {time.time()-st}')\n assert not torch.isnan(hidden).any()\n if self.timeit:\n st = time.time()\n if self.triplet_reasoning:\n edge_fts = self.triplet_reductor(torch.cat([edge_fts, edges_hidden], dim=-1))\n outs = self.decode(batch, node_fts, hidden, edge_fts, graph_fts)\n if self.timeit:\n print(f'decoding hints: {time.time()-st}')\n continue_logits = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n return hidden, edges_hidden, outs, continue_logits" }, { "identifier": "LitAlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n 
self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, 
Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n 
accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % 
self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer" }, { "identifier": "get_number_of_nodes", "path": "utils_execution.py", "snippet": "def get_number_of_nodes(algorithm, split):\n nns = CONFIGS[algorithm][split]['num_nodes']\n if isinstance(nns, int):\n nns = [nns]\n return nns" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }" }, { "identifier": "CONFIGS", "path": "datasets/_configs.py", "snippet": "CONFIGS = defaultdict(lambda: _DEFAULT_CONFIG)" } ]
from collections import defaultdict
from pprint import pprint
from torch_geometric.loader import DataLoader
from pytorch_lightning.trainer.supporters import CombinedLoader
from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner
from utils_execution import get_number_of_nodes
from hyperparameters import get_hyperparameters
from datasets._configs import CONFIGS
import torch
import torch_geometric
import torch_geometric.utils as tg_utils
import torch_scatter
import networkx as nx
13380
class LitVKCReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = AlgorithmReasoner( self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_VKC_metrics(self, batch, output_logits): selected_dense = torch_geometric.utils.to_dense_batch(output_logits['output']['selected'], batch=batch.batch)[0] selected_dense_topk = torch.sort(torch.topk(selected_dense.squeeze(-1), self.dataset.k, dim=-1).indices).values selected_topk = (selected_dense_topk+batch.ptr[:-1].unsqueeze(-1)).view(-1) selected_topk_gt = batch.selected.nonzero().squeeze(-1) selected_batch = batch.batch[selected_topk] acc_selected_topk = torch_scatter.scatter_mean((selected_topk == selected_topk_gt).float(), selected_batch).mean() G = tg_utils.to_networkx(batch, to_undirected=True, edge_attrs=['edge_attr']) mspl = nx.multi_source_dijkstra_path_length(G, sources=selected_topk.tolist(), weight='edge_attr') mspl = torch.tensor([mspl[i] for i in range(batch.num_nodes)]).to(selected_dense) farthest = torch_scatter.scatter_max(mspl, batch.batch)[0] assert (farthest + torch.finfo(torch.float32).eps >= batch.farthest).all() return { 'acc_topk': acc_selected_topk, 'farthest': farthest.mean(), 'farthest_gt': batch.farthest.mean(), 'farthest_relative_error': ((farthest-batch.farthest)/batch.farthest).mean(), } def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_VKC_metrics(batch, output_logits)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
class LitVKCReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = AlgorithmReasoner( self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_VKC_metrics(self, batch, output_logits): selected_dense = torch_geometric.utils.to_dense_batch(output_logits['output']['selected'], batch=batch.batch)[0] selected_dense_topk = torch.sort(torch.topk(selected_dense.squeeze(-1), self.dataset.k, dim=-1).indices).values selected_topk = (selected_dense_topk+batch.ptr[:-1].unsqueeze(-1)).view(-1) selected_topk_gt = batch.selected.nonzero().squeeze(-1) selected_batch = batch.batch[selected_topk] acc_selected_topk = torch_scatter.scatter_mean((selected_topk == selected_topk_gt).float(), selected_batch).mean() G = tg_utils.to_networkx(batch, to_undirected=True, edge_attrs=['edge_attr']) mspl = nx.multi_source_dijkstra_path_length(G, sources=selected_topk.tolist(), weight='edge_attr') mspl = torch.tensor([mspl[i] for i in range(batch.num_nodes)]).to(selected_dense) farthest = torch_scatter.scatter_max(mspl, batch.batch)[0] assert (farthest + torch.finfo(torch.float32).eps >= batch.farthest).all() return { 'acc_topk': acc_selected_topk, 'farthest': farthest.mean(), 'farthest_gt': batch.farthest.mean(), 'farthest_relative_error': ((farthest-batch.farthest)/batch.farthest).mean(), } def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_VKC_metrics(batch, output_logits)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
CONFIGS[self.algorithm][split]['num_samples'],
4
2023-11-20 15:32:43+00:00
16k
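For this record the meaning of `gold_snippet_index` can be checked directly against the values above: the index appears to be 0-based, since the `context` list holds five snippets (`AlgorithmReasoner`, `LitAlgorithmReasoner`, `get_number_of_nodes`, `get_hyperparameters`, `CONFIGS`) and index 4 selects the `CONFIGS` snippet, which is exactly the symbol used on the gold `next_line`. A small sanity-check sketch in Python (values copied from the record; the trimmed dictionary layout is assumed for illustration):

record = {
    "context": [
        {"identifier": "AlgorithmReasoner"},
        {"identifier": "LitAlgorithmReasoner"},
        {"identifier": "get_number_of_nodes"},
        {"identifier": "get_hyperparameters"},
        {"identifier": "CONFIGS"},
    ],
    "gold_snippet_index": 4,
    "next_line": "CONFIGS[self.algorithm][split]['num_samples'],",
}

gold_identifier = record["context"][record["gold_snippet_index"]]["identifier"]
assert gold_identifier in record["next_line"]  # "CONFIGS" appears on the gold line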
harisankar95/pathfinding3D
test/test_path.py
[ { "identifier": "DiagonalMovement", "path": "pathfinding3d/core/diagonal_movement.py", "snippet": "class DiagonalMovement:\n always = 1\n never = 2\n if_at_most_one_obstacle = 3\n only_when_no_obstacle = 4" }, { "identifier": "Grid", "path": "pathfinding3d/core/grid.py", "snippet": "class Grid:\n def __init__(\n self,\n width: int = 0,\n height: int = 0,\n depth: int = 0,\n matrix: MatrixType = None,\n grid_id: Optional[int] = None,\n inverse: bool = False,\n ):\n \"\"\"\n A grid represents the map (as 3d-list of nodes).\n\n Parameters\n ----------\n width : int, optional\n The width of the grid.\n height : int, optional\n The height of the grid.\n depth : int, optional\n The depth of the grid.\n matrix : MatrixType\n A 3D array of values (numbers or objects specifying weight)\n that determine how nodes are connected and if they are walkable.\n If no matrix is given, all nodes will be walkable.\n inverse : bool, optional\n If true, all values in the matrix that are not 0 will be considered\n walkable. Otherwise all values that are 0 will be considered walkable.\n \"\"\"\n self.width, self.height, self.depth = self._validate_dimensions(width, height, depth, matrix)\n self.nodes = (\n build_nodes(self.width, self.height, self.depth, matrix, inverse, grid_id)\n if self.is_valid_grid()\n else [[[]]]\n )\n\n def _validate_dimensions(self, width: int, height: int, depth: int, matrix: MatrixType) -> tuple:\n if matrix is not None:\n if not (\n isinstance(matrix, (list, np.ndarray))\n and len(matrix) > 0\n and len(matrix[0]) > 0\n and len(matrix[0][0]) > 0\n ):\n raise ValueError(\"Provided matrix is not a 3D structure or is empty.\")\n return len(matrix), len(matrix[0]), len(matrix[0][0])\n return width, height, depth\n\n def is_valid_grid(self) -> bool:\n return self.width > 0 and self.height > 0 and self.depth > 0\n\n def node(self, x: int, y: int, z: int) -> Optional[GridNode]:\n \"\"\"\n Get node at position\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n GridNode\n node at position\n \"\"\"\n return self.nodes[x][y][z] if self.inside(x, y, z) else None\n\n def inside(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if field position is inside map\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map\n \"\"\"\n return 0 <= x < self.width and 0 <= y < self.height and 0 <= z < self.depth\n\n def walkable(self, x: int, y: int, z: int) -> bool:\n \"\"\"\n Check, if the tile is inside grid and if it is set as walkable\n\n Parameters\n ----------\n x : int\n x position\n y : int\n y position\n z : int\n z position\n\n Returns\n -------\n bool\n True, if position is inside map and walkable\n \"\"\"\n return self.inside(x, y, z) and self.nodes[x][y][z].walkable\n\n def calc_cost(self, node_a: GridNode, node_b: GridNode, weighted: bool = False) -> float:\n \"\"\"\n Get the distance between current node and the neighbor (cost)\n\n Parameters\n ----------\n node_a : GridNode\n current node\n node_b : GridNode\n neighbor node\n weighted : bool, optional\n True, if weighted algorithm is used, by default False\n\n Returns\n -------\n float\n distance between current node and the neighbor (cost)\n \"\"\"\n # Check if we have a straight, diagonal in plane or diagonal in space\n dx = node_b.x - node_a.x\n dy = node_b.y - node_a.y\n dz = node_b.z - node_a.z\n\n ng = math.sqrt(dx * dx + dy * dy + dz * dz)\n\n # weight for 
weighted algorithms\n if weighted:\n ng *= node_b.weight\n\n return ng\n\n def neighbors(\n self,\n node: GridNode,\n diagonal_movement: int = DiagonalMovement.never,\n ) -> List[GridNode]:\n \"\"\"\n Get all neighbors of one node\n\n Parameters\n ----------\n node : GridNode\n node to get neighbors from\n diagonal_movement : int, optional\n if diagonal movement is allowed\n (see enum in diagonal_movement), by default DiagonalMovement.never\n\n Returns\n -------\n list\n list of neighbor nodes\n \"\"\"\n x, y, z = node.x, node.y, node.z\n\n neighbors = []\n # current plane\n cs0 = cd0 = cs1 = cd1 = cs2 = cd2 = cs3 = cd3 = False\n # upper plane\n us0 = ud0 = us1 = ud1 = us2 = ud2 = us3 = ud3 = ut = False # ut = upper top\n # lower plane\n ls0 = ld0 = ls1 = ld1 = ls2 = ld2 = ls3 = ld3 = lb = False # lb = lower bottom\n\n # -y\n if self.walkable(x, y - 1, z):\n neighbors.append(self.nodes[x][y - 1][z])\n cs0 = True\n\n # +x\n if self.walkable(x + 1, y, z):\n neighbors.append(self.nodes[x + 1][y][z])\n cs1 = True\n\n # +y\n if self.walkable(x, y + 1, z):\n neighbors.append(self.nodes[x][y + 1][z])\n cs2 = True\n\n # -x\n if self.walkable(x - 1, y, z):\n neighbors.append(self.nodes[x - 1][y][z])\n cs3 = True\n\n # +z\n if self.walkable(x, y, z + 1):\n neighbors.append(self.nodes[x][y][z + 1])\n ut = True\n\n # -z\n if self.walkable(x, y, z - 1):\n neighbors.append(self.nodes[x][y][z - 1])\n lb = True\n\n # check for connections to other grids\n if node.connections:\n neighbors.extend(node.connections)\n\n if diagonal_movement == DiagonalMovement.never:\n return neighbors\n\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n cd0 = cs0 and cs1\n cd1 = cs1 and cs2\n cd2 = cs2 and cs3\n cd3 = cs3 and cs0\n\n us0 = cs0 and ut\n us1 = cs1 and ut\n us2 = cs2 and ut\n us3 = cs3 and ut\n\n ls0 = cs0 and lb\n ls1 = cs1 and lb\n ls2 = cs2 and lb\n ls3 = cs3 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n cd0 = cs0 or cs1\n cd1 = cs1 or cs2\n cd2 = cs2 or cs3\n cd3 = cs3 or cs0\n\n us0 = cs0 or ut\n us1 = cs1 or ut\n us2 = cs2 or ut\n us3 = cs3 or ut\n\n ls0 = cs0 or lb\n ls1 = cs1 or lb\n ls2 = cs2 or lb\n ls3 = cs3 or lb\n\n elif diagonal_movement == DiagonalMovement.always:\n cd0 = cd1 = cd2 = cd3 = True\n us0 = us1 = us2 = us3 = True\n ls0 = ls1 = ls2 = ls3 = True\n\n # +x -y\n if cd0 and self.walkable(x + 1, y - 1, z):\n neighbors.append(self.nodes[x + 1][y - 1][z])\n else:\n cd0 = False\n\n # +x +y\n if cd1 and self.walkable(x + 1, y + 1, z):\n neighbors.append(self.nodes[x + 1][y + 1][z])\n else:\n cd1 = False\n\n # -x +y\n if cd2 and self.walkable(x - 1, y + 1, z):\n neighbors.append(self.nodes[x - 1][y + 1][z])\n else:\n cd2 = False\n\n # -x -y\n if cd3 and self.walkable(x - 1, y - 1, z):\n neighbors.append(self.nodes[x - 1][y - 1][z])\n else:\n cd3 = False\n\n # -y +z\n if us0 and self.walkable(x, y - 1, z + 1):\n neighbors.append(self.nodes[x][y - 1][z + 1])\n else:\n us0 = False\n\n # +x +z\n if us1 and self.walkable(x + 1, y, z + 1):\n neighbors.append(self.nodes[x + 1][y][z + 1])\n else:\n us1 = False\n\n # +y +z\n if us2 and self.walkable(x, y + 1, z + 1):\n neighbors.append(self.nodes[x][y + 1][z + 1])\n else:\n us2 = False\n\n # -x +z\n if us3 and self.walkable(x - 1, y, z + 1):\n neighbors.append(self.nodes[x - 1][y][z + 1])\n else:\n us3 = False\n\n # -y -z\n if ls0 and self.walkable(x, y - 1, z - 1):\n neighbors.append(self.nodes[x][y - 1][z - 1])\n else:\n ls0 = False\n\n # +x -z\n if ls1 and self.walkable(x + 1, y, z - 1):\n 
neighbors.append(self.nodes[x + 1][y][z - 1])\n else:\n ls1 = False\n\n # +y -z\n if ls2 and self.walkable(x, y + 1, z - 1):\n neighbors.append(self.nodes[x][y + 1][z - 1])\n else:\n ls2 = False\n\n # -x -z\n if ls3 and self.walkable(x - 1, y, z - 1):\n neighbors.append(self.nodes[x - 1][y][z - 1])\n else:\n ls3 = False\n\n # remaining daigonal neighbors\n if diagonal_movement == DiagonalMovement.only_when_no_obstacle:\n ud0 = cs0 and cd0 and cs1 and us0 and us1 and ut\n ud1 = cs1 and cd1 and cs2 and us1 and us2 and ut\n ud2 = cs2 and cd2 and cs3 and us2 and us3 and ut\n ud3 = cs3 and cd3 and cs0 and us3 and us0 and ut\n\n ld0 = cs0 and cd0 and cs1 and ls0 and ls1 and lb\n ld1 = cs1 and cd1 and cs2 and ls1 and ls2 and lb\n ld2 = cs2 and cd2 and cs3 and ls2 and ls3 and lb\n ld3 = cs3 and cd3 and cs0 and ls3 and ls0 and lb\n\n elif diagonal_movement == DiagonalMovement.if_at_most_one_obstacle:\n ud0 = sum([cs0, cd0, cs1, us0, us1, ut]) >= 5\n ud1 = sum([cs1, cd1, cs2, us1, us2, ut]) >= 5\n ud2 = sum([cs2, cd2, cs3, us2, us3, ut]) >= 5\n ud3 = sum([cs3, cd3, cs0, us3, us0, ut]) >= 5\n\n ld0 = sum([cs0, cd0, cs1, ls0, ls1, lb]) >= 5\n ld1 = sum([cs1, cd1, cs2, ls1, ls2, lb]) >= 5\n ld2 = sum([cs2, cd2, cs3, ls2, ls3, lb]) >= 5\n ld3 = sum([cs3, cd3, cs0, ls3, ls0, lb]) >= 5\n\n elif diagonal_movement == DiagonalMovement.always:\n ud0 = ud1 = ud2 = ud3 = True\n ld0 = ld1 = ld2 = ld3 = True\n\n # +x -y +z\n if ud0 and self.walkable(x + 1, y - 1, z + 1):\n neighbors.append(self.nodes[x + 1][y - 1][z + 1])\n\n # +x +y +z\n if ud1 and self.walkable(x + 1, y + 1, z + 1):\n neighbors.append(self.nodes[x + 1][y + 1][z + 1])\n\n # -x +y +z\n if ud2 and self.walkable(x - 1, y + 1, z + 1):\n neighbors.append(self.nodes[x - 1][y + 1][z + 1])\n\n # -x -y +z\n if ud3 and self.walkable(x - 1, y - 1, z + 1):\n neighbors.append(self.nodes[x - 1][y - 1][z + 1])\n\n # +x -y -z\n if ld0 and self.walkable(x + 1, y - 1, z - 1):\n neighbors.append(self.nodes[x + 1][y - 1][z - 1])\n\n # +x +y -z\n if ld1 and self.walkable(x + 1, y + 1, z - 1):\n neighbors.append(self.nodes[x + 1][y + 1][z - 1])\n\n # -x +y -z\n if ld2 and self.walkable(x - 1, y + 1, z - 1):\n neighbors.append(self.nodes[x - 1][y + 1][z - 1])\n\n # -x -y -z\n if ld3 and self.walkable(x - 1, y - 1, z - 1):\n neighbors.append(self.nodes[x - 1][y - 1][z - 1])\n\n return neighbors\n\n def cleanup(self):\n \"\"\"\n Cleanup grid\n \"\"\"\n for x_nodes in self.nodes:\n for y_nodes in x_nodes:\n for z_node in y_nodes:\n z_node.cleanup()" }, { "identifier": "GridNode", "path": "pathfinding3d/core/node.py", "snippet": "class GridNode(Node):\n \"\"\"\n basic node, saves X, Y and Z coordinates on some grid and determine if\n it is walkable.\n \"\"\"\n\n # Coordinates\n x: int = 0\n y: int = 0\n z: int = 0\n\n # Wether this node can be walked through.\n walkable: bool = True\n\n # used for weighted algorithms\n weight: float = 1.0\n\n # grid_id is used if we have more than one grid,\n # normally we just count our grids by number\n # but you can also use a string here.\n # Set it to None if you only have one grid.\n grid_id: Optional[int] = None\n\n connections: Optional[List] = None\n\n identifier: Optional[Tuple] = None\n\n def __post_init__(self):\n super().__init__()\n # for heap\n self.identifier: Tuple = (\n (self.x, self.y, self.z) if self.grid_id is None else (self.x, self.y, self.z, self.grid_id)\n )\n\n def __iter__(self):\n yield self.x\n yield self.y\n yield self.z\n if self.grid_id is not None:\n yield self.grid_id\n\n def connect(self, other_node: 
\"GridNode\"):\n if not self.connections:\n self.connections = [other_node]\n else:\n self.connections.append(other_node)" }, { "identifier": "AStarFinder", "path": "pathfinding3d/finder/a_star.py", "snippet": "class AStarFinder(Finder):\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using A* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n if not heuristic:\n if diagonal_movement == DiagonalMovement.never:\n self.heuristic = manhattan\n else:\n # When diagonal movement is allowed the manhattan heuristic is\n # not admissible it should be octile instead\n self.heuristic = octile\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n open_value: int = 1,\n backtrace_by=None,\n ) -> Optional[List[GridNode]]:\n \"\"\"\n Find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n Optional[List[GridNode]]\n path\n \"\"\"\n\n # pop node with minimum 'f' value\n node = open_list.pop_node()\n node.closed = True\n\n # if reached the end position, construct the path and return it\n # (ignored for bi-directional a*, there we look for a neighbor that is\n # part of the oncoming path)\n if not backtrace_by and node == end:\n return backtrace(end)\n\n # get neighbors of the current node\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed:\n # already visited last minimum f value\n continue\n if backtrace_by and neighbor.opened == backtrace_by:\n # found the oncoming path\n if backtrace_by == BY_END:\n return bi_backtrace(node, neighbor)\n\n return bi_backtrace(neighbor, node)\n\n # check if the neighbor has not been inspected yet, or\n # can be reached with smaller cost from the current node\n self.process_node(grid, neighbor, node, end, open_list, open_value)\n\n # the end has not been reached (yet) keep the find_path loop running\n return None\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the A* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n\n start.g = 0\n start.f = 0\n return super().find_path(start, end, grid)" }, { "identifier": "BestFirst", "path": "pathfinding3d/finder/best_first.py", "snippet": "class BestFirst(AStarFinder):\n \"\"\"\n Similar 
to the default A* algorithm from a_star.\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using BestFirst algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n self.weighted = False\n\n def apply_heuristic(self, node_a: GridNode, node_b: GridNode, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : GridNode\n first node\n node_b : GridNode\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n heuristic value\n \"\"\"\n return super().apply_heuristic(node_a, node_b, heuristic) * 1000000" }, { "identifier": "BiAStarFinder", "path": "pathfinding3d/finder/bi_a_star.py", "snippet": "class BiAStarFinder(AStarFinder):\n \"\"\"\n Similar to the default A* algorithm from a_star.\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Bi-A* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. 
amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the A* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n (can be a list of grids)\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n start_open_list = SimpleHeap(start, grid)\n start.g = 0\n start.f = 0\n start.opened = BY_START\n\n end_open_list = SimpleHeap(end, grid)\n end.g = 0\n end.f = 0\n end.opened = BY_END\n\n while len(start_open_list) > 0 and len(end_open_list) > 0:\n self.runs += 1\n self.keep_running()\n path = self.check_neighbors(\n start,\n end,\n grid,\n start_open_list,\n open_value=BY_START,\n backtrace_by=BY_END,\n )\n if path:\n return path, self.runs\n\n self.runs += 1\n self.keep_running()\n path = self.check_neighbors(\n end,\n start,\n grid,\n end_open_list,\n open_value=BY_END,\n backtrace_by=BY_START,\n )\n if path:\n return path, self.runs\n\n # failed to find path\n return [], self.runs" }, { "identifier": "BreadthFirstFinder", "path": "pathfinding3d/finder/breadth_first.py", "snippet": "class BreadthFirstFinder(Finder):\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Breadth First algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. 
amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n weighted=False,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n if not diagonal_movement:\n self.diagonalMovement = DiagonalMovement.never\n\n def check_neighbors(\n self,\n start: GridNode,\n end: GridNode,\n grid: Grid,\n open_list: List,\n ) -> List[GridNode]:\n \"\"\"\n Find next path segment based on given node\n (or return path if we found the end)\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n open_list : List\n stores nodes that will be processed next\n\n Returns\n -------\n List[GridNode]\n path\n \"\"\"\n node = open_list.pop_node()\n node.closed = True\n\n if node == end:\n return backtrace(end)\n\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if neighbor.closed or neighbor.opened:\n continue\n\n open_list.push_node(neighbor)\n neighbor.opened = True\n neighbor.parent = node" }, { "identifier": "DijkstraFinder", "path": "pathfinding3d/finder/dijkstra.py", "snippet": "class DijkstraFinder(AStarFinder):\n def __init__(\n self,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n ):\n \"\"\"\n Find shortest path using Dijkstra algorithm\n\n Parameters\n ----------\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n \"\"\"\n super().__init__(\n heuristic=null,\n weight=weight,\n diagonal_movement=diagonal_movement,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n\n def apply_heuristic(self, node_a: Node, node_b: Node, heuristic: Optional[Callable] = None) -> float:\n \"\"\"\n Helper function to apply heuristic\n\n Parameters\n ----------\n node_a : Node\n first node\n node_b : Node\n second node\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n\n Returns\n -------\n float\n 0.0\n \"\"\"\n return 0.0" }, { "identifier": "ExecutionRunsException", "path": "pathfinding3d/finder/finder.py", "snippet": "class ExecutionRunsException(Exception):\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "ExecutionTimeException", "path": "pathfinding3d/finder/finder.py", "snippet": "class ExecutionTimeException(Exception):\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "IDAStarFinder", "path": "pathfinding3d/finder/ida_star.py", "snippet": "class IDAStarFinder(Finder):\n \"\"\"\n Iterative Deeping A Star (IDA*) path-finder.\n\n Recursion based on:\n http://www.apl.jhu.edu/~hall/AI-Programming/IDA-Star.html\n\n Path retracing based on:\n V. Nageshwara Rao, Vipin Kumar and K. 
Ramesh\n \"A Parallel Implementation of Iterative-Deeping-A*\", January 1987.\n ftp://ftp.cs.utexas.edu/.snapshot/hourly.1/pub/AI-Lab/tech-reports/\n UT-AI-TR-87-46.pdf\n\n based on the JavaScript implementation by Gerard Meier\n (www.gerardmeier.com)\n \"\"\"\n\n def __init__(\n self,\n heuristic: Optional[Callable] = None,\n weight: int = 1,\n diagonal_movement: int = DiagonalMovement.never,\n time_limit: float = TIME_LIMIT,\n max_runs: Union[int, float] = MAX_RUNS,\n track_recursion: bool = True,\n ):\n \"\"\"\n Find shortest path using IDA* algorithm\n\n Parameters\n ----------\n heuristic : Callable\n heuristic used to calculate distance of 2 points\n weight : int\n weight for the edges\n diagonal_movement : int\n if diagonal movement is allowed\n (see enum in diagonal_movement)\n time_limit : float\n max. runtime in seconds\n max_runs : int\n max. amount of tries until we abort the search\n (optional, only if we enter huge grids and have time constrains)\n <=0 means there are no constrains and the code might run on any\n large map.\n track_recursion : bool\n if we should track recursion\n \"\"\"\n super().__init__(\n heuristic=heuristic,\n weight=weight,\n diagonal_movement=diagonal_movement,\n weighted=False,\n time_limit=time_limit,\n max_runs=max_runs,\n )\n self.track_recursion = track_recursion\n if not heuristic:\n if diagonal_movement == DiagonalMovement.never:\n self.heuristic = manhattan\n else:\n # When diagonal movement is allowed the manhattan heuristic is\n # not admissible it should be octile instead\n self.heuristic = octile\n\n self.nodes_visited: int\n\n def search(\n self,\n node: GridNode,\n g: float,\n cutoff: float,\n path: List[GridNode],\n depth: int,\n end: GridNode,\n grid: Grid,\n ) -> Union[float, GridNode]:\n \"\"\"\n Recursive IDA* search implementation\n\n Parameters\n ----------\n node : GridNode\n current node\n g : float\n cost from start to current node\n cutoff : float\n cutoff cost\n path : List[GridNode]\n path\n depth : int\n current depth\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Union[float, GridNode]\n cutoff cost or end node\n \"\"\"\n self.runs += 1\n self.keep_running()\n\n self.nodes_visited += 1\n\n f = g + self.apply_heuristic(node, end) * self.weight\n\n # We've searched too deep for this iteration.\n if f > cutoff:\n return f\n\n if node == end:\n if len(path) < depth:\n path += [None] * (depth - len(path) + 1)\n path[depth] = node\n return node\n\n neighbors = self.find_neighbors(grid, node)\n\n # Sort the neighbors, gives nicer paths. But, this deviates\n # from the original algorithm - so I left it out\n # TODO: make this an optional parameter\n # def sort_neighbors(a, b):\n # return self.apply_heuristic(a, end) - \\\n # self.apply_heuristic(b, end)\n # sorted(neighbors, sort_neighbors)\n min_t = float(\"inf\")\n for neighbor in neighbors:\n if self.track_recursion:\n # Retain a copy for visualisation. 
Due to recursion, this\n # node may be part of other paths too.\n neighbor.retain_count += 1\n neighbor.tested = True\n\n t = self.search(\n neighbor,\n g + grid.calc_cost(node, neighbor),\n cutoff,\n path,\n depth + 1,\n end,\n grid,\n )\n\n if isinstance(t, GridNode):\n if len(path) < depth:\n path += [None] * (depth - len(path) + 1)\n path[depth] = node\n return t\n\n # Decrement count, then determine whether it's actually closed.\n if self.track_recursion:\n neighbor.retain_count -= 1\n if neighbor.retain_count == 0:\n neighbor.tested = False\n\n if t < min_t:\n min_t = t\n\n return min_t\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the IDA* algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n self.nodes_visited = 0 # for statistics\n\n # initial search depth, given the typical heuristic contraints,\n # there should be no cheaper route possible.\n cutoff = self.apply_heuristic(start, end)\n\n while True:\n path = []\n\n # search till cut-off depth:\n t = self.search(start, 0, cutoff, path, 0, end, grid)\n\n if isinstance(t, bool) and not t:\n # only when an error occured we return \"False\"\n break\n\n # If t is a node, it's also the end node. Route is now\n # populated with a valid path to the end node.\n if isinstance(t, GridNode):\n return (\n [(node.x, node.y, node.z, node.grid_id) for node in path],\n self.runs,\n )\n\n # Try again, this time with a deeper cut-off. The t score\n # is the closest we got to the end node.\n cutoff = t\n\n return [], self.runs" }, { "identifier": "MinimumSpanningTree", "path": "pathfinding3d/finder/msp.py", "snippet": "class MinimumSpanningTree(Finder):\n \"\"\"\n Minimum Spanning Tree implementation by Brad Beattie\n (see https://github.com/brean/python-pathfinding/issues/18)\n\n The wikipedia page has a nice description about MSP:\n https://en.wikipedia.org/wiki/Minimum_spanning_tree\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.heuristic = heuristic.null\n\n def tree(self, grid: Grid, start: GridNode) -> List:\n \"\"\"\n Returns a list of nodes that are part of the minimum spanning tree\n of the grid.\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n start : GridNode\n start node\n\n Returns\n -------\n List\n \"\"\"\n\n return list(self.itertree(grid, start))\n\n def itertree(self, grid: Grid, start: GridNode):\n \"\"\"\n Returns a generator that yields nodes that are part of the minimum\n spanning tree of the grid.\n\n Parameters\n ----------\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n start : GridNode\n start node\n \"\"\"\n # Finder.process_node requires an end node, which we don't have.\n # The following value tricks the call to Finder.apply_heuristic.\n # Though maybe we want to generate a limited spanning tree that\n # trends in a certain direction? 
In which case we'd want a more\n # nuanced solution.\n end = namedtuple(\"FakeNode\", [\"x\", \"y\", \"z\"])(-1, -1, -1)\n\n start.opened = True\n\n open_list = SimpleHeap(start, grid)\n\n while len(open_list) > 0:\n self.runs += 1\n self.keep_running()\n\n node = open_list.pop_node()\n node.closed = True\n yield node\n\n neighbors = self.find_neighbors(grid, node)\n for neighbor in neighbors:\n if not neighbor.closed:\n self.process_node(grid, neighbor, node, end, open_list, open_value=True)\n\n def find_path(self, start: GridNode, end: GridNode, grid: Grid) -> Tuple[List, int]:\n \"\"\"\n Find a path from start to end node on grid using the Minimum Spanning\n Tree algorithm\n\n Parameters\n ----------\n start : GridNode\n start node\n end : GridNode\n end node\n grid : Grid\n grid that stores all possible steps/tiles as 3D-list\n\n Returns\n -------\n Tuple[List, int]\n path, number of iterations\n \"\"\"\n self.start_time = time.time() # execution time limitation\n self.runs = 0 # count number of iterations\n\n for node in self.itertree(grid, start):\n if node == end:\n path = deque()\n step = node\n while step.parent:\n path.appendleft(step)\n step = step.parent\n path.appendleft(step)\n return path, self.runs\n\n return [], self.runs" } ]
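The Grid.calc_cost snippet in the context list above boils the step cost down to the 3D Euclidean length of the move, scaled by the destination node's weight when a weighted finder asks for it. A minimal sketch of the three step costs the diagonal-movement settings distinguish, assuming the same formula (the helper name step_cost is illustrative, not part of the library):

import math

def step_cost(dx: int, dy: int, dz: int, weight: float = 1.0) -> float:
    # Mirrors Grid.calc_cost: Euclidean length of the step, optionally
    # scaled by the destination node's weight for weighted finders.
    return math.sqrt(dx * dx + dy * dy + dz * dz) * weight

print(step_cost(1, 0, 0))               # straight move      -> 1.0
print(step_cost(1, 1, 0))               # in-plane diagonal  -> ~1.414
print(step_cost(1, 1, 1))               # space diagonal     -> ~1.732
print(step_cost(1, 0, 0, weight=99.0))  # heavily weighted   -> 99.0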
import numpy as np import pytest from pathfinding3d.core.diagonal_movement import DiagonalMovement from pathfinding3d.core.grid import Grid from pathfinding3d.core.node import GridNode from pathfinding3d.finder.a_star import AStarFinder from pathfinding3d.finder.best_first import BestFirst from pathfinding3d.finder.bi_a_star import BiAStarFinder from pathfinding3d.finder.breadth_first import BreadthFirstFinder from pathfinding3d.finder.dijkstra import DijkstraFinder from pathfinding3d.finder.finder import ExecutionRunsException, ExecutionTimeException from pathfinding3d.finder.ida_star import IDAStarFinder from pathfinding3d.finder.msp import MinimumSpanningTree
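One detail worth noting before the test code: the finders imported above do not all return the same path element type. Most of them backtrace GridNode objects, while IDAStarFinder.find_path (see its snippet) returns plain (x, y, z, grid_id) tuples, which is why the tests branch on isinstance. A standalone sketch of that normalization (the helper name to_coords is illustrative):

from typing import List, Tuple

def to_coords(path) -> List[Tuple[int, int, int]]:
    # Accept both GridNode-like objects (with .x/.y/.z attributes) and
    # (x, y, z, grid_id) tuples, and return bare coordinate triples.
    coords = []
    for node in path:
        if isinstance(node, tuple):
            coords.append((node[0], node[1], node[2]))
        else:
            coords.append((node.x, node.y, node.z))
    return coords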
11,009
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup()
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup()
finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT)
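The gold next_line above is the first statement inside the truncated test_path_diagonal loop from cropped_code. A sketch of how that loop plausibly continues, reusing the module-level fixtures (Grid, SIMPLE_MATRIX, finders, TIME_LIMIT, DiagonalMovement) from cropped_code; the exact assertion of the real test is not shown in the dump, so the check below is a deliberately weak assumption:

def test_path_diagonal():
    # test diagonal movement
    grid = Grid(matrix=SIMPLE_MATRIX)
    start = grid.node(0, 0, 0)
    end = grid.node(4, 4, 0)
    for find in finders:
        grid.cleanup()
        finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT)  # gold line
        path, runs = finder.find_path(start, end, grid)
        # the real test presumably asserts a specific path length; here we
        # only check that a path was found at all
        assert len(path) > 0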
0
2023-11-21 10:14:12+00:00
16k
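Taken together, the record above pairs an in-file prefix (import_statement + cropped_code) with retrieved cross-file snippets (context) and the line to predict (next_line); gold_snippet_index picks out the context entry the gold line actually depends on (index 0 is DiagonalMovement, which next_line uses). A minimal sketch of how such a record could be consumed, assuming these field semantics; the prompt template and exact-match check are assumptions, not something the dump itself specifies:

def build_prompt(record: dict) -> str:
    # Concatenate the retrieved cross-file snippets, then the in-file prefix.
    context_block = "\n".join(c["snippet"] for c in record["context"])
    return context_block + "\n" + record["import_statement"] + "\n" + record["cropped_code"]

def exact_match(prediction: str, record: dict) -> bool:
    # Next-line prediction scored by whitespace-insensitive string equality.
    return prediction.strip() == record["next_line"].strip()

def gold_snippet(record: dict) -> dict:
    # The context entry the gold completion relies on.
    return record["context"][record["gold_snippet_index"]]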
yuukawahiroshi/ddb-tools
mixins_ddb.py
[ { "identifier": "DDIModel", "path": "utils/ddi_utils.py", "snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.art_data = {}\n self.vqm_data = {}\n self.offset_map = {}\n\n def read(self, temp_path: Optional[str] = None, cat_only: bool = False):\n if temp_path or cat_only:\n import yaml\n\n if cat_only:\n with open(os.path.join(temp_path, 'sta.yml'), mode='r',\n encoding='utf-8') as sta_f:\n self.sta_data = yaml.load(sta_f)\n with open(os.path.join(temp_path, 'art.yml'), mode='r',\n encoding='utf-8') as art_f:\n self.art_data = yaml.load(art_f)\n vqm_data = None\n if os.path.isfile(os.path.join(temp_path, 'vqm.yml')):\n with open(os.path.join(temp_path, 'vqm.yml'), mode='r',\n encoding='utf-8') as vqm_f:\n self.vqm_data = yaml.load(vqm_f)\n else:\n self.ddi_data = io.BytesIO(self.ddi_bytes)\n # DBSe\n # Tonio.ddi has no DBSe block\n \n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 0\n # assert ddi_data.read(4).decode() == 'DBSe'\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(ddi_data.read(8), byteorder='little') == 1\n # assert int.from_bytes(ddi_data.read(4), byteorder='little') == 3\n\n # PHDC\n phdc_offset = self.ddi_bytes.find(b'PHDC')\n if phdc_offset >= 0:\n self.ddi_data.seek(phdc_offset)\n self.phdc_data = self.read_phdc()\n\n self.offset_map['phdc'] = [phdc_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'phdc.yml'), mode='w',\n encoding='utf-8') as phdc_f:\n phdc_str = yaml.dump(self.phdc_data, default_flow_style=False,\n sort_keys=False)\n phdc_f.write(phdc_str)\n\n # TDB\n tdb_offset = self.ddi_bytes.find(b'\\xFF'*8+b'TDB ')\n if tdb_offset >= 0:\n self.ddi_data.seek(tdb_offset)\n self.tdb_data = self.read_tdb()\n self.offset_map['tdb'] = [tdb_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'tdb.yml'), mode='w',\n encoding='utf-8') as tdb_f:\n tdb_str = yaml.dump(self.tdb_data, default_flow_style=False,\n sort_keys=False)\n tdb_f.write(tdb_str)\n\n # DBV\n dbv_offset = self.ddi_bytes.find(b'\\x00'*8+b'DBV ')\n self.ddi_data.seek(dbv_offset)\n self.read_dbv()\n self.offset_map['dbv'] = [dbv_offset, self.ddi_data.tell()]\n\n # STA\n sta_offset = self.ddi_bytes.find(b'\\x00'*8+b'STA ')\n sta_offset = reverse_search(self.ddi_bytes, b'ARR ', sta_offset) - 8\n self.ddi_data.seek(sta_offset)\n self.sta_data = self.read_sta()\n self.offset_map['sta'] = [sta_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'sta.yml'), mode='w',\n encoding='utf-8') as sta_f:\n sta_str = yaml.dump(self.sta_data, default_flow_style=False,\n sort_keys=False)\n sta_f.write(sta_str)\n\n # ART\n art_offset = self.ddi_bytes.find(b'\\x00'*8+b'ART ')\n art_offset = reverse_search(self.ddi_bytes, b'ARR ', art_offset) - 8\n self.ddi_data.seek(art_offset)\n self.art_data = self.read_art()\n self.offset_map['art'] = [art_offset, self.ddi_data.tell()]\n\n if temp_path:\n with open(os.path.join(temp_path, 'art.yml'), mode='w',\n encoding='utf-8') as art_f:\n art_str = yaml.dump(self.art_data, default_flow_style=False,\n sort_keys=False)\n art_f.write(art_str)\n\n # VQM\n vqm_offset = self.ddi_bytes.find(b'\\xFF'*8+b'VQM ')\n self.vqm_data = None\n if vqm_offset != -1:\n self.ddi_data.seek(vqm_offset)\n self.vqm_data = self.read_vqm()\n self.offset_map['vqm'] = [vqm_offset, self.ddi_data.tell()]\n\n if 
temp_path:\n with open(os.path.join(temp_path, 'vqm.yml'), mode='w',\n encoding='utf-8') as vqm_f:\n vqm_str = yaml.dump(self.vqm_data, default_flow_style=False,\n sort_keys=False)\n vqm_f.write(vqm_str)\n \n \n # DDI convert\n self.ddi_data_dict: dict[str, dict[str, list[artp_type]]] = {\n 'sta': {},\n 'art': {},\n }\n\n if self.vqm_data is not None:\n self.ddi_data_dict = {\n 'vqm': {},\n 'sta': {},\n 'art': {},\n }\n vqm_dict = []\n for idx, vqmp in self.vqm_data.items():\n vqm_dict.append({'snd': vqmp['snd'], 'epr': vqmp['epr'], 'pitch': vqmp['pitch1']})\n self.ddi_data_dict['vqm'] = vqm_dict\n\n sta_dict: dict[str, list[artp_type]] = {}\n for stau in self.sta_data.values():\n stau_dict: list[artp_type] = []\n for idx, stap in stau['stap'].items():\n stau_dict.append({'snd': stap['snd'], 'epr': stap['epr'], 'pitch': stap['pitch1']})\n sta_dict[stau['phoneme']] = stau_dict\n self.ddi_data_dict['sta'] = {key: sta_dict[key]\n for key in sorted(sta_dict.keys())}\n\n art_dict: dict[str, list[artp_type]] = {}\n for art in self.art_data.values():\n if 'artu' in art.keys():\n for artu in art['artu'].values():\n key = art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n if 'art' in art.keys():\n for sub_art in art['art'].values():\n sub_art: art_type\n if 'artu' in sub_art.keys():\n for artu in sub_art['artu'].values():\n key = art['phoneme']+' '+sub_art['phoneme']+' '+artu['phoneme']\n art_dict[key] = []\n for artp in artu['artp'].values():\n art_dict[key].append({'snd': artp['snd'],\n 'snd_start': artp['snd_start'],\n 'epr': artp['epr'],\n 'pitch': artp['pitch1']})\n self.ddi_data_dict['art'] = {key: art_dict[key]\n for key in sorted(art_dict.keys())}\n\n\n def save(self, dst_path: Optional[str] = None):\n import yaml\n\n with open(os.path.join(dst_path, 'ddi.yml'), mode='w', encoding='utf-8') as ddi_f:\n ddi_str = yaml.dump(self.ddi_data_dict, default_flow_style=False,\n sort_keys=False)\n ddi_f.write(ddi_str)\n\n\n def read_phdc(self):\n phdc_data: dict[str, dict[int, list[str]]\n | dict[str, dict[int, str]]\n | dict[str, list[str]]\n | str]\n phdc_data = {}\n # PHDC\n phoneme_data: dict[str, list[str]] = {\"voiced\": [], \"unvoiced\": []}\n assert self.ddi_data.read(4).decode() == 'PHDC'\n phdc_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 4\n phoneme_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phoneme_num):\n bytes_str = self.ddi_data.read(0x1F)\n assert bytes_str[-1] in [0, 1]\n real_data = bytes_str[:-1].decode().strip('\\x00')\n\n phoneme_type = \"voiced\" if bytes_str[-1] == 0 else \"unvoiced\"\n\n phoneme_data[phoneme_type].append(real_data)\n phdc_data['phoneme'] = phoneme_data\n\n # PHG2\n phg2_data: dict[str, dict[int, str]] = {}\n assert self.ddi_data.read(4).decode() == 'PHG2'\n phg2_size = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(phg2_epr_guide_num):\n phg2_key = read_str(self.ddi_data)\n phg2_data[phg2_key] = {}\n temp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(temp_num):\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n phg2_data[phg2_key][idx] = read_str(self.ddi_data)\n assert int.from_bytes(self.ddi_data.read(4), 
byteorder='little') == 0\n phdc_data['phg2'] = phg2_data\n\n # epr_guide\n epr_guide_data: dict[str, list[str]] = {}\n epr_guide_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_guide_size = phdc_size-phg2_size-0x10-0x1F*phoneme_num-4\n epr_guide_bytes = self.ddi_data.read(epr_guide_size)\n offset = 0\n for i in range(epr_guide_num):\n key = epr_guide_bytes[offset:offset+0x20].decode().strip('\\x00')\n assert int.from_bytes(epr_guide_bytes[offset+0x20:offset+0x24],\n byteorder='little') == 4\n epr_guide_data[key] = []\n offset += 0x24\n while(offset < len(epr_guide_bytes) and epr_guide_bytes[offset] == 0):\n if epr_guide_bytes[offset+7] == 0x40:\n value = epr_guide_bytes[offset:offset + 7]\n start_idx = 0\n for i in range(7):\n if value[i] != 0:\n start_idx = i\n break\n # TODO: Need to check carefully. \"b'XXX'\" and we only take XXX\n value = bytes_to_str(value[start_idx:])\n epr_guide_data[key].append(value)\n else:\n assert int.from_bytes(epr_guide_bytes[offset:offset + 8],\n byteorder='little') == 0\n epr_guide_data[key].append('')\n offset += 8\n assert offset == len(epr_guide_bytes)\n phdc_data['epr_guide'] = epr_guide_data\n\n # hash string\n # phdc_data['hash'] = self.ddi_data.read(0x20).decode()\n # assert int.from_bytes(self.ddi_data.read(0xE0), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n # assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n\n return phdc_data\n\n\n def read_tdb(self) -> dict[int, str]:\n tdb_data: dict[int, str] = {}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TDB '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi (B9 13 10 00)\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n tmm_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n str_list = ['pitch', 'dynamics', 'opening']\n for i in range(tmm_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'TMM '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # print(i, idx)\n str_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert str_num == 3\n for j in range(str_num):\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 0\n assert read_str(self.ddi_data) == str_list[j]\n phoneme = read_str(self.ddi_data)\n tdb_data[idx] = phoneme\n assert read_str(self.ddi_data) == 'timbre'\n return tdb_data\n\n\n def read_dbv(self) -> None:\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'DBV '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # 4 for AVANNA, 5 for others?\n\n\n def read_sta(self) -> dict[int, artu_type]:\n sta_data: dict[int, artu_type] = {}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n\n assert self.ddi_data.read(4).decode() == 'STA '\n int.from_bytes(self.ddi_data.read(4), byteorder='little') 
# == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 1\n stau_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for i in range(stau_num):\n stau_data: artu_type = {'phoneme': '', 'stap': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n stau_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(8) == b'\\xFF'*8\n stap_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(stap_num):\n stap_data: artp_type = {'snd': '', 'snd_length': '', 'epr': []}\n _pos = self.ddi_data.tell()\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'STAp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n stap_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n stap_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n stap_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n \n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 2\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0x3D\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'SND'\n stap_data['snd_length'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert read_str(self.ddi_data) == 'EpR'\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n stap_data['epr'] = epr_list\n stap_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n stap_data['snd'] = f'{snd_offset_pos:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n\n _pos = self.ddi_data.tell()\n stap_data['unknown4'] = bytes_to_str(self.ddi_data.read(0x10))\n stap_idx = read_str(self.ddi_data)\n assert stap_idx not in stau_data['stap'].keys()\n stau_data['stap'][stap_idx] = stap_data\n stau_data['stap'] = {k: stau_data['stap'][k]\n for k in 
sorted(stau_data['stap'].keys())}\n stau_data['phoneme'] = read_str(self.ddi_data)\n sta_data[stau_idx] = stau_data\n sta_data = {k: sta_data[k] for k in sorted(sta_data.keys())}\n assert read_str(self.ddi_data) == 'normal'\n assert read_str(self.ddi_data) == 'stationary'\n return sta_data\n\n\n def read_art(self) -> dict[int, art_type]:\n total_art_data: dict[int, art_type] = {}\n int.from_bytes(self.ddi_data.read(8), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(read_arr(self.ddi_data), byteorder='little') != 0\n while(True):\n start = self.ddi_data.read(8)\n if not (start in [b'\\x00'*8, b'\\xFF'*8]):\n offset = self.ddi_data.tell()-8\n self.ddi_data.seek(offset)\n assert read_str(self.ddi_data) == 'articulation'\n break\n assert self.ddi_data.read(4).decode() == 'ART '\n art_idx, art_data = self.read_art_block()\n total_art_data[art_idx] = art_data\n total_art_data = {key: total_art_data[key]\n for key in sorted(total_art_data.keys())}\n return total_art_data\n\n\n def read_art_block(self) -> tuple[int, art_type]:\n art_data: art_type = {'phoneme': '', 'artu': {}, 'art': {}}\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n art_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artu_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n i = -1\n for i in range(artu_num):\n artu_data: artu_type = {'phoneme': '', 'artp': {}}\n assert int.from_bytes(self.ddi_data.read(8), byteorder='little') == 0\n block_type = self.ddi_data.read(4).decode()\n if block_type == 'ART ':\n sub_art_idx, sub_art_data = self.read_art_block()\n art_data['art'][sub_art_idx] = sub_art_data\n continue\n else:\n assert block_type == 'ARTu'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n artu_idx = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n # TODO: why to be 1?\n assert int.from_bytes(self.ddi_data.read(8),\n byteorder='little') in [0, 1]\n self.ddi_data.read(4)\n assert self.ddi_data.read(4) == b'\\xFF'*4\n artp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n for j in range(artp_num):\n artp_data: artp_type = {'snd': '', 'snd_unknown': '', 'epr': []}\n dev_artp_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['dev_artp'] = f'{dev_artp_offset:0>8x}'\n assert self.ddi_data.read(4).decode() == 'ARTp'\n int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n artp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n artp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n artp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # print(f'art {i:4d} {j:4d} {unknown}')\n # if env['unknown'] is None:\n # env['unknown'] = unknown\n # else:\n # assert env['unknown'] == unknown\n assert int.from_bytes(self.ddi_data.read(4), 
byteorder='little') == 2\n # TODO: This doesn't seem to be an index actually\n artp_idx = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt1 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt1'] = f'{snd_len_empt1:08x}'\n assert read_str(self.ddi_data) == 'SND'\n snd_len_sta = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n artp_data['snd_len_sta'] = f'{snd_len_sta:08x}'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4).decode() == 'EMPT'\n snd_len_empt2 = int.from_bytes(self.ddi_data.read(4), byteorder='little') # == 0 Exception: Tonio.ddi\n # artp_data['snd_len_empt2'] = f'{snd_len_empt2:08x}'\n assert read_str(self.ddi_data) == 'EpR'\n loc = self.ddi_data.tell()\n try:\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n except AssertionError:\n self.ddi_data.seek(loc)\n self.ddi_data.read(4) # == b'\\xFF'*4 Exception: Tonio.ddi (epr_num)\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n epr_offset_pos = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8),\n byteorder='little')\n epr_list.append(f'{epr_offset_pos:0>8x}={epr_offset:0>8x}')\n artp_data['epr'] = epr_list\n artp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n \n snd_identifier = int.from_bytes(self.ddi_data.read(4),\n byteorder='little')\n # TODO: why this number?\n snd_offset_pos = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n artp_data['snd'] = f'{snd_offset_pos:08x}={snd_offset-0x12:016x}_{snd_identifier:08x}'\n\n snd_offset2_pos = self.ddi_data.tell()\n snd_offset2 = int.from_bytes(self.ddi_data.read(8), byteorder='little') # == snd_offset+0x800 Exception: Tonio.ddi (0)\n artp_data['snd_start'] = f'{snd_offset2_pos:08x}={snd_offset2-0x12:016x}_{snd_identifier:08x}'\n\n ddi_bytes: bytes = self.ddi_bytes[self.ddi_data.tell():self.ddi_data.tell() + 1024]\n align_length = ddi_bytes.find(b'default')-4\n align_bytes = self.ddi_data.read(align_length)\n frame_align = []\n if align_length > 4:\n align_group_num = int.from_bytes(align_bytes[0:4], byteorder='little')\n # In V3 format, each group has int32 * 4 bytes\n align_bytes = align_bytes[4:]\n align_io = io.BytesIO(align_bytes)\n for _ in range(0, align_group_num):\n frame_align_group = {\n \"start\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"start2\": int.from_bytes(align_io.read(4), byteorder='little'),\n \"end2\": int.from_bytes(align_io.read(4), byteorder='little'),\n }\n frame_align.append(frame_align_group)\n else: # V2 format\n frame_align_group = []\n for i in range(0, len(align_bytes), 4):\n frame_align_group.append(int.from_bytes(align_bytes[i:i+4], byteorder='little'))\n frame_align.append(frame_align_group)\n artp_data['frame_align'] = frame_align\n \n assert 
read_str(self.ddi_data) == 'default'\n\n assert artp_idx not in artu_data['artp'].keys()\n artu_data['artp'][artp_idx] = artp_data\n artu_data['artp'] = {k: artu_data['artp'][k]\n for k in sorted(artu_data['artp'].keys())}\n artu_data['phoneme'] = read_str(self.ddi_data)\n art_data['artu'][artu_idx] = artu_data\n art_data['artu'] = {k: art_data['artu'][k]\n for k in sorted(art_data['artu'].keys())}\n art_data['art'] = {k: art_data['art'][k]\n for k in sorted(art_data['art'].keys())}\n art_data['phoneme'] = read_str(self.ddi_data)\n if len(art_data['art'].keys()) == 0:\n del art_data['art']\n if len(art_data['artu'].keys()) == 0:\n del art_data['artu']\n return art_idx, art_data\n\n\n def read_vqm(self) -> dict[int, artp_type]:\n vqm_data: dict[int, artp_type] = {}\n\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQM '\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert self.ddi_data.read(8) == b'\\xFF'*8\n\n assert self.ddi_data.read(4).decode() == 'VQMu'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n\n vqmp_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == vqmp_num\n for i in range(vqmp_num):\n vqmp_data = {'snd': '', 'epr': []}\n assert self.ddi_data.read(8) == b'\\xFF'*8\n assert self.ddi_data.read(4).decode() == 'VQMp'\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 1\n vqmp_data['unknown1'] = bytes_to_str(self.ddi_data.read(0x0a))\n vqmp_data['pitch1'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['pitch2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['unknown2'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n vqmp_data['dynamics'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n # TODO: that may not be same as env['unknown']\n vqmp_data['unknown3'] = struct.unpack('<f', self.ddi_data.read(4))[0]\n assert int.from_bytes(self.ddi_data.read(4), byteorder='little') == 0\n assert self.ddi_data.read(4) == b'\\xFF'*4\n epr_num = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n epr_list: list[str] = []\n for k in range(epr_num):\n ddi_epr_offset = self.ddi_data.tell()\n epr_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n epr_list.append(f'{ddi_epr_offset:0>8x}={epr_offset:0>8x}')\n vqmp_data['epr'] = epr_list\n vqmp_data['fs'] = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n assert self.ddi_data.read(2) == b'\\x01\\x00'\n snd_identifier = int.from_bytes(self.ddi_data.read(4), byteorder='little')\n ddi_snd_offset = self.ddi_data.tell()\n snd_offset = int.from_bytes(self.ddi_data.read(8), byteorder='little')\n vqmp_data['snd'] = f'{ddi_snd_offset:0>8x}={snd_offset:016x}_{snd_identifier:08x}'\n assert self.ddi_data.read(0x10) == b'\\xFF'*0x10\n vqmp_idx = int(read_str(self.ddi_data))\n vqm_data[vqmp_idx] = vqmp_data\n assert read_str(self.ddi_data) == 'GROWL'\n assert read_str(self.ddi_data) == 'vqm'\n return vqm_data" }, { 
"identifier": "str_to_bytes", "path": "utils/ddi_utils.py", "snippet": "def str_to_bytes(data: str) -> bytes:\n return bytes([int(piece, 16) for piece in data.split(' ')])" }, { "identifier": "str_to_data", "path": "utils/ddi_utils.py", "snippet": "def str_to_data(data: str) -> bytes:\n data = str(data)\n return len(data).to_bytes(4, byteorder='little') + data.encode()" }, { "identifier": "stream_reverse_search", "path": "utils/ddi_utils.py", "snippet": "def stream_reverse_search(data: io.BufferedReader, search: bytes, offset: int, limit: int = -1) -> int:\n if limit == -1:\n limit = 1024 * 1024 * 10\n offset -= len(search)\n for i in range(offset, 0, -1):\n data.seek(i)\n if data.read(len(search)) == search:\n return i\n if offset - i > limit:\n break\n\n return -1" } ]
from typing import TypedDict from utils.ddi_utils import DDIModel, str_to_bytes, str_to_data, stream_reverse_search import argparse import io import re import os import os.path import struct
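The import line above pulls in DDIModel, whose read() walks the PHDC/TDB/DBV/STA/ART/VQM blocks of a .ddi database and leaves parsed dictionaries plus an offset_map behind. A minimal usage sketch; the file name is a placeholder, not taken from the repository:

from utils.ddi_utils import DDIModel

with open("singer.ddi", "rb") as f:      # placeholder path
    ddi_model = DDIModel(f.read())
ddi_model.read()

print(ddi_model.offset_map)                  # block name -> [start, end] offsets
print(list(ddi_model.ddi_data_dict.keys()))  # 'vqm' (if present), 'sta', 'art'
if ddi_model.vqm_data is not None:
    print(len(ddi_model.vqm_data), "growl (VQM) entries")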
11,937
ddb_snd_offset = output_stream.tell() mixins_ddb_stream.seek(snd_offset) snd_bytes = mixins_ddb_stream.read(snd_len) hed = snd_bytes[0:4].decode() if hed != "SND ": raise Exception("Mixins DDB file is broken") output_stream.write(snd_bytes) vqm_meta_list.append({ "idx": vqm_idx, "epr": epr_list, "snd_id": snd_id, "snd": ddb_snd_offset, "fs": vqm_info["fs"], "unknown1": vqm_info["unknown1"], "pitch1": vqm_info["pitch1"], "pitch2": vqm_info["pitch2"], "unknown2": vqm_info["unknown2"], "unknown3": vqm_info["unknown3"], "dynamics": vqm_info["dynamics"], }) # Create new DDI vqm_stream = _create_vqm_stream(vqm_meta_list) ddi_vqm_bytes = vqm_stream.getvalue() if "vqm" in src_ddi_model.ddi_data_dict: ddi_vqm_pos = src_ddi_model.offset_map["vqm"][0] ddi_vqm_end_pos = src_ddi_model.offset_map["vqm"][1] else: ddi_vqm_pos = src_ddi_bytes.find(ddi_footer) ddi_vqm_end_pos = ddi_vqm_pos # Bump dbv_len dbv_len_post = src_ddi_model.offset_map["dbv"][0] + 0x18 src_ddi_stream.seek(dbv_len_post) src_ddi_dbv_len = int.from_bytes(src_ddi_stream.read(4), byteorder='little') src_ddi_dbv_len += 1 src_ddi_stream.seek(dbv_len_post) src_ddi_stream.write(src_ddi_dbv_len.to_bytes(4, byteorder='little')) src_ddi_bytes = src_ddi_stream.getvalue() dst_ddi_bytes = byte_replace(src_ddi_bytes, ddi_vqm_pos, ddi_vqm_end_pos - ddi_vqm_pos, ddi_vqm_bytes) return dst_ddi_bytes def mixins_sta2vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader, sta2vqm_phoneme: str): mixins_ddi_stream = mixins_ddi_model.ddi_data print("Reading source DDI...") src_ddi_model = DDIModel(src_ddi_bytes) src_ddi_model.read() src_ddi_stream = src_ddi_model.ddi_data if "vqm" in src_ddi_model.ddi_data_dict: print("Source DDI already has vqm stream, continue will replace it and won't remove vqm stream from ddb file.") print("Continue? (Y/n)", end=" ") choice = input().strip().lower() if choice != "y" or choice != "": return # Find stationary in mixins mixins_sta_items = None for _, sta_items in mixins_ddi_model.sta_data.items(): if sta_items["phoneme"] == sta2vqm_phoneme: mixins_sta_items = sta_items break if mixins_sta_items is None: raise Exception("Mixins DDI doesn't have stationary entry for phoneme \"%s\"" % sta2vqm_phoneme) vqm_meta_list: list[VQMMeta] = [] vqm_idx = 0 for sta_idx, sta_item in mixins_sta_items["stap"].items(): output_epr_list = [] # EpR epr_list = sta_item["epr"] if len(epr_list) < 100: print(f"Warning: EpR count is less than 100, EpR count: {len(epr_list)}") continue epr_list = epr_list[0:100] for epr_info in epr_list: epr_offset = epr_info.split("=") ddb_epr_offset = output_stream.tell() epr_offset = int(epr_offset[1], 16) mixins_ddb_stream.seek(epr_offset) hed = mixins_ddb_stream.read(4) if hed != b"FRM2": raise Exception("Mixins DDB file is broken") frm_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') epr_cutoff = epr_offset + frm_len mixins_ddb_stream.seek(epr_offset) frm_bytes = mixins_ddb_stream.read(frm_len) output_stream.write(frm_bytes) output_epr_list.append(ddb_epr_offset) # SND ddi_snd_pos, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") ddi_snd_pos = int(ddi_snd_pos, 16) snd_offset = int(snd_offset, 16) snd_id = int(snd_id, 16)
#!/bin/env python3 # I thought what I'd do was, I'd pretend I was one of those deaf-mutes. from __future__ import annotations ddi_footer = b'\x05\x00\x00\x00' + "voice".encode() class SmartFormatter(argparse.HelpFormatter): def _split_lines(self, text, width): if text.startswith('R|'): return text[2:].splitlines() # this is the RawTextHelpFormatter._split_lines return argparse.HelpFormatter._split_lines(self, text, width) class VQMMeta(TypedDict): idx: str epr: list[int] snd_id: int snd: int fs: int unknown1: str pitch1: float pitch2: float unknown2: float unknown3: float dynamics: float def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes): return src_bytes[:offset] + replace_bytes + src_bytes[offset + override_len:] def parse_args(args=None): # : list[str] # initialize parser parser = argparse.ArgumentParser(formatter_class=SmartFormatter) parser.add_argument('--src_path', required=True, help='source ddi file path') parser.add_argument('--mixins_path', help='the mixins ddi file path. default to be same as src_path') parser.add_argument('--dst_path', help='output folder, ' 'default to be "./[singer name]/mixins"') parser.add_argument('--mixins_item', choices=['vqm', 'sta2vqm'], default='vqm', help='R|mixins item, ' 'default to be "vqm"\n' 'select from: \n' ' vqm: growl\n' ' sta2vqm: convert stationary entry to growl\n') parser.add_argument('--sta2vqm_phoneme', default="Grw", help='phoneme for sta2vqm, will use this phoneme to generate growl, default to be "Grw"') # parse args args = parser.parse_args(args) src_ddi_path: str = os.path.normpath(args.src_path) if not os.path.exists(src_ddi_path): raise Exception("ddi file not exists") src_path = os.path.dirname(src_ddi_path) src_singer_name = os.path.splitext(os.path.basename(src_ddi_path))[0] mixins_ddi_path = args.mixins_path or src_ddi_path mixins_ddi_path: str = os.path.normpath(mixins_ddi_path) mixins_path = os.path.dirname(mixins_ddi_path) mixins_singer_name = os.path.splitext(os.path.basename(mixins_ddi_path))[0] dst_path: str = args.dst_path if dst_path is None: dst_path = os.path.join(src_path, "mixins") dst_path: str = os.path.normpath(dst_path) # make dirs if not os.path.exists(dst_path): os.makedirs(dst_path) mixins_item = args.mixins_item return src_path, src_singer_name, mixins_path, mixins_singer_name, dst_path, mixins_item, args def _create_vqm_stream(vqm_meta_list: list[VQMMeta]): # Create VQM struct vqm_stream = io.BytesIO() vqm_stream.write(b'\xFF'*8) vqm_stream.write(b'VQM ') vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write(b'\xFF'*8) vqm_stream.write(b'VQMu') vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little')) vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little')) for vqm_meta in vqm_meta_list: vqm_stream.write(b'\xFF'*8) vqm_stream.write(b"VQMp") vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write(str_to_bytes(vqm_meta["unknown1"])) vqm_stream.write(struct.pack("<f", 224.0)) # Unknown vqm_stream.write(struct.pack("<f", vqm_meta["pitch2"])) vqm_stream.write(struct.pack("<f", 
vqm_meta["unknown2"])) vqm_stream.write(struct.pack("<f", vqm_meta["dynamics"])) vqm_stream.write(struct.pack("<f", vqm_meta["unknown3"])) vqm_stream.write((0).to_bytes(4, byteorder='little')) # EpR vqm_stream.write(b'\xFF'*4) vqm_stream.write(len(vqm_meta["epr"]).to_bytes(4, byteorder='little')) for epr_offset in vqm_meta["epr"]: vqm_stream.write(epr_offset.to_bytes(8, byteorder='little')) # SND vqm_stream.write(vqm_meta["fs"].to_bytes(4, byteorder='little')) vqm_stream.write(b'\x01\x00') vqm_stream.write(vqm_meta["snd_id"].to_bytes(4, byteorder='little')) vqm_stream.write(vqm_meta["snd"].to_bytes(8, byteorder='little')) vqm_stream.write(b'\xFF'*0x10) vqm_stream.write(str_to_data(vqm_meta["idx"])) vqm_stream.write(str_to_data("GROWL")) vqm_stream.write(str_to_data("vqm")) return vqm_stream def mixins_vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader): mixins_ddi_stream = mixins_ddi_model.ddi_data if "vqm" not in mixins_ddi_model.ddi_data_dict: raise Exception("Mixins DDI doesn't have vqm stream.") print("Reading source DDI...") src_ddi_model = DDIModel(src_ddi_bytes) src_ddi_model.read() src_ddi_stream = src_ddi_model.ddi_data if "vqm" in src_ddi_model.ddi_data_dict: print("Source DDI already has vqm stream, continue will replace it and won't remove vqm stream from ddb file.") print("Continue? (Y/n)", end=" ") choice = input().strip().lower() if choice != "y" or choice != "": return vqm_meta_list: list[VQMMeta] = [] for vqm_idx, vqm_info in mixins_ddi_model.vqm_data.items(): epr_list = [] for epr_info in vqm_info["epr"]: ddi_epr_pos, epr_offset = epr_info.split("=") ddb_epr_offset = output_stream.tell() ddi_epr_pos = int(ddi_epr_pos, 16) epr_offset = int(epr_offset, 16) mixins_ddb_stream.seek(epr_offset) hed = mixins_ddb_stream.read(4).decode() if hed != "FRM2": raise Exception("Mixins DDB file is broken") frm_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') mixins_ddb_stream.seek(epr_offset) frm_bytes = mixins_ddb_stream.read(frm_len) output_stream.write(frm_bytes) epr_list.append(ddb_epr_offset) ddi_snd_pos, snd_name = vqm_info["snd"].split("=") snd_offset, snd_id = snd_name.split("_") ddi_snd_pos = int(ddi_snd_pos, 16) snd_offset = int(snd_offset, 16) snd_id = int(snd_id, 16) mixins_ddb_stream.seek(snd_offset) hed = mixins_ddb_stream.read(4).decode() if hed != "SND ": raise Exception("Mixins DDB file is broken") snd_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') ddb_snd_offset = output_stream.tell() mixins_ddb_stream.seek(snd_offset) snd_bytes = mixins_ddb_stream.read(snd_len) hed = snd_bytes[0:4].decode() if hed != "SND ": raise Exception("Mixins DDB file is broken") output_stream.write(snd_bytes) vqm_meta_list.append({ "idx": vqm_idx, "epr": epr_list, "snd_id": snd_id, "snd": ddb_snd_offset, "fs": vqm_info["fs"], "unknown1": vqm_info["unknown1"], "pitch1": vqm_info["pitch1"], "pitch2": vqm_info["pitch2"], "unknown2": vqm_info["unknown2"], "unknown3": vqm_info["unknown3"], "dynamics": vqm_info["dynamics"], }) # Create new DDI vqm_stream = _create_vqm_stream(vqm_meta_list) ddi_vqm_bytes = vqm_stream.getvalue() if "vqm" in src_ddi_model.ddi_data_dict: ddi_vqm_pos = src_ddi_model.offset_map["vqm"][0] ddi_vqm_end_pos = src_ddi_model.offset_map["vqm"][1] else: ddi_vqm_pos = src_ddi_bytes.find(ddi_footer) ddi_vqm_end_pos = ddi_vqm_pos # Bump dbv_len dbv_len_post = src_ddi_model.offset_map["dbv"][0] + 0x18 src_ddi_stream.seek(dbv_len_post) src_ddi_dbv_len = 
int.from_bytes(src_ddi_stream.read(4), byteorder='little') src_ddi_dbv_len += 1 src_ddi_stream.seek(dbv_len_post) src_ddi_stream.write(src_ddi_dbv_len.to_bytes(4, byteorder='little')) src_ddi_bytes = src_ddi_stream.getvalue() dst_ddi_bytes = byte_replace(src_ddi_bytes, ddi_vqm_pos, ddi_vqm_end_pos - ddi_vqm_pos, ddi_vqm_bytes) return dst_ddi_bytes def mixins_sta2vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader, sta2vqm_phoneme: str): mixins_ddi_stream = mixins_ddi_model.ddi_data print("Reading source DDI...") src_ddi_model = DDIModel(src_ddi_bytes) src_ddi_model.read() src_ddi_stream = src_ddi_model.ddi_data if "vqm" in src_ddi_model.ddi_data_dict: print("Source DDI already has vqm stream, continue will replace it and won't remove vqm stream from ddb file.") print("Continue? (Y/n)", end=" ") choice = input().strip().lower() if choice != "y" or choice != "": return # Find stationary in mixins mixins_sta_items = None for _, sta_items in mixins_ddi_model.sta_data.items(): if sta_items["phoneme"] == sta2vqm_phoneme: mixins_sta_items = sta_items break if mixins_sta_items is None: raise Exception("Mixins DDI doesn't have stationary entry for phoneme \"%s\"" % sta2vqm_phoneme) vqm_meta_list: list[VQMMeta] = [] vqm_idx = 0 for sta_idx, sta_item in mixins_sta_items["stap"].items(): output_epr_list = [] # EpR epr_list = sta_item["epr"] if len(epr_list) < 100: print(f"Warning: EpR count is less than 100, EpR count: {len(epr_list)}") continue epr_list = epr_list[0:100] for epr_info in epr_list: epr_offset = epr_info.split("=") ddb_epr_offset = output_stream.tell() epr_offset = int(epr_offset[1], 16) mixins_ddb_stream.seek(epr_offset) hed = mixins_ddb_stream.read(4) if hed != b"FRM2": raise Exception("Mixins DDB file is broken") frm_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') epr_cutoff = epr_offset + frm_len mixins_ddb_stream.seek(epr_offset) frm_bytes = mixins_ddb_stream.read(frm_len) output_stream.write(frm_bytes) output_epr_list.append(ddb_epr_offset) # SND ddi_snd_pos, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") ddi_snd_pos = int(ddi_snd_pos, 16) snd_offset = int(snd_offset, 16) snd_id = int(snd_id, 16)
real_snd_offset = stream_reverse_search(mixins_ddb_stream, b"SND ", snd_offset)
3
2023-11-20 11:37:46+00:00
16k
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n # print('************************encoder shape',x.shape)\n\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n 
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = 
x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', 
to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n\n\n if conditioning is not None:\n if isinstance(conditioning, dict):\n if isinstance(list(conditioning.values())[0],list):\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n else:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps 
is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "Attention_AR_counter", "path": "text_super_resolution/model/VisionLAN/utils.py", "snippet": "class Attention_AR_counter():\n def __init__(self, display_string, dict_file, case_sensitive):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n self.display_string = display_string\n self.case_sensitive = case_sensitive\n self.de = cha_encdec(dict_file, case_sensitive)\n\n def clear(self):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n \n def add_iter(self, output, out_length, label_length, labels):\n self.total_samples += label_length.size()[0]\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n for i in range(0, len(prdt_texts)):\n if not self.case_sensitive:\n prdt_texts[i] = prdt_texts[i].lower()\n labels[i] = labels[i].lower()\n all_words = []\n for w in labels[i].split('|') + prdt_texts[i].split('|'):\n if w not in all_words:\n all_words.append(w)\n l_words = [all_words.index(_) for _ in labels[i].split('|')]\n p_words = [all_words.index(_) for _ in prdt_texts[i].split('|')]\n self.distance_C += ed.eval(labels[i], prdt_texts[i])\n self.distance_W += ed.eval(l_words, p_words)\n self.total_C += len(labels[i])\n self.total_W += len(l_words)\n self.correct = self.correct + 1 if labels[i] == prdt_texts[i] else self.correct\n return prdt_texts, labels\n\n def show(self):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W))\n self.clear()\n def show_test(self,best_acc, change= False):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n if (self.correct / self.total_samples) > best_acc:\n best_acc = np.copy(self.correct / self.total_samples)\n change = True\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}, best_acc: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W, best_acc))\n\n self.clear()\n return best_acc, change\n \n def convert(self, output, out_length):\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n prdt_prob = prdt_prob.cpu().unsqueeze(0)\n MAX_LEN = 25\n length = prdt_prob.size(1)\n if length >= MAX_LEN:\n return prdt_prob[:, :MAX_LEN, :], prdt_prob\n pad = torch.zeros([prdt_prob.shape[0], MAX_LEN - length, prdt_prob.shape[2]])\n prdt_prob = torch.cat([prdt_prob, pad], dim=1)\n return prdt_texts, prdt_prob" }, { "identifier": "TPSSpatialTransformer", "path": "text_super_resolution/model/tps_spatial_transformer.py", "snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, 
margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate" }, { "identifier": "STNHead", "path": "text_super_resolution/model/stn_head.py", "snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n 
self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x" }, { "identifier": "VisionLAN", "path": "text_super_resolution/model/VisionLAN/VisionLAN.py", "snippet": "class VisionLAN(nn.Module):\n '''\n Architecture of VisionLAN\n input\n input: input image\n label_pos: character index\n output\n text_pre: word-level prediction from VRM\n test_rem: remaining string prediction from MLM\n text_mas: occluded character prediction from MLM\n '''\n def __init__(self, strides, input_shape):\n super(VisionLAN, self).__init__()\n self.backbone = resnet.resnet45(strides, compress_layer=False)\n self.input_shape = input_shape\n self.MLM_VRM = MLM_VRM()\n def forward(self, input, label_pos, training_stp, Train_in = True):\n # extract features\n features = self.backbone(input)\n # MLM + VRM\n if Train_in:\n text_pre, test_rem, text_mas, mask_map = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return text_pre, test_rem, text_mas, mask_map\n else:\n output, out_length = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return output, out_length" }, { "identifier": "SemanticLoss", "path": "text_super_resolution/loss/semantic_loss.py", "snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", 
float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal" }, { "identifier": "ssim_psnr", "path": "text_super_resolution/utils/ssim_psnr.py", "snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):" } ]
import datetime
import math
import cv2
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import pygame
from collections import OrderedDict
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from torchvision import transforms
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN
from utils.render_standard_text import *
from text_super_resolution.loss.semantic_loss import SemanticLoss
from text_super_resolution.utils import ssim_psnr
from pygame import freetype
from utils.metrics import *
12,312
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
14
2023-11-20 06:34:21+00:00
16k
mjavadpur/mj_ONNX_SadTalker
inference_onnx.py
[ { "identifier": "AnimateFromCoeff", "path": "src/facerender/animate_onnx.py", "snippet": "class AnimateFromCoeff():\n\n def __init__(self, sadtalker_path, device):\n\n with open(sadtalker_path['facerender_yaml']) as f:\n config = yaml.safe_load(f)\n\n generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],\n **config['model_params']['common_params'])\n kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],\n **config['model_params']['common_params'])\n he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],\n **config['model_params']['common_params'])\n mapping = MappingNet(**config['model_params']['mapping_params'])\n\n generator.to(device)\n kp_extractor.to(device)\n he_estimator.to(device)\n mapping.to(device)\n for param in generator.parameters():\n param.requires_grad = False\n for param in kp_extractor.parameters():\n param.requires_grad = False \n for param in he_estimator.parameters():\n param.requires_grad = False\n for param in mapping.parameters():\n param.requires_grad = False\n\n if sadtalker_path is not None:\n if 'checkpoint' in sadtalker_path: # use safe tensor\n self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)\n else:\n self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)\n else:\n raise AttributeError(\"Checkpoint should be specified for video head pose estimator.\")\n\n if sadtalker_path['mappingnet_checkpoint'] is not None:\n self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)\n else:\n raise AttributeError(\"Checkpoint should be specified for video head pose estimator.\") \n\n self.kp_extractor = kp_extractor\n self.generator = generator\n self.he_estimator = he_estimator\n self.mapping = mapping\n\n self.kp_extractor.eval()\n self.generator.eval()\n self.he_estimator.eval()\n self.mapping.eval()\n \n self.device = device\n \n def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None, \n kp_detector=None, he_estimator=None, \n device=\"cpu\"):\n\n checkpoint = safetensors.torch.load_file(checkpoint_path)\n\n if generator is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'generator' in k:\n x_generator[k.replace('generator.', '')] = v\n generator.load_state_dict(x_generator)\n if kp_detector is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'kp_extractor' in k:\n x_generator[k.replace('kp_extractor.', '')] = v\n kp_detector.load_state_dict(x_generator)\n if he_estimator is not None:\n x_generator = {}\n for k,v in checkpoint.items():\n if 'he_estimator' in k:\n x_generator[k.replace('he_estimator.', '')] = v\n he_estimator.load_state_dict(x_generator)\n \n return None\n\n def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None, \n kp_detector=None, he_estimator=None, optimizer_generator=None, \n optimizer_discriminator=None, optimizer_kp_detector=None, \n optimizer_he_estimator=None, device=\"cpu\"):\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n if generator is not None:\n generator.load_state_dict(checkpoint['generator'])\n if kp_detector is not None:\n kp_detector.load_state_dict(checkpoint['kp_detector'])\n if he_estimator is not None:\n he_estimator.load_state_dict(checkpoint['he_estimator'])\n if discriminator is not None:\n try:\n 
discriminator.load_state_dict(checkpoint['discriminator'])\n except:\n print ('No discriminator in the state-dict. Dicriminator will be randomly initialized')\n if optimizer_generator is not None:\n optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])\n if optimizer_discriminator is not None:\n try:\n optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])\n except RuntimeError as e:\n print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')\n if optimizer_kp_detector is not None:\n optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])\n if optimizer_he_estimator is not None:\n optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])\n\n return checkpoint['epoch']\n \n def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,\n optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))\n if mapping is not None:\n mapping.load_state_dict(checkpoint['mapping'])\n if discriminator is not None:\n discriminator.load_state_dict(checkpoint['discriminator'])\n if optimizer_mapping is not None:\n optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])\n if optimizer_discriminator is not None:\n optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])\n\n return checkpoint['epoch']\n\n def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):\n\n source_image=x['source_image'].type(torch.FloatTensor)\n source_semantics=x['source_semantics'].type(torch.FloatTensor)\n target_semantics=x['target_semantics_list'].type(torch.FloatTensor) \n source_image=source_image.to(self.device)\n source_semantics=source_semantics.to(self.device)\n target_semantics=target_semantics.to(self.device)\n if 'yaw_c_seq' in x:\n yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)\n yaw_c_seq = x['yaw_c_seq'].to(self.device)\n else:\n yaw_c_seq = None\n if 'pitch_c_seq' in x:\n pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)\n pitch_c_seq = x['pitch_c_seq'].to(self.device)\n else:\n pitch_c_seq = None\n if 'roll_c_seq' in x:\n roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) \n roll_c_seq = x['roll_c_seq'].to(self.device)\n else:\n roll_c_seq = None\n\n frame_num = x['frame_num']\n\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor, self.he_estimator, self.mapping, \n yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)\n\n predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])\n predictions_video = predictions_video[:frame_num]\n\n video = []\n for idx in range(predictions_video.shape[0]):\n image = predictions_video[idx]\n image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n\n ### the generated video is 256x256, so we keep the aspect ratio, \n original_size = crop_info[0]\n if original_size:\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n \n video_name = x['video_name'] + '.mp4'\n path = os.path.join(video_save_dir, 'temp_'+video_name)\n \n imageio.mimsave(path, result, fps=float(25))\n\n av_path = os.path.join(video_save_dir, video_name)\n return_path = av_path \n \n audio_path = x['audio_path'] \n audio_name = 
os.path.splitext(os.path.split(audio_path)[-1])[0]\n new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')\n start_time = 0\n # cog will not keep the .mp3 filename\n sound = AudioSegment.from_file(audio_path)\n frames = frame_num \n end_time = start_time + frames*1/25*1000\n word1=sound.set_frame_rate(16000)\n word = word1[start_time:end_time]\n word.export(new_audio_path, format=\"wav\")\n\n save_video_with_watermark(path, new_audio_path, av_path, watermark= False)\n print(f'The generated video is named {video_save_dir}/{video_name}') \n\n if 'full' in preprocess.lower():\n # only add watermark to the full image.\n video_name_full = x['video_name'] + '_full.mp4'\n full_video_path = os.path.join(video_save_dir, video_name_full)\n return_path = full_video_path\n paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n print(f'The generated video is named {video_save_dir}/{video_name_full}') \n else:\n full_video_path = av_path \n\n #### paste back then enhancers\n if enhancer:\n video_name_enhancer = x['video_name'] + '_enhanced.mp4'\n enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)\n av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) \n return_path = av_path_enhancer\n\n try:\n enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n except:\n enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n \n save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)\n print(f'The generated video is named {video_save_dir}/{video_name_enhancer}')\n os.remove(enhanced_path)\n\n os.remove(path)\n os.remove(new_audio_path)\n\n return return_path\n \n def generate_deploy(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):\n # Create Talking Face\n # 1. Reading Data\n source_image=x['source_image'].type(torch.FloatTensor)\n source_semantics=x['source_semantics'].type(torch.FloatTensor)\n target_semantics=x['target_semantics_list'].type(torch.FloatTensor) \n source_image=source_image.to(self.device)\n source_semantics=source_semantics.to(self.device)\n target_semantics=target_semantics.to(self.device)\n # 2. برای محاسبه به دستگاه self.device انتقال دهید\n if 'yaw_c_seq' in x:\n yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)\n yaw_c_seq = x['yaw_c_seq'].to(self.device)\n else:\n yaw_c_seq = None\n if 'pitch_c_seq' in x:\n pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)\n pitch_c_seq = x['pitch_c_seq'].to(self.device)\n else:\n pitch_c_seq = None\n if 'roll_c_seq' in x:\n roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor) \n roll_c_seq = x['roll_c_seq'].to(self.device)\n else:\n roll_c_seq = None\n\n frame_num = x['frame_num']\n # 3. پیش‌بینی‌های مدل مولد برای ویدیوهای Talking Face\n predictions_video = make_animation(source_image, source_semantics, target_semantics,\n self.generator, self.kp_extractor, self.he_estimator, self.mapping, \n yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)\n # 4. تنظیم شکل و برش\n predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])\n predictions_video = predictions_video[:frame_num]\n # 5. 
هر فریم ویدیو را پیمایش کنید و آن را به Numpy تبدیل کنید و در نتیجه ذخیره کنید.\n video = []\n for idx in range(predictions_video.shape[0]):\n image = predictions_video[idx]\n image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)\n video.append(image)\n result = img_as_ubyte(video)\n # 6. اندازه تصویر در نتیجه را متناسب با اطلاعات اندازه اصلی در crop_info تغییر دهید.\n original_size = crop_info[0]\n if original_size:\n result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]\n\n # 7. از کتابخانه imageio برای ذخیره نتیجه به عنوان یک فایل ویدیویی با نرخ فریم 25 استفاده کنید.\n video_name = x['video_name'] + '.mp4'\n path = os.path.join(video_save_dir, 'temp_'+video_name)\n \n imageio.mimsave(path, result, fps=float(25))\n\n av_path = os.path.join(video_save_dir, video_name)\n return_path = av_path \n \n # 8. مسیر صوتی را در پارامتر x وارد کنید و یک مسیر فایل صوتی جدید ایجاد کنید.\n audio_path = x['audio_path'] \n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')\n start_time = 0\n sound = AudioSegment.from_file(audio_path)\n frames = frame_num \n end_time = start_time + frames*1/25*1000\n word1=sound.set_frame_rate(16000)\n word = word1[start_time:end_time]\n word.export(new_audio_path, format=\"wav\")\n \n\n save_video_with_watermark(path, new_audio_path, av_path, watermark= False)\n print(f' ---- The first generated video is named {video_save_dir}/{video_name}') \n \n if 'full' in preprocess.lower():\n # only add watermark to the full image.\n video_name_full = x['video_name'] + '_full.mp4'\n full_video_path = os.path.join(video_save_dir, video_name_full)\n return_path = full_video_path\n paste_pic(path, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n print(f' ---- The full generated video is named {video_save_dir}/{video_name_full}') \n else:\n full_video_path = av_path \n \n if enhancer:\n video_name_enhancer = x['video_name'] + '_enhanced.mp4'\n enhanced_path = os.path.join(video_save_dir, 'temp_'+video_name_enhancer)\n av_path_enhancer = os.path.join(video_save_dir, video_name_enhancer) \n return_path = av_path_enhancer\n\n print(\" ---- video_name_enhancer: \" + video_name_enhancer + \"\\n ---- enhanced_path: \" + enhanced_path + \"\\n ---- av_path_enhancer: \" + av_path_enhancer + \"\\n ---- return_path: \" + return_path)\n\n try:\n enhanced_images_gen_with_len = enhancer_generator_with_len(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n print(\" -- len of enhanced_images_gen_with_len -- \" + str(len(enhanced_images_gen_with_len)))\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n print(\"\\n -------- try execute enhanced_path ---\" + enhanced_path + \"\\n ---- path:\" + path+ \"\\n ---- full_video_path:\" + full_video_path)\n except:\n enhanced_images_gen_with_len = enhancer_list(full_video_path, method=enhancer, bg_upsampler=background_enhancer)\n print(\" -- len of enhanced_images_gen_with_len -- \" + str(len(enhanced_images_gen_with_len)))\n imageio.mimsave(enhanced_path, enhanced_images_gen_with_len, fps=float(25))\n print(\"\\n -------- except execute enhanced_path ---\" + enhanced_path+ \"\\n ---- path:\" + path+ \"\\n ---- full_video_path:\" + full_video_path)\n \n save_video_with_watermark(enhanced_path, new_audio_path, av_path_enhancer, watermark= False)\n print(f' ---- The enhance 
generated video is named {video_save_dir}/{video_name_enhancer}')\n # os.remove(enhanced_path)\n\n # حالت فول تصویر پس‌بازگشت\n # paste_pic(av_path_enhancer, pic_path, crop_info, new_audio_path, full_video_path, extended_crop= True if 'ext' in preprocess.lower() else False)\n # print(f'The final enhancer generated video is named {full_video_path}') \n # return_path = full_video_path\n \n # os.remove(path)\n # os.remove(new_audio_path)\n print(f' ---- Final return_path: {return_path}')\n\n return return_path" }, { "identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):\n\n syncnet_mel_step_size = 16\n fps = 25\n\n pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]\n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n\n \n if idlemode:\n num_frames = int(length_of_audio * 25)\n indiv_mels = np.zeros((num_frames, 80, 16))\n else:\n wav = audio.load_wav(audio_path, 16000) \n wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)\n wav = crop_pad_audio(wav, wav_length)\n orig_mel = audio.melspectrogram(wav).T\n spec = orig_mel.copy() # nframes 80\n indiv_mels = []\n\n for i in tqdm(range(num_frames), 'mel:'):\n start_frame_num = i-2\n start_idx = int(80. * (start_frame_num / float(fps)))\n end_idx = start_idx + syncnet_mel_step_size\n seq = list(range(start_idx, end_idx))\n seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]\n m = spec[seq, :]\n indiv_mels.append(m.T)\n indiv_mels = np.asarray(indiv_mels) # T 80 16\n\n ratio = generate_blink_seq_randomly(num_frames) # T\n source_semantics_path = first_coeff_path\n source_semantics_dict = scio.loadmat(source_semantics_path)\n ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)\n\n if ref_eyeblink_coeff_path is not None:\n ratio[:num_frames] = 0\n refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)\n refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]\n refeyeblink_num_frames = refeyeblink_coeff.shape[0]\n if refeyeblink_num_frames<num_frames:\n div = num_frames//refeyeblink_num_frames\n re = num_frames%refeyeblink_num_frames\n refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]\n refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])\n refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)\n print(refeyeblink_coeff.shape[0])\n\n ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64] \n \n indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16\n\n if use_blink:\n ratio = torch.FloatTensor(ratio).unsqueeze(0) # bs T\n else:\n ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.) 
\n # bs T\n ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0) # bs 1 70\n\n indiv_mels = indiv_mels.to(device)\n ratio = ratio.to(device)\n ref_coeff = ref_coeff.to(device)\n\n return {'indiv_mels': indiv_mels, \n 'ref': ref_coeff, \n 'num_frames': num_frames, \n 'ratio_gt': ratio,\n 'audio_name': audio_name, 'pic_name': pic_name}" }, { "identifier": "get_facerender_data", "path": "src/generate_facerender_batch.py", "snippet": "def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, \n batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, \n expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):\n\n semantic_radius = 13\n video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]\n txt_path = os.path.splitext(coeff_path)[0]\n\n data={}\n\n img1 = Image.open(pic_path)\n source_image = np.array(img1)\n source_image = img_as_float32(source_image)\n source_image = transform.resize(source_image, (size, size, 3))\n source_image = source_image.transpose((2, 0, 1))\n source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)\n source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)\n data['source_image'] = source_image_ts\n \n source_semantics_dict = scio.loadmat(first_coeff_path)\n generated_dict = scio.loadmat(coeff_path)\n\n if 'full' not in preprocess.lower():\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n else:\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)\n source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)\n source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)\n data['source_semantics'] = source_semantics_ts\n\n # target \n generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale\n\n if 'full' in preprocess.lower():\n generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)\n\n if still_mode:\n generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)\n\n with open(txt_path+'.txt', 'w') as f:\n for coeff in generated_3dmm:\n for i in coeff:\n f.write(str(i)[:7] + ' '+'\\t')\n f.write('\\n')\n\n target_semantics_list = [] \n frame_num = generated_3dmm.shape[0]\n data['frame_num'] = frame_num\n for frame_idx in range(frame_num):\n target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)\n target_semantics_list.append(target_semantics)\n\n remainder = frame_num%batch_size\n if remainder!=0:\n for _ in range(batch_size-remainder):\n target_semantics_list.append(target_semantics)\n\n target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1\n target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])\n data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)\n data['video_name'] = video_name\n data['audio_path'] = audio_path\n \n if input_yaw_list is not None:\n yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)\n data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)\n if input_pitch_list is not None:\n pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)\n data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)\n if input_roll_list is not None:\n 
roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) \n data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)\n \n return data" }, { "identifier": "init_path", "path": "src/utils/init_path.py", "snippet": "def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'):\n\n if old_version:\n #### load all the checkpoint of `pth`\n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n use_safetensor = False\n elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))):\n print('using safetensor as default')\n sadtalker_paths = {\n \"checkpoint\":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'),\n }\n use_safetensor = True\n else:\n print(\"WARNING: The new version of the model will be updated by safetensor, you may need to download it mannully. We run the old version of the checkpoint this time!\")\n use_safetensor = False\n \n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting'\n sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml')\n sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml')\n sadtalker_paths['use_safetensor'] = use_safetensor # os.path.join(config_dir, 'auido2exp.yaml')\n\n if 'full' in preprocess:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml')\n else:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml')\n\n return sadtalker_paths" }, { "identifier": "CropAndExtract", "path": "src/utils/preprocess.py", "snippet": "class CropAndExtract():\n def __init__(self, sadtalker_path, device):\n\n self.propress = Preprocesser(device)\n self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device)\n \n if sadtalker_path['use_safetensor']:\n checkpoint = safetensors.torch.load_file(sadtalker_path['checkpoint']) \n self.net_recon.load_state_dict(load_x_from_safetensor(checkpoint, 'face_3drecon'))\n else:\n checkpoint = torch.load(sadtalker_path['path_of_net_recon_model'], map_location=torch.device(device)) \n self.net_recon.load_state_dict(checkpoint['net_recon'])\n\n self.net_recon.eval()\n self.lm3d_std = load_lm3d(sadtalker_path['dir_of_BFM_fitting'])\n self.device = device\n \n def generate(self, input_path, save_dir, crop_or_resize='crop', source_image_flag=False, pic_size=256):\n\n pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] \n\n landmarks_path = os.path.join(save_dir, 
pic_name+'_landmarks.txt') \n coeff_path = os.path.join(save_dir, pic_name+'.mat') \n png_path = os.path.join(save_dir, pic_name+'.png') \n\n #load input\n if not os.path.isfile(input_path):\n raise ValueError('input_path must be a valid path to video/image file')\n elif input_path.split('.')[-1] in ['jpg', 'png', 'jpeg']:\n # loader for first frame\n full_frames = [cv2.imread(input_path)]\n fps = 25\n else:\n # loader for videos\n video_stream = cv2.VideoCapture(input_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n full_frames = [] \n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break \n full_frames.append(frame) \n if source_image_flag:\n break\n\n x_full_frames= [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames] \n\n #### crop images as the \n if 'crop' in crop_or_resize.lower(): # default crop\n x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)\n clx, cly, crx, cry = crop\n lx, ly, rx, ry = quad\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)\n elif 'full' in crop_or_resize.lower():\n x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512)\n clx, cly, crx, cry = crop\n lx, ly, rx, ry = quad\n lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry)\n oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx\n crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad)\n else: # resize mode\n oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1] \n crop_info = ((ox2 - ox1, oy2 - oy1), None, None)\n\n frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames]\n if len(frames_pil) == 0:\n print('No face is detected in the input file')\n return None, None\n\n # save crop info\n for frame in frames_pil:\n cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))\n\n # 2. get the landmark according to the detected face. 
\n if not os.path.isfile(landmarks_path): \n lm = self.propress.predictor.extract_keypoint(frames_pil, landmarks_path)\n else:\n print(' Using saved landmarks.')\n lm = np.loadtxt(landmarks_path).astype(np.float32)\n lm = lm.reshape([len(x_full_frames), -1, 2])\n\n if not os.path.isfile(coeff_path):\n # load 3dmm paramter generator from Deep3DFaceRecon_pytorch \n video_coeffs, full_coeffs = [], []\n for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'):\n frame = frames_pil[idx]\n W,H = frame.size\n lm1 = lm[idx].reshape([-1, 2])\n \n if np.mean(lm1) == -1:\n lm1 = (self.lm3d_std[:, :2]+1)/2.\n lm1 = np.concatenate(\n [lm1[:, :1]*W, lm1[:, 1:2]*H], 1\n )\n else:\n lm1[:, -1] = H - 1 - lm1[:, -1]\n\n trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std)\n \n trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32)\n im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0)\n \n with torch.no_grad():\n full_coeff = self.net_recon(im_t)\n coeffs = split_coeff(full_coeff)\n\n pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs}\n \n pred_coeff = np.concatenate([\n pred_coeff['exp'], \n pred_coeff['angle'],\n pred_coeff['trans'],\n trans_params[2:][None],\n ], 1)\n video_coeffs.append(pred_coeff)\n full_coeffs.append(full_coeff.cpu().numpy())\n\n semantic_npy = np.array(video_coeffs)[:,0] \n\n savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]})\n\n return coeff_path, png_path, crop_info" }, { "identifier": "Audio2Coeff", "path": "src/test_audio2coeff.py", "snippet": "class Audio2Coeff():\n\n def __init__(self, sadtalker_path, device):\n #load config\n fcfg_pose = open(sadtalker_path['audio2pose_yaml_path'])\n cfg_pose = CN.load_cfg(fcfg_pose)\n cfg_pose.freeze()\n fcfg_exp = open(sadtalker_path['audio2exp_yaml_path'])\n cfg_exp = CN.load_cfg(fcfg_exp)\n cfg_exp.freeze()\n\n # load audio2pose_model\n self.audio2pose_model = Audio2Pose(cfg_pose, None, device=device)\n self.audio2pose_model = self.audio2pose_model.to(device)\n self.audio2pose_model.eval()\n for param in self.audio2pose_model.parameters():\n param.requires_grad = False \n \n try:\n if sadtalker_path['use_safetensor']:\n checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint'])\n self.audio2pose_model.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2pose'))\n else:\n load_cpk(sadtalker_path['audio2pose_checkpoint'], model=self.audio2pose_model, device=device)\n except:\n raise Exception(\"Failed in loading audio2pose_checkpoint\")\n\n # load audio2exp_model\n netG = SimpleWrapperV2()\n netG = netG.to(device)\n for param in netG.parameters():\n netG.requires_grad = False\n netG.eval()\n try:\n if sadtalker_path['use_safetensor']:\n checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint'])\n netG.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2exp'))\n else:\n load_cpk(sadtalker_path['audio2exp_checkpoint'], model=netG, device=device)\n except:\n raise Exception(\"Failed in loading audio2exp_checkpoint\")\n self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False)\n self.audio2exp_model = self.audio2exp_model.to(device)\n for param in self.audio2exp_model.parameters():\n param.requires_grad = False\n self.audio2exp_model.eval()\n \n self.device = device\n\n def generate(self, batch, coeff_save_dir, pose_style, ref_pose_coeff_path=None):\n\n with torch.no_grad():\n #test\n results_dict_exp= 
self.audio2exp_model.test(batch)\n exp_pred = results_dict_exp['exp_coeff_pred'] #bs T 64\n\n #for class_id in range(1):\n #class_id = 0#(i+10)%45\n #class_id = random.randint(0,46) #46 styles can be selected \n batch['class'] = torch.LongTensor([pose_style]).to(self.device)\n results_dict_pose = self.audio2pose_model.test(batch) \n pose_pred = results_dict_pose['pose_pred'] #bs T 6\n\n pose_len = pose_pred.shape[1]\n if pose_len<13: \n pose_len = int((pose_len-1)/2)*2+1\n pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), pose_len, 2, axis=1)).to(self.device)\n else:\n pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device) \n \n coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1) #bs T 70\n\n coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy() \n\n if ref_pose_coeff_path is not None: \n coeffs_pred_numpy = self.using_refpose(coeffs_pred_numpy, ref_pose_coeff_path)\n \n savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])), \n {'coeff_3dmm': coeffs_pred_numpy})\n\n return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name']))\n \n def using_refpose(self, coeffs_pred_numpy, ref_pose_coeff_path):\n num_frames = coeffs_pred_numpy.shape[0]\n refpose_coeff_dict = loadmat(ref_pose_coeff_path)\n refpose_coeff = refpose_coeff_dict['coeff_3dmm'][:,64:70]\n refpose_num_frames = refpose_coeff.shape[0]\n if refpose_num_frames<num_frames:\n div = num_frames//refpose_num_frames\n re = num_frames%refpose_num_frames\n refpose_coeff_list = [refpose_coeff for i in range(div)]\n refpose_coeff_list.append(refpose_coeff[:re, :])\n refpose_coeff = np.concatenate(refpose_coeff_list, axis=0)\n\n #### relative head pose\n coeffs_pred_numpy[:, 64:70] = coeffs_pred_numpy[:, 64:70] + ( refpose_coeff[:num_frames, :] - refpose_coeff[0:1, :] )\n return coeffs_pred_numpy" }, { "identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):\n\n syncnet_mel_step_size = 16\n fps = 25\n\n pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]\n audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]\n\n \n if idlemode:\n num_frames = int(length_of_audio * 25)\n indiv_mels = np.zeros((num_frames, 80, 16))\n else:\n wav = audio.load_wav(audio_path, 16000) \n wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)\n wav = crop_pad_audio(wav, wav_length)\n orig_mel = audio.melspectrogram(wav).T\n spec = orig_mel.copy() # nframes 80\n indiv_mels = []\n\n for i in tqdm(range(num_frames), 'mel:'):\n start_frame_num = i-2\n start_idx = int(80. 
* (start_frame_num / float(fps)))\n end_idx = start_idx + syncnet_mel_step_size\n seq = list(range(start_idx, end_idx))\n seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]\n m = spec[seq, :]\n indiv_mels.append(m.T)\n indiv_mels = np.asarray(indiv_mels) # T 80 16\n\n ratio = generate_blink_seq_randomly(num_frames) # T\n source_semantics_path = first_coeff_path\n source_semantics_dict = scio.loadmat(source_semantics_path)\n ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)\n\n if ref_eyeblink_coeff_path is not None:\n ratio[:num_frames] = 0\n refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)\n refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]\n refeyeblink_num_frames = refeyeblink_coeff.shape[0]\n if refeyeblink_num_frames<num_frames:\n div = num_frames//refeyeblink_num_frames\n re = num_frames%refeyeblink_num_frames\n refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]\n refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])\n refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)\n print(refeyeblink_coeff.shape[0])\n\n ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64] \n \n indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16\n\n if use_blink:\n ratio = torch.FloatTensor(ratio).unsqueeze(0) # bs T\n else:\n ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.) \n # bs T\n ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0) # bs 1 70\n\n indiv_mels = indiv_mels.to(device)\n ratio = ratio.to(device)\n ref_coeff = ref_coeff.to(device)\n\n return {'indiv_mels': indiv_mels, \n 'ref': ref_coeff, \n 'num_frames': num_frames, \n 'ratio_gt': ratio,\n 'audio_name': audio_name, 'pic_name': pic_name}" }, { "identifier": "get_facerender_data", "path": "src/generate_facerender_batch.py", "snippet": "def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, \n batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, \n expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):\n\n semantic_radius = 13\n video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]\n txt_path = os.path.splitext(coeff_path)[0]\n\n data={}\n\n img1 = Image.open(pic_path)\n source_image = np.array(img1)\n source_image = img_as_float32(source_image)\n source_image = transform.resize(source_image, (size, size, 3))\n source_image = source_image.transpose((2, 0, 1))\n source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)\n source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)\n data['source_image'] = source_image_ts\n \n source_semantics_dict = scio.loadmat(first_coeff_path)\n generated_dict = scio.loadmat(coeff_path)\n\n if 'full' not in preprocess.lower():\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n else:\n source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70\n generated_3dmm = generated_dict['coeff_3dmm'][:,:70]\n\n source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)\n source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)\n source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)\n data['source_semantics'] = source_semantics_ts\n\n # target \n generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale\n\n if 'full' in preprocess.lower():\n generated_3dmm = np.concatenate([generated_3dmm, 
np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)\n\n if still_mode:\n generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)\n\n with open(txt_path+'.txt', 'w') as f:\n for coeff in generated_3dmm:\n for i in coeff:\n f.write(str(i)[:7] + ' '+'\\t')\n f.write('\\n')\n\n target_semantics_list = [] \n frame_num = generated_3dmm.shape[0]\n data['frame_num'] = frame_num\n for frame_idx in range(frame_num):\n target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)\n target_semantics_list.append(target_semantics)\n\n remainder = frame_num%batch_size\n if remainder!=0:\n for _ in range(batch_size-remainder):\n target_semantics_list.append(target_semantics)\n\n target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1\n target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])\n data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)\n data['video_name'] = video_name\n data['audio_path'] = audio_path\n \n if input_yaw_list is not None:\n yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)\n data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)\n if input_pitch_list is not None:\n pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)\n data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)\n if input_roll_list is not None:\n roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) \n data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)\n \n return data" }, { "identifier": "init_path", "path": "src/utils/init_path.py", "snippet": "def init_path(checkpoint_dir, config_dir, size=512, old_version=False, preprocess='crop'):\n\n if old_version:\n #### load all the checkpoint of `pth`\n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n use_safetensor = False\n elif len(glob.glob(os.path.join(checkpoint_dir, '*.safetensors'))):\n print('using safetensor as default')\n sadtalker_paths = {\n \"checkpoint\":os.path.join(checkpoint_dir, 'SadTalker_V0.0.2_'+str(size)+'.safetensors'),\n }\n use_safetensor = True\n else:\n print(\"WARNING: The new version of the model will be updated by safetensor, you may need to download it mannully. 
We run the old version of the checkpoint this time!\")\n use_safetensor = False\n \n sadtalker_paths = {\n 'wav2lip_checkpoint' : os.path.join(checkpoint_dir, 'wav2lip.pth'),\n 'audio2pose_checkpoint' : os.path.join(checkpoint_dir, 'auido2pose_00140-model.pth'),\n 'audio2exp_checkpoint' : os.path.join(checkpoint_dir, 'auido2exp_00300-model.pth'),\n 'free_view_checkpoint' : os.path.join(checkpoint_dir, 'facevid2vid_00189-model.pth.tar'),\n 'path_of_net_recon_model' : os.path.join(checkpoint_dir, 'epoch_20.pth')\n }\n\n sadtalker_paths['dir_of_BFM_fitting'] = os.path.join(config_dir) # , 'BFM_Fitting'\n sadtalker_paths['audio2pose_yaml_path'] = os.path.join(config_dir, 'auido2pose.yaml')\n sadtalker_paths['audio2exp_yaml_path'] = os.path.join(config_dir, 'auido2exp.yaml')\n sadtalker_paths['use_safetensor'] = use_safetensor # os.path.join(config_dir, 'auido2exp.yaml')\n\n if 'full' in preprocess:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00109-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender_still.yaml')\n else:\n sadtalker_paths['mappingnet_checkpoint'] = os.path.join(checkpoint_dir, 'mapping_00229-model.pth.tar')\n sadtalker_paths['facerender_yaml'] = os.path.join(config_dir, 'facerender.yaml')\n\n return sadtalker_paths" } ]
from glob import glob
from time import strftime
from argparse import ArgumentParser
from src.facerender.animate_onnx import AnimateFromCoeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.utils.init_path import init_path
from src.utils.preprocess import CropAndExtract
from src.test_audio2coeff import Audio2Coeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.utils.init_path import init_path
from src.face3d.visualize import gen_composed_video
import shutil
import torch
import os, sys, time
import base64
13,128
# from src.facerender.animate import AnimateFromCoeff

def main(args):
    #torch.backends.cudnn.enabled = False
    # tts_service = os.getenv("TTS_SERVER")
    facerender_batch_size = 10
    startInference = time.time()
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
    os.makedirs(save_dir, exist_ok=True)
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    input_yaw_list = args.input_yaw
    input_pitch_list = args.input_pitch
    input_roll_list = args.input_roll
    ref_eyeblink = args.ref_eyeblink
    ref_pose = args.ref_pose

    current_root_path = os.path.split(sys.argv[0])[0]

    sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess)

    #init model
    preprocess_model = CropAndExtract(sadtalker_paths, device)
    audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
    animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)

    #crop image and extract 3dmm from image
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    print('3DMM Extraction for source image')
    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
                                                                           source_image_flag=True, pic_size=args.size)
    if first_coeff_path is None:
        print("Can't get the coeffs of the input")
        return

    if ref_eyeblink is not None:
        ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
        ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
        os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
        print('3DMM Extraction for the reference video providing eye blinking')
        ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_eyeblink_coeff_path=None

    if ref_pose is not None:
        if ref_pose == ref_eyeblink:
            ref_pose_coeff_path = ref_eyeblink_coeff_path
        else:
            ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
            ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
            os.makedirs(ref_pose_frame_dir, exist_ok=True)
            print('3DMM Extraction for the reference video providing pose')
            ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_pose_coeff_path=None

    #audio2ceoff
# from src.facerender.animate import AnimateFromCoeff

def main(args):
    #torch.backends.cudnn.enabled = False
    # tts_service = os.getenv("TTS_SERVER")
    facerender_batch_size = 10
    startInference = time.time()
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
    os.makedirs(save_dir, exist_ok=True)
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    input_yaw_list = args.input_yaw
    input_pitch_list = args.input_pitch
    input_roll_list = args.input_roll
    ref_eyeblink = args.ref_eyeblink
    ref_pose = args.ref_pose

    current_root_path = os.path.split(sys.argv[0])[0]

    sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess)

    #init model
    preprocess_model = CropAndExtract(sadtalker_paths, device)
    audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
    animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device)

    #crop image and extract 3dmm from image
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    print('3DMM Extraction for source image')
    first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\
                                                                           source_image_flag=True, pic_size=args.size)
    if first_coeff_path is None:
        print("Can't get the coeffs of the input")
        return

    if ref_eyeblink is not None:
        ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0]
        ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname)
        os.makedirs(ref_eyeblink_frame_dir, exist_ok=True)
        print('3DMM Extraction for the reference video providing eye blinking')
        ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_eyeblink_coeff_path=None

    if ref_pose is not None:
        if ref_pose == ref_eyeblink:
            ref_pose_coeff_path = ref_eyeblink_coeff_path
        else:
            ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0]
            ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname)
            os.makedirs(ref_pose_frame_dir, exist_ok=True)
            print('3DMM Extraction for the reference video providing pose')
            ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False)
    else:
        ref_pose_coeff_path=None

    #audio2ceoff
batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
6
2023-11-25 06:53:12+00:00
16k
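The record above pairs retrieved cross-file context snippets, the in-file import_statement, and the cropped_code with a reference next_line and a gold_snippet_index. As a reading aid, here is a minimal Python sketch of how such a record might be assembled into a next-line completion prompt and scored. This is not official tooling for this dataset: the build_prompt and is_exact_match helpers are hypothetical, and the prompt layout and the interpretation of gold_snippet_index as an index into the context list are assumptions inferred from the field layout shown above.

# Minimal sketch (assumption, not the dataset's official loader): turn one record into a
# next-line completion prompt and check a model prediction against the reference next_line.

def build_prompt(record: dict, max_context_snippets: int = 3) -> str:
    """Concatenate a few retrieved cross-file snippets with the in-file code to be completed."""
    parts = []
    for entry in record["context"][:max_context_snippets]:
        # Each context entry holds the path and source text of a candidate cross-file snippet.
        parts.append(f"# Path: {entry['path']}\n{entry['snippet']}")
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n\n".join(parts)


def is_exact_match(prediction: str, record: dict) -> bool:
    """Whitespace-insensitive exact match against the reference next line."""
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    # Toy record with abbreviated values, mirroring the field layout of the records above.
    record = {
        "repo_name": "mjavadpur/mj_ONNX_SadTalker",
        "file_path": "inference_onnx.py",
        "context": [
            {"identifier": "get_data", "path": "src/generate_batch.py", "snippet": "def get_data(...): ..."},
        ],
        "import_statement": "from src.generate_batch import get_data",
        "cropped_code": "def main(args):\n    ...\n    #audio2ceoff",
        "next_line": "batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)",
        "gold_snippet_index": 0,  # assumed: index into `context` of the snippet needed for the completion
    }
    prompt = build_prompt(record)
    print(len(prompt))
    print(is_exact_match("batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)", record))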
microsoft/Project-BayesDAG
src/causica/preprocessing/data_processor.py
[ { "identifier": "CausalDataset", "path": "src/causica/datasets/dataset.py", "snippet": "class CausalDataset(Dataset):\n \"\"\"\n Class to store the np.ndarray adjacency matrix and samples\n from the intervention distributions as attributes of the Dataset object.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]],\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None,\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n self._counterfactual_data = counterfactual_data\n self._intervention_data = intervention_data\n self._adjacency_data = adjacency_data\n self._subgraph_data = subgraph_data\n\n def get_adjacency_data_matrix(self) -> np.ndarray:\n \"\"\"\n Return the np.ndarray dag adjacency matrix.\n \"\"\"\n if self._adjacency_data is None:\n raise TypeError(\"Adjacency matrix is None. No adjacency matrix has been loaded.\")\n return self._adjacency_data\n\n def set_adjacency_data_matrix(self, A: np.ndarray) -> None:\n \"\"\"\n Externally set the np.ndarray dag adjacency matrix. If already set with a matrix, it will overwrite it\n \"\"\"\n self._adjacency_data = A.copy()\n\n @property\n def has_adjacency_data_matrix(self) -> bool:\n \"\"\"\n Returns: If the adjacency matrix is loaded\n \"\"\"\n return self._adjacency_data is not None\n\n def get_known_subgraph_mask_matrix(self) -> np.ndarray:\n \"\"\"\n Return the np.ndarray dag mask matrix.\n \"\"\"\n if self._subgraph_data is None:\n raise TypeError(\"Adjacency matrix is None. No adjacency matrix has been loaded.\")\n return self._subgraph_data\n\n def get_intervention_data(self) -> List[InterventionData]:\n \"\"\"\n Return the list of interventions and samples from intervened distributions\n \"\"\"\n if self._intervention_data is None:\n raise TypeError(\"Intervention data is None. No intervention data has been loaded.\")\n return self._intervention_data\n\n def get_counterfactual_data(self) -> List[InterventionData]:\n \"\"\"\n Return the list of interventions and samples for the counterfactual data\n \"\"\"\n if self._counterfactual_data is None:\n raise TypeError(\"Counterfactual data is None. 
No counterfactual data has been loaded.\")\n return self._counterfactual_data\n\n @property\n def has_counterfactual_data(self) -> bool:\n \"\"\"\n Returns True if object has counterfactual data.\n \"\"\"\n return self._counterfactual_data is not None" }, { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n # Ensure that data and masks are immutable\n if not issparse(self._train_data):\n self._train_data.setflags(write=False)\n self._train_mask.setflags(write=False)\n if test_data is not None and not issparse(test_data):\n self._test_data = cast(np.ndarray, test_data)\n self._test_data.setflags(write=False)\n self._test_mask = cast(np.ndarray, test_mask)\n self._test_mask.setflags(write=False)\n\n if val_data is not None and not issparse(val_data):\n self._val_data = cast(np.ndarray, val_data)\n self._val_mask = cast(np.ndarray, val_mask)\n self._val_data.setflags(write=False)\n self._val_mask.setflags(write=False)\n\n def to_causal(\n self,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]] = None,\n ):\n \"\"\"\n Return the dag version of this dataset.\n \"\"\"\n return CausalDataset(\n train_data=self._train_data,\n train_mask=self._train_mask,\n adjacency_data=adjacency_data,\n subgraph_data=subgraph_data,\n intervention_data=intervention_data,\n counterfactual_data=counterfactual_data,\n val_data=self._val_data,\n val_mask=self._val_mask,\n test_data=self._test_data,\n test_mask=self._test_mask,\n variables=self._variables,\n data_split=self._data_split,\n held_out_interventions=self._held_out_interventions,\n true_posterior=self._true_posterior,\n graph_args=self._graph_args\n )\n\n @property\n def train_data_and_mask(self) -> Tuple[np.ndarray, np.ndarray]:\n # Add to avoid inconsistent type mypy error\n return self._train_data, self._train_mask" }, { "identifier": "SparseDataset", "path": "src/causica/datasets/dataset.py", "snippet": "class SparseDataset(BaseDataset):\n \"\"\"\n Class to store sparse train/val/test data and masks and variables metadata.\n \"\"\"\n\n def __init__(\n self,\n train_data: csr_matrix,\n train_mask: csr_matrix,\n val_data: Optional[csr_matrix] = None,\n val_mask: Optional[csr_matrix] = None,\n test_data: Optional[csr_matrix] = None,\n test_mask: Optional[csr_matrix] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split)\n # Declare types to avoid mypy error\n self._val_data: Optional[csr_matrix]\n 
self._val_mask: Optional[csr_matrix]\n self._test_data: Optional[csr_matrix]\n self._test_mask: Optional[csr_matrix]\n self._train_data: csr_matrix\n self._train_mask: csr_matrix\n\n def to_dense(self) -> Dataset:\n \"\"\"\n Return the dense version of this dataset, i.e. all sparse data and mask arrays are transformed to dense.\n \"\"\"\n val_data = self._val_data.toarray() if self._val_data is not None else None\n val_mask = self._val_mask.toarray() if self._val_mask is not None else None\n test_data = self._test_data.toarray() if self._test_data is not None else None\n test_mask = self._test_mask.toarray() if self._test_mask is not None else None\n return Dataset(\n self._train_data.toarray(),\n self._train_mask.toarray(),\n val_data,\n val_mask,\n test_data,\n test_mask,\n self._variables,\n self._data_split,\n )" }, { "identifier": "InterventionData", "path": "src/causica/datasets/intervention_data.py", "snippet": "class InterventionData(NamedTuple):\n \"\"\"Class that acts as a container for observational (rank-1), interventional (rank-2) or counterfactual (rank-3) data.\n\n This data object can be serialized by converting to a dict, taking the form\n\n {\n \"intervention_idxs\": Optional[np.ndarray]\n \"intervention_values\": Optional[np.ndarray]\n \"test_data\": np.ndarray\n \"conditioning_idxs\": Optional[np.ndarray] = None\n \"conditioning_values\": Optional[np.ndarray] = None\n \"effect_idxs\": Optional[np.ndarray] = None\n \"intervention_reference\": Optional[np.ndarray] = None\n \"reference_data\": Optional[np.ndarray] = None\n },\n\n Args:\n conditioning_idxs: np.ndarray. 1d array containing the indices of each variable on which we condition on. For counterfactuals,\n all variables should be conditioned on.\n conditioning_values: np.ndarray. 1d array containing the values being assigned to the conditioned variables.\n effect_idxs: np.ndarray. 1d array containing the indices of each variable for which we want to evaluate the effect of the treatment.\n intervention_idxs: np.ndarray. 1d array containing the indices of each variable on which an intervention is made.\n intervention_values: np.ndarray. 1d array containing the values being assigned to the intervened variables.\n intervention_reference: np.ndarray 1d array containing reference values for interventions.\n test_data: np.ndarray. Samples from intervened distribution.\n reference_data: np.ndarray. 
Samples from intervened distribution with reference intervention.\n \"\"\"\n\n intervention_idxs: Optional[np.ndarray]\n intervention_values: Optional[np.ndarray]\n test_data: np.ndarray\n conditioning_idxs: Optional[np.ndarray] = None\n conditioning_values: Optional[np.ndarray] = None\n effect_idxs: Optional[np.ndarray] = None\n intervention_reference: Optional[np.ndarray] = None\n reference_data: Optional[np.ndarray] = None\n\n def to_dict(self):\n # When converting to dict, numpy arrays are converted to lists\n result = self._asdict()\n for k, v in result.items():\n if v is not None:\n result[k] = v.tolist()\n return result\n\n @classmethod\n def from_dict(cls, input_dict):\n type_converted_input = {k: np.atleast_1d(v) if v is not None else None for k, v in input_dict.items()}\n return cls(**type_converted_input)" }, { "identifier": "Variables", "path": "src/causica/datasets/variables.py", "snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of 
lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, 
auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. 
This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data" }, { "identifier": "IdentityTransform", "path": "src/causica/preprocessing/transforms.py", "snippet": "class IdentityTransform(FunctionTransformer):\n \"\"\"Scikit-learn data transformation passing through any data without modification.\"\"\"\n\n def __init__(self):\n super().__init__(func=self.identity, inverse_func=self.identity)\n\n @staticmethod\n def identity(values: np.ndarray) -> np.ndarray:\n \"\"\"Return values without modification.\"\"\"\n return values" }, { "identifier": "UnitScaler", "path": "src/causica/preprocessing/transforms.py", "snippet": "class UnitScaler(FunctionTransformer):\n \"\"\"Scikit-learn data transformation for scaling (or squashing) data to the unit hypercube.\n\n The range of the data is determined by the provided variables.\n \"\"\"\n\n def __init__(self, variables: Iterable[Variable]):\n \"\"\"\n Args:\n variables: Iterable over the variables expected to be transformed\n provided in the same order as data columns.\n \"\"\"\n # Collect limits for the variables\n lower = []\n upper = []\n for variable in variables:\n lower.append(variable.lower)\n upper.append(variable.upper)\n\n if variable.lower == variable.upper:\n warnings.warn(\n f\"Variable with name '{variable.name}' has the same upper and lower values. 
Is this variable a constant?\"\n )\n\n self._lower = np.array(lower)\n self._range: np.ndarray = np.array(upper) - self._lower\n super().__init__(func=self.scale, inverse_func=self.unscale)\n\n def scale(self, values: np.ndarray) -> np.ndarray:\n \"\"\"Scale values into the hypercube using pre-determined variable ranges.\"\"\"\n return (values - self._lower) / self._range\n\n def unscale(self, scaled_values: np.ndarray) -> np.ndarray:\n \"\"\"Restore scaled values from the hypercube into the original range.\"\"\"\n return scaled_values * self._range + self._lower" } ]
import logging
import warnings
import numpy as np
import torch
from typing import Iterable, List, Optional, Tuple, TypeVar, Union
from scipy import sparse
from scipy.sparse import csr_matrix, issparse
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.utils.validation import check_is_fitted
from tqdm import tqdm
from ..datasets.dataset import CausalDataset, Dataset, SparseDataset
from ..datasets.intervention_data import InterventionData
from ..datasets.variables import Variables
from .transforms import IdentityTransform, UnitScaler
12,549
self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
EPSILON = 1e-5 logger = logging.getLogger(__name__) V = TypeVar("V", np.ndarray, torch.Tensor) # pylint: disable=protected-access class DataProcessor: def __init__( self, variables: Variables, unit_scale_continuous: bool = True, standardize_data_mean: bool = False, standardize_data_std: bool = False, ): """ Args: variables (Variables): Information about variables/features used by this model. unit_scale_continuous (bool): Scale continuous variables to the range of [0, 1]. standardize_data_mean (bool): Standardize continuous variables to mean=0 standardize_data_std (bool): Standardize continuous variables to std=1 """ if unit_scale_continuous and (standardize_data_mean or standardize_data_std): raise ValueError("Cannot unit scale and standardize variables simultanously.") self._variables = variables # Call unprocessed columns unproc_cols, processed columns proc_cols unproc_cols_by_type = self._variables.unprocessed_cols_by_type proc_cols_by_type = self._variables.processed_cols_by_type def flatten(lists): # Flatten proc_cols for continuous and binary unproc_cols, since they will be of form [[1], [2], ...] return [i for sublist in lists for i in sublist] if "binary" in unproc_cols_by_type: self._bin_unproc_cols = unproc_cols_by_type["binary"] self._bin_proc_cols = flatten(proc_cols_by_type["binary"]) # Save contiguous regions containig binary features to allow for more efficient processing via slicing self._bin_unproc_regions = self.split_contiguous_sublists(self._bin_unproc_cols) self._bin_proc_regions = self.split_contiguous_sublists(self._bin_proc_cols) assert len(self._bin_unproc_regions) == len(self._bin_proc_regions) else: self._bin_unproc_cols, self._bin_proc_cols = [], [] if "continuous" in unproc_cols_by_type: self._cts_unproc_cols = unproc_cols_by_type["continuous"] self._cts_proc_cols = flatten(proc_cols_by_type["continuous"]) # Save contiguous regions containing continuous features to allow for more efficient processing via slicing if all(x.overwrite_processed_dim is None for x in self._variables): self._cts_unproc_regions = self.split_contiguous_sublists(self._cts_unproc_cols) self._cts_proc_regions = self.split_contiguous_sublists(self._cts_proc_cols) else: # For VAEM, we can only take single variable as region # to allow for processing/reverting mask self._cts_unproc_regions = [[col_id] for col_id in unproc_cols_by_type["continuous"]] self._cts_proc_regions = proc_cols_by_type["continuous"] assert len(self._cts_unproc_regions) == len(self._cts_proc_regions) if unit_scale_continuous: self._cts_normalizers = [ UnitScaler(variables[i] for i in unproc_region) for unproc_region in self._cts_unproc_regions ] elif standardize_data_mean or standardize_data_std: self._cts_normalizers = [ StandardScaler(with_mean=standardize_data_mean, with_std=standardize_data_std) for _ in self._cts_unproc_regions ] else: self._cts_normalizers = [IdentityTransform()] * len(self._cts_unproc_regions) else: self._cts_unproc_cols, self._cts_proc_cols, self._cts_normalizers = [], [], [] if "categorical" in unproc_cols_by_type: self._cat_unproc_cols = unproc_cols_by_type["categorical"] self._cat_proc_cols = flatten(proc_cols_by_type["categorical"]) self._cat_proc_cols_grouped = proc_cols_by_type["categorical"] def get_lower(idx): return self._variables[idx].lower def get_upper(idx): return self._variables[idx].upper var_categories = [ np.arange(int(get_lower(var_idx)), int(get_upper(var_idx)) + 1) for var_idx in self._cat_unproc_cols ] self._one_hot_encoder = OneHotEncoder(categories=var_categories, 
sparse=False, handle_unknown="ignore") # Fit on dummy data due to an issue in sklearn where the encoder needs to be fitted to data even if the # categories are specified upon creation. self._one_hot_encoder.fit(np.array([categories[0] for categories in var_categories]).reshape(1, -1)) else: self._cat_unproc_cols, self._cat_proc_cols = [], [] self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
self, dataset: Union[Dataset, CausalDataset, SparseDataset]
0
2023-11-21 12:55:08+00:00
16k
ChenyangGao/python-epub3
epub3/epub.py
[ { "identifier": "File", "path": "epub3/util/file.py", "snippet": "class File:\n __slots__ = (\"path\", \"fs\", \"open\", \"open_modes\", \"_getattr\")\n ALL_MODES = frozenset(\"rwxab+\")\n\n def __init__(\n self, \n /, \n path=None, \n fs=None, \n open_modes=None, \n ):\n super().__setattr__(\"path\", path)\n super().__setattr__(\"fs\", fs)\n self._init_open(path, fs, open_modes)\n\n def __init_subclass__(cls, /, **kwargs):\n raise TypeError(\"subclassing is not allowed\")\n\n def __repr__(self, /) -> str:\n cls = type(self)\n module = cls.__module__\n name = cls.__qualname__\n if module != \"__main__\":\n name = module + \".\" + name\n return \"%s(%s)\" % (name, \", \".join(\"%s=%r\" % (k, getattr(self, k)) for k in cls.__slots__))\n\n def __delattr__(self, attr):\n raise TypeError(\"can't delete any attributes\")\n\n def __getattr__(self, attr, /):\n try:\n return self._getattr(attr)\n except Exception as e:\n raise AttributeError(attr) from e\n\n def __setattr__(self, attr, value, /):\n raise TypeError(\"can't set any attributes\")\n\n def _init_open(self, path, fs, open_modes, /):\n cls = type(self)\n code, file_open = cls._get_open(fs)\n use_io_open = file_open is io.open\n if file_open is None:\n if isinstance(path, Path):\n file_open = path.open\n use_io_open = True\n code = 0\n else:\n code, file_open = cls._get_open(path)\n if file_open is None:\n if not isinstance(path, (bytes, str, PathLike)):\n raise TypeError(\"unable to determine how to open the file\")\n file_open = partial(io.open, path)\n use_io_open = True\n if code < 0:\n code = 0\n use_fs = False\n else:\n file_open = partial(file_open, path)\n use_fs = True\n if code == 0:\n def _getattr0(attr):\n try:\n return getattr(os, attr)\n except AttributeError:\n try:\n return getattr(ospath, attr)\n except AttributeError:\n return getattr(shutil, attr)\n elif code == 1:\n _getattr0 = partial(getattr, fs if use_fs else path)\n elif code == 2:\n _getattr0 = (fs if use_fs else path).__getitem__\n if use_fs:\n def _getattr(attr, default=undefined, /):\n try:\n val = _getattr0(attr)\n except (LookupError, AttributeError):\n if default is undefined:\n raise\n return default\n if not callable(val):\n return val\n if isclass(val) or isinstance(val, staticmethod):\n return val\n return partial(val, path)\n else:\n def _getattr(attr, default=undefined, /):\n try:\n return _getattr0(attr)\n except (LookupError, AttributeError):\n if default is undefined:\n raise\n return default\n default_open_modes = _getattr(\"open_modes\", None)\n if default_open_modes is not None:\n open_modes = default_open_modes\n super().__setattr__(\"_getattr\", _getattr)\n open_keywords = cls._open_keywords(file_open)\n if \"mode\" not in open_keywords or open_modes == \"\":\n open_modes = frozenset()\n elif open_modes is None:\n open_modes = type(self).ALL_MODES\n elif use_io_open:\n open_modes = frozenset(open_modes) & type(self).ALL_MODES | frozenset(\"rb\")\n else:\n open_modes = frozenset(open_modes) & type(self).ALL_MODES | frozenset(\"r\")\n super().__setattr__(\"open_modes\", open_modes)\n amode = frozenset(\"rwxa+\")\n def open(\n mode=\"r\", \n buffering=-1, \n encoding=None, \n errors=None, \n newline=None, \n **kwargs, \n ):\n if mode not in OPEN_MODES:\n raise ValueError(f\"invalid open mode: {mode!r}\")\n binary_mode = \"b\" in mode\n if mode == \"r\":\n pass\n elif not open_modes:\n if \"r\" not in mode or \"+\" in mode:\n raise ValueError(f\"open mode unsupported: {mode!r}\")\n mode = \"r\"\n else:\n if open_modes:\n if amode & set(mode) - 
open_modes:\n raise ValueError(f\"open mode unsupported: {mode!r}\")\n mode = next(m for m in \"rwax\" if m in mode) + \"+\"[:\"+\" in mode]\n if open_modes:\n if \"b\" in open_modes:\n mode += \"b\"\n if open_keywords is not CONTAINS_ALL:\n kwargs = {k: v for k, v in kwargs.items() if k in open_keywords}\n if open_modes:\n kwargs[\"mode\"] = mode\n if \"buffering\" in open_keywords:\n kwargs[\"buffering\"] = buffering\n file = file_open(**kwargs)\n else:\n file = file_open(**kwargs)\n if binary_mode and buffering == 0:\n return file\n bufsize = buffering if buffering > 1 else DEFAULT_BUFFER_SIZE\n if \"+\" in mode:\n file = BufferedRandom(file, bufsize)\n elif \"r\" in mode:\n file = BufferedReader(file, bufsize)\n else:\n file = BufferedWriter(file, bufsize)\n if binary_mode:\n return file\n return TextIOWrapper(\n file, \n encoding=encoding, \n errors=errors, \n newline=newline, \n line_buffering=buffering==1, \n )\n super().__setattr__(\"open\", open)\n\n @staticmethod\n def _get_open(f, /):\n if f is None:\n return 0, None\n if callable(open := getattr(f, \"open\", None)):\n return 1, open\n try:\n if callable(open := f[\"open\"]):\n return 2, open\n except (TypeError, LookupError):\n if callable(f):\n return 3, f\n return -1, None\n\n @staticmethod\n def _open_keywords(open, /):\n params = signature(open).parameters\n if params:\n names = []\n for name, param in reversed(params.items()):\n if param.kind not in (POSITIONAL_OR_KEYWORD, KEYWORD_ONLY):\n break\n names.append(name)\n if param.kind is VAR_KEYWORD:\n return CONTAINS_ALL\n return frozenset(names)\n return frozenset()\n\n def check_open_mode(self, mode=\"r\", /):\n if mode not in OPEN_MODES:\n return False\n if mode == \"r\":\n return True\n open_modes = self.open_modes\n if not open_modes:\n if \"r\" not in mode or \"+\" in mode:\n return False\n else:\n if open_modes and frozenset(\"rwxa+\") & set(mode) - open_modes:\n return False\n return True" }, { "identifier": "RootFS", "path": "epub3/util/file.py", "snippet": "class RootFS:\n\n def __init__(self, root=None, /, joinpath=None):\n none_root = root is None\n if not none_root and callable(open := getattr(root, \"open\", None)):\n _getattr = partial(getattr, root)\n elif not none_root and callable(open := root[\"open\"]):\n _getattr = root.__getitem__\n elif none_root or isinstance(root, (bytes, str, PathLike)):\n self._fs = None\n if root is None:\n self._root = os.getcwd()\n else:\n self._root = ospath.realpath(root)\n if ospath.isfile(root):\n raise NotADirectoryError(errno.ENOTDIR, root)\n self._joinpath = ospath.join\n self._open = io.open\n return\n if joinpath is None:\n joinpath = get_any_callable(_getattr, \"joinpath\", \"join\") or posixpath.join\n self._fs = root\n self._root = \"\"\n self._getattr = _getattr\n self._joinpath = joinpath\n self._open = open\n\n def __repr__(self, /):\n return f\"<{type(self).__qualname__}({self._root!r}) at {hex(id(self))}>\"\n\n def _getattr(self, attr, /):\n try:\n return getattr(os, attr)\n except AttributeError:\n try:\n return getattr(ospath, attr)\n except AttributeError:\n return getattr(shutil, attr)\n\n def __getattr__(self, attr, /):\n try:\n val = self._getattr(attr)\n except (AttributeError, LookupError) as e:\n raise AttributeError(attr) from e\n if not callable(val):\n return val\n if isclass(val) or isinstance(val, staticmethod):\n return val\n def wrapper(name, /, *args, **kwargs):\n return val(self.joinpath(name), *args, **kwargs)\n return update_wrapper(wrapper, val)\n\n @property\n def name(self, /):\n return 
self._root\n\n @property\n def root(self, /):\n return self._root\n\n def joinpath(self, /, *paths):\n return self._joinpath(self._root, *paths)\n\n def open(\n self, \n name, \n /, \n mode='r', \n buffering=-1, \n encoding=None, \n errors=None, \n newline=None, \n ):\n return self._open(\n self.joinpath(name), \n mode=mode, \n buffering=buffering, \n encoding=encoding, \n errors=errors, \n newline=newline, \n )" }, { "identifier": "TemporaryFS", "path": "epub3/util/file.py", "snippet": "class TemporaryFS(RootFS):\n\n def __init__(self, root=None, /, joinpath=None):\n none_root = root is None\n if not none_root and callable(open := getattr(root, \"open\", None)):\n _getattr = partial(getattr, root)\n elif not none_root and callable(open := root[\"open\"]):\n _getattr = root.__getitem__\n elif none_root or isinstance(root, (bytes, str, PathLike)):\n self._fs = None\n temdir = TemporaryDirectory(dir=root)\n self._root = temdir.name\n self._joinpath = ospath.join\n self._open = io.open\n self._cleanup = temdir.cleanup\n return\n else:\n raise TypeError(f\"can't get `open` method from: {fs!r}\")\n if joinpath is None:\n joinpath = get_any_callable(_getattr, \"joinpath\", \"join\") or posixpath.join\n self._fs = root\n self._root = root = \"\"\n self._getattr = _getattr\n self._joinpath = joinpath\n self.open = open\n remove = get_any_callable(_getattr, \"remove\", \"rm\")\n if remove is None:\n warn(f\"can't get `remove` and `rm` methods from: {fs!r}\")\n self.remove = lambda *args, **kwargs: None\n self._cleanup = lambda: None\n return\n self.remove = remove\n mkdir = get_any_callable(_getattr, \"mkdir\", \"makedir\")\n if mkdir is not None:\n name = str(uuid4())\n try:\n mkdir(name)\n except:\n warn(f\"can't make temporary directory: {name!r} on {fs!r}\")\n else:\n self._root = root = name\n if root:\n rmtree = get_any_callable(_getattr, \"rmtree\", \"removetree\")\n if rmtree is not None:\n def _open(path, *args, **kwargs):\n return open(joinpath(root, path), *args, **kwargs)\n self.open = update_wrapper(_open, open)\n def _remove(path):\n remove(joinpath(root, path))\n self.remove = update_wrapper(_remove, remove)\n self._cleanup = lambda: rmtree(root)\n return\n created = set()\n def _open(path, mode=\"r\", **kwargs):\n path = joinpath(root, path)\n file = open(path, mode=mode, **kwargs)\n if \"r\" not in mode:\n created.add(path)\n return file\n self.open = update_wrapper(_open, open)\n def _remove(path):\n path = joinpath(root, path)\n remove(path)\n created.discard(path)\n self.remove = update_wrapper(_remove, remove)\n rmdir = get_any_callable(_getattr, \"rmdir\", \"removedir\")\n def _cleanup():\n for path in tuple(created):\n try:\n remove(path)\n except:\n pass\n if root and rmdir is not None:\n try:\n rmdir(root)\n except:\n pass\n self._cleanup = _cleanup\n\n def __repr__(self, /):\n return f\"<{type(self).__qualname__}({self._fs!r}) {self._root!r} at {hex(id(self))}>\"\n\n def __del__(self, /):\n self.cleanup()\n\n def __enter__(self, /):\n return self\n\n def __exit__(self, exc, value, tb, /):\n self.cleanup()\n\n def cleanup(self, /):\n try:\n self._cleanup()\n except:\n pass" }, { "identifier": "OPEN_MODES", "path": "epub3/util/file.py", "snippet": "OPEN_MODES = frozenset(\n \"\".join(t1) \n for t0 in product(\"rwax\", (\"\", \"b\", \"t\"), (\"\", \"+\")) \n for t1 in permutations(t0, 3)\n)" }, { "identifier": "guess_media_type", "path": "epub3/util/helper.py", "snippet": "def guess_media_type(name: str, /, default: str = \"application/octet-stream\") -> str:\n return 
guess_type(name)[0] or default" }, { "identifier": "values", "path": "epub3/util/helper.py", "snippet": "def values(m, /):\n if isinstance(m, Mapping):\n try:\n return m.values()\n except Exception:\n return ValuesView(m)\n return m" }, { "identifier": "items", "path": "epub3/util/helper.py", "snippet": "def items(m, /):\n if isinstance(m, Mapping):\n try:\n return m.items()\n except Exception:\n return ItemsView(m)\n return m" }, { "identifier": "sup", "path": "epub3/util/helper.py", "snippet": "def sup(exists, x=1):\n \"\"\"Find the smallest available integer greater than or equal to `x`.\n\n :param exists: Determine if the value exists (unavailable), return True if it does.\n :param x: Start value.\n\n :return: The smallest integer greater than or equal to the initial value \n x for which calling exists returns False.\n \"\"\"\n δ = 1\n while exists(x):\n x += δ\n δ <<= 1\n if δ <= 2:\n return x\n δ >>= 2\n x -= δ\n while δ > 1:\n δ >>= 1\n if exists(x):\n x += δ\n else:\n x -= δ\n return x + exists(x)" }, { "identifier": "proxy_property", "path": "epub3/util/proxy.py", "snippet": "@overload\ndef proxy_property(fget: None, /, key: Optional[str] = \"\") -> Callable[[Callable], property]: ..." }, { "identifier": "ElementAttribProxy", "path": "epub3/util/proxy.py", "snippet": "class ElementAttribProxy(metaclass=CachedMeta):\n __const_keys__: tuple[str, ...] = ()\n __protected_keys__: tuple[str, ...] = ()\n __cache_check_key__ = lambda obj: isinstance(obj, Element)\n __cache_cls__ = WeakKeyDictionary if USE_BUILTIN_XML else WeakValueDictionary\n __wrap_class__: \"type[ElementAttribProxy]\"\n\n def __init__(self, root, /):\n self._root = root\n self._attrib = root.attrib\n if USE_BUILTIN_XML:\n self._nsmap = nsmap = {}\n else:\n self._nsmap = nsmap = root.nsmap\n if self.__const_keys__:\n self.__const_keys__ = frozenset(\n resolve_prefix(key, nsmap, NAMESPACES) for key in type(self).__const_keys__\n )\n if self.__protected_keys__:\n self.__protected_keys__ = frozenset(\n resolve_prefix(key, nsmap, NAMESPACES) for key in type(self).__protected_keys__\n )\n\n def __init_subclass__(\n cls, \n /, \n get_key=None, \n check_key=None, \n get_state=None, \n set_state=None, \n **kwargs, \n ):\n if callable(get_key):\n self.__cache_get_key__ = get_key\n if isclass(check_key) and issubclass(check_key, object) or type(check_key) is tuple:\n self.__cache_check_key__ = lambda obj, _t: isinstance(obj, _t)\n elif type(check_key) in (set, frozenset):\n self.__cache_check_key__ = check_key.__contains__\n elif callable(check_key):\n self.__cache_check_key__ = check_key\n if callable(get_state):\n self.__cache_get_state__ = get_state\n if callable(set_state):\n self.__cache_set_state__ = set_state\n namespaces = cls.__dict__\n const_keys = namespaces.get(\"__const_keys__\")\n if const_keys:\n for key in const_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key))\n protected_keys = namespaces.get(\"__protected_keys__\")\n if protected_keys:\n for key in protected_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key, setable=True))\n optional_keys = namespaces.get(\"__optional_keys__\")\n if optional_keys:\n for key in optional_keys:\n stripped_key = strip_key(key)\n if stripped_key not in namespaces:\n setattr(cls, stripped_key, auto_property(key, setable=True, delable=True))\n if \"__wrap_class__\" not in namespaces:\n for base_cls in cls.__mro__:\n if \"__wrap_class__\" in 
base_cls.__dict__:\n cls.__wrap_class__ = base_cls.__wrap_class__\n break\n elif cls.__dict__.get(\"__is_wrap_class__\"):\n cls.__wrap_class__ = base_cls\n break\n\n def __contains__(self, key, /):\n if not isinstance(key, str) or not key:\n return False\n return resolve_prefix(key, self._nsmap, NAMESPACES) in self._attrib\n\n def __delitem__(self, key, /):\n if isinstance(key, (int, slice)):\n del self._root[key]\n elif isinstance(key, str):\n if not key:\n raise ValueError(\"empty key not allowed\")\n if key in self.__const_keys__ or key in self.__protected_keys__:\n raise LookupError(f\"not allowed to delete key: {key}\")\n del self._attrib[key]\n else:\n raise TypeError(\"only accept `key` type: int, slice and str\")\n return self\n\n def __eq__(self, other, /):\n if type(self) is not type(other):\n return NotImplemented\n return self._root is other._root\n\n def __getitem__(self, key, /):\n if isinstance(key, str):\n if not key:\n raise ValueError(\"empty key not allowed\")\n return self._attrib[resolve_prefix(key, self._nsmap, NAMESPACES)]\n elif isinstance(key, (int, slice)):\n if isinstance(key, int):\n return type(self).wrap(self._root[key])\n return list(map(type(self).wrap, self._root[key]))\n else:\n raise TypeError(\"only accept `key` type: int, slice and str\")\n\n def __hash__(self, /):\n return hash(self._root)\n\n @PyLinq.streamify\n def __iter__(self, /):\n return iter(self._attrib)\n\n def __len__(self, /):\n return len(self._attrib)\n\n def __setitem__(self, key, value, /):\n if not isinstance(key, str):\n raise TypeError(\"only accept `key` type: `str`\")\n if not key:\n raise ValueError(\"empty key not allowed\")\n if value is None:\n self.pop(key, None)\n else:\n if key in self.__const_keys__:\n raise LookupError(f\"not allowed to set key: {key!r}\")\n self._attrib[key] = str(value)\n return self\n\n def __repr__(self, /):\n attrib = self._attrib\n attrib = f\", {attrib=!r}\" if attrib else \"\"\n return f\"<{type(self).__qualname__}(<{self._root.tag}>{attrib}) at {hex(id(self))}>\"\n\n @classmethod\n def wrap(cls, root, /):\n wrap_class_map = cls.__dict__.get(\"__wrap_class_map__\")\n if not wrap_class_map:\n return cls.__wrap_class__(root)\n for pred, wrap_class in wrap_class_map.items():\n if isinstance(pred, str):\n if pred.startswith(\"{*}\"):\n if pred[3:] == root.tag or root.tag.endswith(pred[2:]):\n return wrap_class(root)\n elif pred.startswith(\"{}\"):\n if pred[2:] == root.tag:\n return wrap_class(root)\n elif pred.endswith(\":*\"):\n if root.tag.startswith(pred[:-1]) or root.tag.startswith(resolve_prefix(pred[:-1], NAMESPACES)):\n return wrap_class(root)\n elif root.tag == pred or root.tag == resolve_prefix(pred, NAMESPACES):\n return wrap_class(root)\n elif isinstance(pred, Pattern):\n if pred.search(root.tag) is not None:\n return wrap_class(root)\n elif isinstance(pred, Container):\n if root.tag in pred:\n return wrap_class(root)\n elif callable(pred):\n if pred(root):\n return wrap_class(root)\n return cls.__wrap_class__(root)\n\n def getproxy(self, key, /):\n if not key:\n return\n key = resolve_prefix(key, self._nsmap, NAMESPACES)\n namespaces = type(self).__dict__\n const_keys = namespaces.get(\"__const_keys__\")\n protected_keys = namespaces.get(\"__protected_keys__\")\n setable = not (const_keys and key in const_keys)\n delable = setable and not (protected_keys and key in protected_keys)\n return auto_property(key, setable=setable, delable=delable).fget(self)\n\n @cached_property\n def attrib(self, /):\n return AttrInfoProxy(self)\n\n @property\n 
def nsmap(self, /):\n return self._nsmap\n\n @cached_property\n def info(self, /):\n return MappingProxyType({\"attrib\": self.attrib})\n\n @property\n def proxy(self, /):\n return self\n\n @PyLinq.streamify\n def iter(self, /):\n return map(type(self).wrap, self._root.iterfind(\"*\"))\n\n def list(self, /, mapfn=None):\n if mapfn is None:\n return list(self.iter())\n return list(map(mapfn, self.iter()))\n\n def keys(self, /):\n return self._attrib.keys()\n\n def values(self, /):\n return self._attrib.values()\n\n def items(self, /):\n return self._attrib.items()\n\n def clear(self, /):\n const_keys = self.__const_keys__\n protected_keys = self.__protected_keys__\n attrib = self._attrib\n if const_keys or protected_keys:\n for key in tuple(attrib):\n if key in const_keys or key in protected_keys:\n continue\n del attrib[key]\n else:\n attrib.clear()\n return self\n\n def get(self, key, /, default=None):\n try:\n return self._attrib[key]\n except LookupError:\n return default\n\n def pop(self, key, /, default=undefined):\n if key in self.__const_keys__ or key in self.__protected_keys__:\n raise LookupError(f\"not allowed to delete key: {key}\") \n try:\n r = self._attrib[key]\n except LookupError:\n if default is undefined:\n raise\n return default\n else:\n del self._attrib[key]\n return r\n\n def popitem(self, /):\n const_keys = self.__const_keys__\n protected_keys = self.__protected_keys__\n for key, val in reversed(self._attrib.items()):\n if not (key in const_keys or key in protected_keys):\n del self._attrib[key]\n return (key, val)\n raise LookupError(\"no items to pop\")\n\n def setdefault(self, key, /, default=\"\"):\n if not isinstance(key, str):\n raise TypeError(\"only accept `key` type: str\")\n try:\n return seself._attriblf[key]\n except LookupError:\n self._attrib[key] = default\n return default\n\n def sort(self, key=id, reverse=False, use_backend_element=False):\n if use_backend_element:\n self._root[:] = sorted(self._root, key=key, reverse=reverse)\n else:\n self._root[:] = (e._root for e in sorted(self.iter(), key=key, reverse=reverse))\n return self\n\n def merge(self, attrib=None, /, **attrs):\n if attrib:\n if attrs:\n attrib = dict(attrib, **attrs)\n else:\n attrib = attrs\n if attrib:\n el_set(self._root, attrib=attrib, namespaces=NAMESPACES, merge=True)\n return self\n\n def update(self, attrib=None, /, **attrs):\n const_keys = self.__const_keys__\n if attrib:\n if attrs:\n attrib = dict(attrib, **attrs)\n elif const_keys and (not isinstance(attrib, Mapping) or any(key in attrib for key in const_keys)):\n attrib = dict(attrib)\n else:\n const_keys = ()\n else:\n attrib = attrs\n if const_keys:\n for key in const_keys:\n attrib.pop(key, None)\n if attrib:\n el_set(self._root, attrib=attrib, namespaces=NAMESPACES, merge=False)\n return self" }, { "identifier": "ElementProxy", "path": "epub3/util/proxy.py", "snippet": "class ElementProxy(ElementAttribProxy):\n __is_wrap_class__ = True\n\n def __repr__(self, /):\n attrib = self._attrib\n attrib = f\", {attrib=!r}\" if attrib else \"\"\n text = self.text\n text = f\", {text=!r}\" if text and text.strip() else \"\"\n tail = self.tail\n tail = f\", {tail=!r}\" if tail and tail.strip() else \"\"\n return f\"<{type(self).__qualname__}(<{self._root.tag}>{attrib}{text}{tail}) at {hex(id(self))}>\"\n\n def getproxy(self, key=\"\", /):\n if not key:\n return auto_property(key, setable=True, delable=True).fget(self)\n return super().getproxy(key)\n\n @property\n def length(self, /):\n return len(self._root)\n\n @property\n def 
tag(self, /):\n return self._root.tag\n\n @property\n def text(self, /):\n return self._root.text\n\n @text.setter\n def text(self, text, /):\n self._root.text = None if text is None else str(text)\n\n @property\n def tail(self, /):\n return self._root.tail\n\n @tail.setter\n def tail(self, text, /):\n self._root.tail = None if text is None else str(text)\n\n @cached_property\n def info(self, /):\n return ElementInfoProxy(self)\n\n def clear(self, /):\n self._root.clear()\n return self\n\n def merge(self, attrib=None, /, text=None, tail=None, **attrs):\n super().merge(attrib, **attrs)\n el_set(self._root, text=text, tail=tail, namespaces=NAMESPACES, merge=True)\n return self\n\n def update(self, attrib=None, /, text=None, tail=None, **attrs):\n super().update(attrib, **attrs)\n el_set(self._root, text=text, tail=tail, namespaces=NAMESPACES, merge=False)\n return self\n\n def add(self, name, /, attrib=None, text=None, tail=None):\n return type(self).wrap(el_add(self._root, name=name, attrib=attrib, text=text, tail=tail, namespaces=NAMESPACES))\n\n def delete(self, path, /):\n if isinstance(path, ElementAttribProxy):\n try:\n self._root.remove(path._root)\n except:\n pass\n else:\n el_del(self._root, path, namespaces=NAMESPACES)\n return self\n\n def find(self, path, /):\n return next(self.iterfind(path), None)\n\n @PyLinq.streamify\n def iterfind(self, path, /):\n return map(type(self).wrap, el_iterfind(self._root, path, NAMESPACES))\n\n def set(\n self, \n path=None, \n /, \n name=None, \n attrib=None, \n text=None, \n tail=None, \n merge=False, \n ):\n el = el_set(\n self._root, \n path, \n name=name, \n attrib=attrib, \n text=text, \n tail=tail, \n namespaces=NAMESPACES, \n merge=merge, \n )\n if el is not None:\n return type(self).wrap(el)\n\n def setfind(\n self, \n name, \n /, \n find_attrib=None, \n attrib=None, \n text=None, \n tail=None, \n merge=False, \n delete=False, \n auto_add=False, \n ):\n el = el_setfind(\n self._root, \n name=name, \n find_attrib=find_attrib, \n attrib=attrib, \n text=text, \n tail=tail, \n namespaces=NAMESPACES, \n merge=merge, \n delete=delete, \n auto_add=auto_add, \n )\n if el is not None:\n return type(self).wrap(el)" }, { "identifier": "NAMESPACES", "path": "epub3/util/proxy.py", "snippet": "NAMESPACES: Final = {\n \"containerns\": \"urn:oasis:names:tc:opendocument:xmlns:container\", \n \"daisy\": \"http://www.daisy.org/z3986/2005/ncx/\", \n \"dc\": \"http://purl.org/dc/elements/1.1/\", \n \"ds\": \"http://www.w3.org/2000/09/xmldsig#\", \n \"epub\": \"http://www.idpf.org/2007/ops\", \n \"enc\": \"http://www.w3.org/2001/04/xmlenc#\",\n \"ncx\": \"http://www.daisy.org/z3986/2005/ncx/\", \n \"ns\": \"http://www.idpf.org/2016/encryption#compression\", \n \"opf\": \"http://www.idpf.org/2007/opf\", \n \"rdf\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\", \n \"smil\": \"http://www.w3.org/ns/SMIL\", \n \"svg\": \"http://www.w3.org/2000/svg\", \n \"html\": \"http://www.w3.org/1999/xhtml\", \n \"wsdl\": \"http://schemas.xmlsoap.org/wsdl/\", \n \"xhtml\": \"http://www.w3.org/1999/xhtml\", \n \"xlink\": \"http://www.w3.org/1999/xlink\", \n \"xml\": \"http://www.w3.org/XML/1998/namespace\", \n \"xs\": \"http://www.w3.org/2001/XMLSchema\", \n \"xsi\": \"http://www.w3.org/2001/XMLSchema-instance\", \n}" }, { "identifier": "remap_links", "path": "epub3/util/remap.py", "snippet": "def remap_links(\n manifest, \n pathmap, \n encoding=\"utf-8\", \n link_patterns=LINK_PATTERNS, \n):\n changed = []\n for predicate, patterns in link_patterns:\n for item in 
manifest.filter_by_attr(predicate):\n try:\n text = item.read_text(encoding=encoding)\n href = unquote(item[\"href\"])\n basedir = dirname(href)\n if type(patterns) is list:\n ls = []\n for subpats in patterns:\n repls = list(path_repl_iter(chain_finditer(text, subpats), pathmap, basedir))\n if repls:\n ls.append(repls)\n if not ls:\n repls = None\n elif len(ls) > 1:\n repls = sorted(chain.from_iterable(ls))\n else:\n repls = ls[0]\n else:\n repls = list(path_repl_iter(chain_finditer(text, patterns), pathmap, basedir))\n if repls:\n text = \"\".join(apply_repl_iter(text, repls))\n item.write_text(text, encoding=encoding)\n changed.append(href)\n except:\n pass\n return changed" }, { "identifier": "PyLinq", "path": "epub3/util/stream.py", "snippet": "class PyLinq(Stream, AggregateMixin, ItertoolsMixin):\n\n def __init__(self, iterable=None):\n if iterable is None:\n iterable = []\n super().__init__(iterable)\n\n def iter(self):\n return self @ iter(self.iterable)\n\n def reversed(self):\n return self @ reversed(self.iterable)\n\n def length(self):\n return self @ len(self.iterable)\n\n def add(self, element):\n return self.chain((element,))\n\n def all_equal(self):\n \"Returns True if all the elements are equal to each other\"\n g = iter(self.groupby())\n return next(g, True) and not next(g, False)\n\n def contains(self, element, key=None):\n return element in self.map(key)\n\n def difference(self, other, key=None, left_key=None, right_key=None):\n other = (self @ other).map(key or right_key)\n selectors = self.map(key or left_key).notin(other)\n return self.compress(selectors)\n\n @typed_method\n def distinct(self, key=None):\n # A simpler but not equivalent implementation as following:\n # return self @ self.group_by(key).each.first()\n hashable, unhashable = set(), []\n for i, k in self.pair(key):\n if k not in hashable and k not in unhashable:\n try:\n hashable.add(k)\n except TypeError:\n unhashable.append(k)\n yield i\n\n def element_at(self, n, default=undefined):\n try:\n return self[n]\n except TypeError as exc:\n if type(n) is int:\n if n >= 0:\n r = tuple(self.islice(n, n+1))\n if r:\n return r[0]\n else:\n r = deque(self, -n)\n if len(r) == -n:\n return r[0]\n if default is not undefined:\n return default\n raise LookupError(f'No element found at {n!r}') from exc\n\n def first(self, default=undefined):\n # self.element_at(0, default)\n if default is undefined:\n try:\n return next(iter(self))\n except StopIteration as exc:\n raise LookupError('No such first element') from exc\n return next(iter(self), default)\n\n def first_true(self, default=None, predicate=None):\n \"\"\"Returns the first true value in the iterable.\n\n If no true value is found, returns *default*\n\n If *predicate* is not None, returns the first item\n for which predicate(item) is true.\n\n \"\"\"\n return next(iter(self.filter(predicate)), default)\n\n @typed_method\n def flatten(list_of_lists):\n \"Flatten one level of nesting\"\n return itertools.chain.from_iterable(self.iterable)\n\n def group_by(self, key=None):\n groupers = self.orderby(key=key).groupby(key=key)\n return groupers.map(lambda args: Grouper.make_grouper(*args))\n\n @typed_method\n def group_join(self, other, key=None, left_key=None, right_key=None):\n left_key, right_key = key or left_key, key or right_key\n left = {i.key: tuple(i) for i in self.group_by(left_key)}\n right = {i.key: tuple(i) for i in (self @ other).group_by(right_key)}\n for k in sorted(left.keys() | right.keys()):\n grouper = itertools.product(left.get(k, ()), 
right.get(k, ()))\n yield Grouper.make_grouper(k, grouper)\n\n def intersection(self, other, key=None, left_key=None, right_key=None):\n return self.join(other, key, left_key, right_key).map(lambda x: x[0])\n\n def isin(self, other):\n if isinstance(other, Stream):\n other = other.data\n if not isinstance(other, (Set, Mapping)):\n if not isinstance(other, Sequence):\n other = tuple(other)\n try:\n other = set(other)\n except TypeError:\n pass\n return self.map(lambda x: x in other)\n\n def join(self, other, key=None, left_key=None, right_key=None):\n left_key = key or left_key or identity_function\n right_key = key or right_key or identity_function\n judge = lambda x: left_key(x[0]) == right_key(x[1])\n return self.product(other).filter(judge)\n\n def last(self, default=undefined):\n # self.element_at(-1, default)\n value = default\n for value in self: pass\n if value is undefined:\n raise LookupError('No such last element')\n return value\n\n @typed_method\n def ncycles(self, n):\n \"Returns the sequence elements n times\"\n return itertools.chain.from_iterable(itertools.repeat(tuple(self.iterable), n))\n\n def nth(self, n, default=undefined):\n \"Returns the nth item or a default value\"\n if isinstance(self.iterable, Sequence):\n try:\n return self.iterable[n]\n except LookupError:\n if default is undefined:\n raise\n return default\n try:\n return next(iter(self.islice(n, None)))\n except StopIteration as e:\n if default is undefined:\n raise LookupError(n) from e\n return default\n\n @typed_method\n def prepend(self, *values):\n \"Prepend a single value in front of an iterator\"\n return itertools.chain(values, self.iterable)\n\n def take(self, n):\n return self.islice(n)\n\n def notin(self, other):\n return self.isin(other).map(lambda x: not x)\n\n def orderby(self, key=None, reverse=False):\n return self.collect(sorted, key=key, reverse=reverse)\n\n def order_by(self, kwargs_orders=None, reverse_orders=False):\n data = list(self)\n if kwargs_orders:\n if reverse_orders:\n kwargs_orders = reversed(kwargs_orders)\n for kwargs in kwargs_orders:\n data.sort(**kwargs)\n return self @ data\n\n @typed_method\n def pair(self, key=None):\n if key is None:\n for i in self:\n yield i, i\n else:\n for i in self:\n yield i, key(i)\n\n def select(self, selector=None):\n return self.map(selector)\n\n def select_many(self, selector=None):\n return self.map(selector).chain_self_iterable()\n\n def single(self, default=undefined):\n n = 0\n for n, v in zip(range(1, 3), self): pass\n if n == 0:\n if default is not undefined:\n return default\n raise LookupError('No elements exception occured')\n elif n == 2:\n raise LookupError('More than one element exception occured')\n return v\n\n def skip(self, n):\n return self.islice(n, None)\n\n def skipwhile(self, predicate):\n return self.dropwhile(predicate)\n\n def tail(self, n):\n return self.collect(deque, n)\n\n def where(self, predicate=None):\n return self.filter(predicate)\n\n def zip(self, *iterables):\n return zip(self, *iterables)" }, { "identifier": "el_add", "path": "epub3/util/xml.py", "snippet": "def el_add(\n el: Element, \n /, \n name: str, \n attrib: Optional[Mapping] = None, \n text=None, \n tail=None, \n namespaces: Optional[Mapping] = None, \n) -> Element:\n \"\"\"\n \"\"\"\n name = extract_name(name)\n if not name:\n raise ValueError(\"unable to determine name\")\n try:\n nsmap = el.nsmap # type: ignore\n except:\n nsmap = {}\n if attrib:\n attrib0 = items(attrib)\n attrib = {}\n for key, val in attrib0:\n if key is None:\n attrib[key] = 
val\n elif isinstance(key, str):\n if key == \"xmlns\":\n if val:\n nsmap[None] = val\n else:\n nsmap.pop(None, None)\n elif key.startswith(\"xmlns:\"):\n if val:\n nsmap[key[6:]] = val\n else:\n nsmap.pop(key[6:], None)\n else:\n attrib[key] = val\n name = resolve_prefix(name, nsmap, namespaces, inherit=True)\n if USE_BUILTIN_XML:\n sel = el.makeelement(name, cast(dict[str, str], {}))\n else:\n sel = el.makeelement(name, nsmap=cast(dict[str, str], nsmap))\n el.append(sel)\n _el_set(sel, attrib, text, tail, nsmap, namespaces)\n return sel" }, { "identifier": "el_del", "path": "epub3/util/xml.py", "snippet": "def el_del(\n el: Element, \n path: Optional[str] = None, \n /, \n namespaces: Optional[Mapping] = None, \n) -> Optional[Element]:\n \"\"\"\n \"\"\"\n sel = el_find(el, path, namespaces) if path else el\n if sel is not None:\n try:\n pel = sel.getparent() # type: ignore\n except AttributeError:\n pel = el\n if pel is None or pel is sel:\n raise LookupError(f\"can't get parent element: {sel!r}\")\n pel.remove(sel)\n return sel" }, { "identifier": "el_iterfind", "path": "epub3/util/xml.py", "snippet": "def el_iterfind(\n el: Element, \n path: Optional[str] = None, \n /, \n namespaces: Optional[Mapping] = None, \n) -> Iterator[Element]:\n \"\"\"\n \"\"\"\n if not path or path in (\".\", \"*..\", \"*...\", \"./.\"):\n return iter((el,))\n nsmap: Optional[Mapping]\n if USE_BUILTIN_XML:\n nsmap = namespaces\n else:\n nsmap = el.nsmap\n if namespaces:\n nsmap.update(namespaces)\n if nsmap and (None in nsmap or \"\" in nsmap):\n if any(\n l == \"[\" and r != \"@\" \n for l, r in pairwise(m[0] for m in xpath_tokenizer_re.finditer(path))\n ):\n uri = get(nsmap, None) or get(nsmap, \"\") or \"*\"\n path = generalize_elementpath(path, uri=uri)\n nsmap = {k: v for k, v in items(nsmap) if k and v}\n return el.iterfind(path, nsmap) # type: ignore" }, { "identifier": "el_set", "path": "epub3/util/xml.py", "snippet": "def el_set(\n el: Element, \n path: Optional[str] = None, \n /, \n name: Optional[str] = None, \n attrib: Optional[Mapping] = None, \n text: Optional[str] = None, \n tail: Optional[str] = None, \n namespaces: Optional[Mapping] = None, \n merge: bool = False, \n) -> Element:\n \"\"\"\n \"\"\"\n sel = el_find(el, path, namespaces) if path else el\n if sel is not None:\n if text is None and tail is None and not attrib:\n return sel\n try:\n nsmap = sel.nsmap # type: ignore\n except:\n nsmap = None\n (_el_setmerge if merge else _el_set)(sel, attrib, text, tail, nsmap, namespaces)\n elif name is not None:\n if name == \"\":\n name = path\n sel = el_add(el, cast(str, name), attrib=attrib, text=text, tail=tail, namespaces=namespaces)\n else:\n raise LookupError(f\"element not found: {el!r}.find({path!r}) is None\")\n return sel" }, { "identifier": "undefined", "path": "epub3/util/undefined.py", "snippet": "class UndefinedType:\r\n def __new__(cls, /):\r\n def __init_subclass__(cls, /, **kwargs):\r\n def __eq__(self, other, /):\r" } ]
import errno import io import os import os.path as ospath import posixpath from copy import deepcopy from datetime import datetime from fnmatch import translate as wildcard_translate from functools import cached_property, partial from inspect import getfullargspec, isclass from io import IOBase, TextIOWrapper from operator import methodcaller from os import fsdecode, remove, stat, stat_result, PathLike from pathlib import PurePosixPath from posixpath import join as joinpath, normpath from pprint import pformat from re import compile as re_compile, escape as re_escape, Pattern from shutil import copy, copyfileobj from typing import cast, Any, Callable, Container, Mapping, MutableMapping, Optional from types import MappingProxyType from uuid import uuid4 from warnings import warn from weakref import WeakKeyDictionary, WeakValueDictionary from urllib.parse import quote, unquote from zipfile import ZipFile, ZIP_STORED from .util.file import File, RootFS, TemporaryFS, OPEN_MODES from .util.helper import guess_media_type, values, items, sup from .util.proxy import proxy_property, ElementAttribProxy, ElementProxy, NAMESPACES from .util.remap import remap_links from .util.stream import PyLinq from .util.xml import el_add, el_del, el_iterfind, el_set from .util.undefined import undefined, UndefinedType from lxml.etree import fromstring, tostring, _Element as Element, _ElementTree as ElementTree # type: ignore from xml.etree.ElementTree import fromstring, tostring, Element, ElementTree # type: ignore
13,861
self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") 
else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"):
#!/usr/bin/env python # coding: utf-8 __author__ = "ChenyangGao <https://chenyanggao.github.io>" __version__ = (0, 0, 1) __all__ = ["ePub", "Metadata", "DCTerm", "Meta", "Link", "Manifest", "Item", "Spine", "Itemref"] try: except ModuleNotFoundError: class DCTerm(ElementProxy): pass class Meta(ElementProxy): __protected_keys__ = ("property",) __optional_keys__ = ("dir", "id", "refines", "scheme", "xml:lang") class Link(ElementAttribProxy): __protected_keys__ = ("href", "rel") __optional_keys__ = ("hreflang", "id", "media-type", "properties", "refines") class Item(ElementAttribProxy): __const_keys__ = ("id",) __protected_keys__ = ("href", "media-type") __optional_keys__ = ("fallback", "media-overlay", "properties") __cache_get_state__ = lambda _, manifest: manifest def __init__(self, root: Element, manifest, /): super().__init__(root) self._manifest = manifest def __eq__(self, other, /): if type(self) is not type(other): return NotImplemented return self._manifest is other._manifest and self._attrib["href"] == other._attrib["href"] def __fspath__(self, /): return unquote(self._attrib["href"]) def __hash__(self, /): return hash((self._root, id(self._manifest))) def __setitem__(self, key, value, /): if key == "href": if value is None: raise ValueError("can't set href to None") self.rename(val) else: super().__setitem__(key, value) return self @property def filename(self, /): return PurePosixPath(joinpath(self.home, self)) @property def home(self, /): return PurePosixPath(self._manifest._epub._opf_dir) @property def name(self, /): return self.path.name @property def path(self, /): return PurePosixPath(self) @property def _parent(self, /): return posixpath.dirname(unquote(self._attrib["href"])) @property def parent(self, /): return self.path.parent @property def parents(self, /): return self.path.parents @property def parts(self, /): return self.path.parts @property def stem(self, /): return self.path.stem @property def suffix(self, /): return self.path.suffix @property def suffixes(self, /): return self.path.suffixes def update(self, attrib=None, /, **attrs): if attrib: attrib = dict(attrib) if attrs: attrib.update(attrs) else: attrib = attrs href = attrib.pop("href", None) if href: self.rename(href) if attrib: super().update(attrib) return self def is_relative_to(self, /, *other): return self.path.is_relative_to(*other) def joinpath(self, /, *others): return PurePosixPath(normpath(joinpath(self._parent, *others))) __truediv__ = joinpath def relpath(self, other, /): return PurePosixPath(posixpath.relpath(other, self._parent)) def relative_to(self, /, *other): return self.path.relative_to(*other) def with_name(self, /, name): return self.path.with_name(str(name)) def with_stem(self, /, stem): return self.path.with_stem(str(stem)) def with_suffix(self, /, suffix): return self.path.with_suffix(str(suffix)) def exists(self, /): return self._manifest.exists(self) def is_file(self, /): return self.exists() def is_dir(self, /): return False def is_symlink(self, /): return False def glob(self, /, pattern="*", ignore_case=False): return self._manifest.glob(pattern, self, ignore_case=ignore_case) def rglob(self, /, pattern="", ignore_case=False): return self._manifest.rglob(pattern, self, ignore_case=ignore_case) def iterdir(self, /): return self._manifest.iterdir(self) def match(self, /, path_pattern, ignore_case=False): path_pattern = path_pattern.strip("/") if not path_pattern: return False pattern = joinpath(*posix_glob_translate_iter(path_pattern)) if ignore_case: pattern = "(?i:%s)" % pattern 
return re_compile(pattern).fullmatch(self._attrib["href"]) is not None def open( self, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): return self._manifest.open( self, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, /, buffering=0): return self._manifest.read(self, buffering=buffering) read_bytes = read def read_text(self, /, encoding=None): return self._manifest.read_text(self, encoding=encoding) def remove(self, /): self._manifest.remove(self) return self def rename(self, dest_href, /, repair=False): return self._manifest.rename(self, dest_href, repair=repair) def batch_rename(self, mapper, /, predicate=None, repair=False): return self._manifest.batch_rename(self, mapper, predicate=predicate, repair=repair) def replace(self, href, /): self._manifest.replace(self, href) return self def stat(self, /) -> Optional[stat_result]: return self._manifest.stat(self) def touch(self, /): self._manifest.touch(self) return self unlink = remove def write(self, /, data): return self._manifest.write(self, data) write_bytes = write def write_text(self, /, text, encoding=None, errors=None, newline=None): return self._manifest.write_text(self, text, encoding=encoding, errors=errors, newline=newline) class Itemref(ElementAttribProxy): __const_keys__ = ("idref",) __optional_keys__ = ("id", "linear", "properties") @property def linear(self, /): return "no" if self._attrib.get("linear") == "no" else "yes" @linear.setter def linear(self, value, /): self._attrib["linear"] = "no" if value == "no" else "yes" class Metadata(ElementProxy): __wrap_class_map__ = {"{*}meta": Meta, "{*}": Link, "dc:*": DCTerm} def __repr__(self, /): return f"{super().__repr__()}\n{pformat(self.iter().list())}" @property def info(self, /): return tuple(meta.info for meta in self.iter()) def add( self, name: str = "meta", /, attrib: Optional[Mapping] = None, text: Optional[str] = None, tail: Any = undefined, **_disregards, ): return super().add(name, attrib=attrib, text=text) def dc( self, name: str, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if text_value is not undefined: if find_attrib: find_attrib = {**find_attrib, "": text_value} else: find_attrib = {"": text_value} return self.setfind( "dc:%s" % name, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def meta( self, preds: str = "", /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): return self.setfind( "{*}meta%s" % preds, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def name_meta( self, name, content: Optional[str] = None, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "name": name} else: find_attrib = {"name": name} if content is not None: find_attrib["content"] = content return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def property_meta( self, property, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: 
Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "property": property} else: find_attrib = {"property": property} if text_value is not undefined: find_attrib[""] = text_value return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) class ManifestProxy(ElementAttribProxy): __optional_keys__ = ("id",) class Manifest(dict[str, Item]): def __init__(self, /, root: Element, epub): self._root = root self._attrib = root.attrib self._epub = epub self._proxy = ManifestProxy(root) self._href_to_id: dict[str, str] = {} self._href_to_file: dict[str, File] = {} if len(root): href_to_id = self._href_to_id dangling_items = [] for item in root.iterfind("{*}item"): id = item.attrib.get("id") href = item.attrib.get("href") if id is None or not href: dangling_items.append(item) continue id = cast(str, id) href = cast(str, unquote(href)) super().__setitem__(id, Item(item, self)) href_to_id[href] = id if dangling_items: for item in reversed(dangling_items): root.remove(item) warn(f"removed a dangling item element: {item!r}") zfile = epub.__dict__.get("_zfile") opf_dir = epub._opf_dir if zfile: href_to_file = self._href_to_file for href in href_to_id: zpath = joinpath(opf_dir, href) zinfo = zfile.NameToInfo.get(zpath) if not zinfo or zinfo.is_dir(): warn(f"missing file in original epub: {href!r}") href_to_file[href] = File(str(uuid4()), self._workfs) else: href_to_file[href] = File(zpath, zfile, open_modes="r") def __init_subclass__(self, /, **kwargs): raise TypeError("subclassing is not allowed") def __call__(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") return href if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id[href] except LookupError as e: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") from e return super().__getitem__(id) def __contains__(self, other, /): if isinstance(other, Item): return other._manifest is self and super().__contains__(other["id"]) return super().__contains__(other) def __delitem__(self, key, /): pop = self.pop if isinstance(key, int): el = self._root[key] try: id = el.attrib["id"] except AttributeError: try: self._root.remove(el) except: pass else: pop(id) elif isinstance(key, slice): root = self._root for el in root[key]: try: id = el.attrib["id"] except AttributeError: try: root.remove(el) except: pass else: pop(id, None) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") pop(key["id"]) elif isinstance(key, str): pop(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") return self def __getitem__(self, key, /): def wrap(el): try: if el.tag == "item" or el.tag.endswith("}item"): return Item(el, self) return ElementProxy(el) except AttributeError: return el if isinstance(key, int): return wrap(self._root[key]) elif isinstance(key, slice): return list(map(wrap, self._root[key])) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") return key elif isinstance(key, str): return super().__getitem__(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") def __setitem__(self, id, value, /): if id not in self: raise LookupError(f"no such item: {id!r}") if isinstance(id, Item): item = id else: item 
= super().__getitem__(id) href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return self @cached_property def _workfs(self, /): if self._epub._maketemp: return TemporaryFS(self._epub._workroot) else: return RootFS(self._epub._workroot) @cached_property def href_to_id(self, /): return MappingProxyType(self._href_to_id) @cached_property def href_to_file(self, /): return MappingProxyType(self._href_to_file) @property def home(self, /): return self._epub._opf_dir @property def attrib(self, /): return self._attrib @property def proxy(self, /): return self._proxy @property def info(self, /): return tuple(item.info for item in self.values()) delete = __delitem__ def clear(self, /): self._root.clear() self._href_to_file.clear() self._href_to_id.clear() super().clear() return self def pop(self, id, /, default=undefined): if id not in self: if default is undefined: raise LookupError(f"no such item: {id!r}") return default if isinstance(id, Item): id = id["id"] item = super().pop(id) try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return item def popitem(self, /): id, item = super().popitem() try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return id, item def set(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(href, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return item def setdefault(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(value, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: if isinstance(value, Mapping) and not ("open" in value and callable(value["open"])): item.merge(value) return item def merge(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.merge(attrib=attrs) 
elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") 
href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"):
el.attrib["media-type"] = guess_media_type(href)
4
2023-11-20 14:46:41+00:00
16k
ymp5078/AI-SAM
segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\n \"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. 
Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], dim=0\n )\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False\n )\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. 
Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[\n None, :, :, :\n ]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. 
For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(\n point_coords, dtype=torch.float, device=self.device\n )\n labels_torch = torch.as_tensor(\n point_labels, dtype=torch.int, device=self.device\n )\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(\n mask_input, dtype=torch.float, device=self.device\n )\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. 
If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) before mask prediction.\"\n )\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(\n low_res_masks, self.input_size, self.original_size\n )\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) 
to generate an embedding.\"\n )\n assert (\n self.features is not None\n ), \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. 
For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = 
diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
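The context above ends with the RLE helpers from segment_anything/utils/amg.py. As a quick round-trip illustration of mask_to_rle_pytorch / rle_to_mask / area_from_rle (a minimal sketch, assuming the segment_anything package in this repository is importable; the toy mask is made up):

```python
import torch
from segment_anything.utils.amg import mask_to_rle_pytorch, rle_to_mask, area_from_rle

# A single 4x6 boolean mask with a 2x3 rectangle of foreground pixels.
mask = torch.zeros(1, 4, 6, dtype=torch.bool)
mask[0, 1:3, 2:5] = True

rle = mask_to_rle_pytorch(mask)[0]   # uncompressed RLE in column-major (Fortran) order
print(rle)                           # {'size': [4, 6], 'counts': [9, 2, 2, 2, 2, 2, 5]}
print(area_from_rle(rle))            # 6 -> every second count is a run of foreground pixels

restored = rle_to_mask(rle)          # back to a (4, 6) numpy bool array in C order
assert (restored == mask[0].numpy()).all()
```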
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11146
for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
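The cropped code above filters candidate masks first by predicted IoU and then by the stability score before thresholding and boxing them. A toy call to calculate_stability_score showing what the threshold offset does (a sketch; the logits are invented):

```python
import torch
from segment_anything.utils.amg import calculate_stability_score

# One 2x4 grid of mask logits. With threshold 0.0 and offset 1.0 the "high" mask
# (> +1.0) keeps 2 pixels and the "low" mask (> -1.0) keeps 4, so the score is 2/4.
logits = torch.tensor([[[ 3.0,  1.5,  0.2, -0.5],
                        [-2.0, -1.5, -3.0, -1.2]]])
score = calculate_stability_score(logits, mask_threshold=0.0, threshold_offset=1.0)
print(score)  # tensor([0.5000])
```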
# -*- coding: utf-8 -*- # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [ coco_encode_rle(rle) for rle in mask_data["rles"] ] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
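For orientation, a minimal driver for the class above (a sketch only: sam_model_registry, the checkpoint file name, and example.jpg are not part of this excerpt and are assumed here):

```python
import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

# Assumed entry point and checkpoint path; adjust to your local setup.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
generator = SamAutomaticMaskGenerator(sam, points_per_side=32, pred_iou_thresh=0.88)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8, RGB
masks = generator.generate(image)
# Each record carries: segmentation, bbox (XYWH), area, predicted_iou,
# point_coords, stability_score and crop_box, as documented in generate().
print(len(masks), sorted(masks[0].keys()))
```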
data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
16
2023-11-26 23:42:53+00:00
16k
sophiaalthammer/alforrankers
matchmaker/utils/input_pipeline.py
[ { "identifier": "ConditionalQueryGenerationInferenceReader", "path": "matchmaker/dataloaders/query_generation_inference_loader.py", "snippet": "class ConditionalQueryGenerationInferenceReader(DatasetReader):\n \"\"\"\n Read a tsv file containing a passage collection.\n \n Expected format for each input line: <doc_id>\\t<doc_sequence_string>\n The output of ``read`` is a list of ``Instance`` s with the fields:\n doc_tokens: ``TextField`` \n target_query_type: ``MetadataField``\n target_query_length: ``MetadataField``\n\n\n Parameters\n ----------\n tokenizer : ``Tokenizer``, optional\n Tokenizer to use to split the input sequences into words or other kinds of tokens. \n token_indexers : ``Dict[str, TokenIndexer]``, optional\n Indexers used to define input (source side) token representations. Defaults to\n ``{\"tokens\": SingleIdTokenIndexer()}``.\n \"\"\"\n def __init__(self,\n tokenizer: Tokenizer = None,\n token_indexers: Dict[str, TokenIndexer] = None,\n \n max_doc_length:int = -1,\n max_query_length:int = -1,\n\n target_distribution_file:str = None,\n target_number_of_queries_total:int = 1 # ATTENTION, this is per worker!! (divide on your own if using > 1 worker)\n ):\n\n super().__init__(\n manual_distributed_sharding=True,\n manual_multiprocess_sharding=True\n )\n self._tokenizer = tokenizer\n self._token_indexers = token_indexers\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n self.target_number_of_queries_total = target_number_of_queries_total\n\n target_distribution,(target_label_types,target_label_lengths) = approximate_target_distribution_from_file(target_distribution_file)\n\n console = Console()\n\n console.log(\"[QueryGenLoader] Targeting distribution:\",target_distribution*target_number_of_queries_total,\", labels\",(target_label_types,target_label_lengths))\n\n self.target_distribution = target_distribution\n self.target_label_types = target_label_types\n self.target_label_lengths = target_label_lengths\n\n @overrides\n def _read(self, file_path):\n with open(cached_path(file_path), \"r\", encoding=\"utf8\") as data_file:\n #logger.info(\"Reading instances from lines in file at: %s\", file_path)\n for i,line in enumerate(self.shard_iterable(data_file)):\n if i == self.target_number_of_queries_total:\n break\n\n line = line.strip()\n\n if not line:\n continue\n\n line_parts = line.split('\\t')\n if len(line_parts) == 2:\n doc_id, doc_sequence = line_parts\n else:\n raise ConfigurationError(\"Invalid line format: %s\" % (line))\n\n yield self.text_to_instance(doc_id, doc_sequence)\n\n @overrides\n def text_to_instance(self, doc_id:str, doc_sequence: str) -> Instance:\n\n doc_id_field = MetadataField(doc_id)\n\n target_idx = np.random.choice(len(self.target_distribution),1,replace=False,p=self.target_distribution)[0]\n\n concat_sequence = (\":query_group\"+str(self.target_label_types[target_idx]) + \" \"+ str(self.target_label_lengths[target_idx]) + \" \" + doc_sequence)\n\n doc_tokenized = self._tokenizer.tokenize(concat_sequence, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n doc_field = TransformerTextField(**doc_tokenized,padding_token_id=self._tokenizer._tokenizer.pad_token_id)\n\n return Instance({\n \"doc_id\":doc_id_field,\n \"doc_tokens\":doc_field,\n \"target_query_type\":MetadataField(self.target_label_types[target_idx]),\n \"target_query_length\":MetadataField(self.target_label_lengths[target_idx])})" }, { "identifier": "PseudoLabelDatasetLoader", 
"path": "matchmaker/dataloaders/pseudo_label_training_loader.py", "snippet": "class PseudoLabelDatasetLoader():\n \"\"\"\n \n \"\"\"\n\n def __init__(\n self,\n\n query_file: str,\n collection_file: str,\n rankings_with_teacher_scores: str,\n\n selection_type: str, # values: \"scores\", \"scores-non-fixed\", \"top-rank\"\n min_pos_score: float,\n max_diff_to_be_pos: float,\n min_diff_to_neg: float,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences = False,\n random_seed=42,\n ):\n\n self.query_file = query_file\n self.collection_file = collection_file\n self.rankings_with_teacher_scores = rankings_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.selection_type = selection_type\n self.min_pos_score = min_pos_score\n self.max_diff_to_be_pos = max_diff_to_be_pos\n self.min_diff_to_neg = min_diff_to_neg\n\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n self.uniqe_pos_only = False\n\n def __iter__(self) -> Iterator[TensorDict]:\n \n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[PseudoLabel] Loading rankings from:\",self.rankings_with_teacher_scores)\n self.pos_by_qid = defaultdict(list)\n self.neg_by_qid = defaultdict(list)\n\n stat_total_pos = 0\n stat_total_neg = 0\n with open(self.rankings_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n current_q_id = \"\"\n current_top_score = 0\n for line in qf:\n ls = line.split() # pos_score<t>neg_score<t>pos_id<t>neg_id\n if current_q_id != ls[0]:\n current_q_id = ls[0]\n current_top_score = float(ls[3])\n if self.selection_type == \"scores\" or self.selection_type == \"scores-non-fixed\":\n if current_top_score >= self.min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],float(ls[3])))\n stat_total_pos+=1\n\n elif self.selection_type == \"top-rank\": \n self.pos_by_qid[ls[0]].append((ls[1],float(ls[3])))\n stat_total_pos+=1\n else:\n score = float(ls[3])\n if self.selection_type == \"scores\":\n if score >= current_top_score - self.max_diff_to_be_pos and score >= self.min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n elif self.selection_type == \"scores-non-fixed\":\n if score >= current_top_score - self.max_diff_to_be_pos: # TODO apply this fix and score >= min_pos_score:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n 
self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n elif self.selection_type == \"top-rank\": \n if score >= current_top_score - self.max_diff_to_be_pos:\n self.pos_by_qid[ls[0]].append((ls[1],score))\n stat_total_pos+=1\n\n elif score < current_top_score - self.min_diff_to_neg:\n if ls[0] in self.pos_by_qid:\n self.neg_by_qid[ls[0]].append((ls[1],score))\n stat_total_neg+=1\n\n\n console.log(\"[PseudoLabel] Loading collection from:\",self.collection_file)\n self.collection = {}\n self.collection_ids = []\n with open(self.collection_file, \"r\", encoding=\"utf8\") as cf:\n for line in cf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.collection[ls[0]] = ls[1].rstrip()[:100_000]\n self.collection_ids.append(ls[0])\n\n console.log(\"[PseudoLabel] Loading queries from:\",self.query_file)\n self.queries = {}\n with open(self.query_file, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.queries[ls[0]] = ls[1].rstrip()\n\n self.query_ids = np.array(sorted(list(set(self.pos_by_qid.keys()).intersection(set(self.neg_by_qid.keys())))))\n\n console.log(f\"[PseudoLabel] Done loading! Using {stat_total_pos} positives and {stat_total_neg} negatives for {len(self.query_ids)} queries\")\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n \n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while len(self.query_ids) > query_target_count:\n\n main_instances = []\n\n #while len(main_instances) < self.batch_size:\n\n #q_ids = random.sample(self.query_ids, query_target_count)\n q_id_idxs = random.sample(range(len(self.query_ids)), query_target_count)\n \n query_idx_remove_buffer = [] # only used for self.uniqe_pos_only==True, we need to buffer the removals, \n # otherwise we break the for loop access of already drawn q_ids\n\n for q_idx in q_id_idxs:\n q_id = self.query_ids[q_idx]\n\n #if q_id not in self.pos_by_qid or q_id not in self.neg_by_qid: # need to make sure that we did not just remove the query from the dataset (only for self.uniqe_pos_only==True)\n # continue\n\n pos = random.choice(self.pos_by_qid[q_id])\n neg = random.choice(self.neg_by_qid[q_id])\n\n if self.uniqe_pos_only:\n self.pos_by_qid[q_id].remove(pos) # ok to remove here, because q_id is unique in this for loop\n if len(self.pos_by_qid[q_id]) == 0:\n #del self.pos_by_qid[q_id]\n query_idx_remove_buffer.append(q_idx)\n #self.query_ids.pop(q_idx)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[pos[0]],self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[neg[0]],self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(self.queries[q_id]),\n \"doc_pos_tokens\": self.get_tokenized_document(self.collection[pos[0]]),\n \"doc_neg_tokens\": self.get_tokenized_document(self.collection[neg[0]]),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos[1]))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg[1]))\n\n main_instances.append(Instance(ret_instance))\n\n #if len(main_instances) == self.batch_size:\n # break\n if self.uniqe_pos_only:\n if len(query_idx_remove_buffer) > 0:\n self.query_ids = 
np.delete(self.query_ids,query_idx_remove_buffer)\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch,None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "PseudoLabelTextDatasetLoader", "path": "matchmaker/dataloaders/pseudo_label_training_loader.py", "snippet": "class PseudoLabelTextDatasetLoader():\n \"\"\"\n\n \"\"\"\n\n def __init__(\n self,\n\n rankings_with_teacher_scores: str,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences=False,\n random_seed=42,\n ):\n\n self.rankings_with_teacher_scores = rankings_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n self.uniqe_pos_only = False\n\n def __iter__(self) -> Iterator[TensorDict]:\n\n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[PseudoLabel] Loading rankings from:\", self.rankings_with_teacher_scores)\n\n self.triples = [] # query_id pos_id neg_id pos_score neg_score\n\n with open(self.rankings_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split('\\t') # pos_score neg_score query_text pos_text neg_text\n self.triples.append((float(ls[0]), float(ls[1]), ls[2], ls[3], ls[4]))\n\n console.log(f\"[TripleId] Done loading! 
Using {len(self.triples)} triples\")\n\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n\n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while True:\n\n main_instances = []\n\n while len(main_instances) < self.batch_size:\n\n pos_score, neg_score, q_text, pos_text, neg_text = random.choice(self.triples)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(q_text, pos_text,\n self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(q_text, neg_text,\n self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(q_text),\n \"doc_pos_tokens\": self.get_tokenized_document(pos_text),\n \"doc_neg_tokens\": self.get_tokenized_document(neg_text),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos_score))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg_score))\n\n main_instances.append(Instance(ret_instance))\n\n if len(main_instances) == self.batch_size:\n break\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch, None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n\n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "TripleIdDatasetLoader", "path": "matchmaker/dataloaders/triple_id_training_loader.py", "snippet": "class TripleIdDatasetLoader():\n \"\"\"\n \n \"\"\"\n\n def __init__(\n self,\n\n query_file: str,\n collection_file: str,\n triples_with_teacher_scores: str,\n\n batch_size: int,\n\n tokenizer: Tokenizer = None,\n\n max_doc_length: int = -1,\n max_query_length: int = -1,\n\n concatenate_sequences = False,\n random_seed=42,\n ):\n\n self.query_file = query_file\n self.collection_file = collection_file\n self.triples_with_teacher_scores = triples_with_teacher_scores\n self.batch_size = batch_size\n\n self._tokenizer = tokenizer\n\n self.max_doc_length = max_doc_length\n self.max_query_length = max_query_length\n\n if type(tokenizer) != FastTransformerTokenizer:\n raise Exception(\"only huggingface tokenizer supported\")\n\n self.read_with_scores = True\n self.concatenate_sequences = concatenate_sequences\n self.seed = random_seed\n\n def __iter__(self) -> Iterator[TensorDict]:\n \n ctx = mp.get_context(\"fork\" if \"fork\" in mp.get_all_start_methods() else \"spawn\")\n\n queue: mp.JoinableQueue = ctx.JoinableQueue(1000)\n worker = ctx.Process(\n target=self.data_loader_subprocess, args=(queue,), daemon=True\n )\n worker.start()\n\n try:\n for batch, worker_error in iter(queue.get, (None, None)):\n if worker_error is not None:\n e, tb = worker_error\n raise WorkerError(e, 
tb)\n\n yield batch\n queue.task_done()\n finally:\n if hasattr(queue, \"close\"): # for compat with different Python versions.\n queue.close() # type: ignore[attr-defined]\n if worker.is_alive():\n worker.terminate()\n\n def load_data(self):\n\n console = Console()\n\n console.log(\"[TripleId] Loading rankings from:\",self.triples_with_teacher_scores)\n self.triples = [] # query_id pos_id neg_id pos_score neg_score\n\n with open(self.triples_with_teacher_scores, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split() # pos_score neg_score query_id pos_id neg_id\n self.triples.append((ls[2],ls[3],ls[4],float(ls[0]),float(ls[1])))\n\n console.log(\"[TripleId] Loading collection from:\",self.collection_file)\n self.collection = {}\n self.collection_ids = []\n with open(self.collection_file, \"r\", encoding=\"utf8\") as cf:\n for line in cf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.collection[ls[0]] = ls[1].rstrip()[:100_000]\n self.collection_ids.append(ls[0])\n\n console.log(\"[TripleId] Loading queries from:\",self.query_file)\n self.queries = {}\n with open(self.query_file, \"r\", encoding=\"utf8\") as qf:\n for line in qf:\n ls = line.split(\"\\t\") # id<\\t>text ....\n self.queries[ls[0]] = ls[1].rstrip()\n\n console.log(f\"[TripleId] Done loading! Using {len(self.triples)} triples\")\n\n def data_loader_subprocess(self, queue):\n\n torch.manual_seed(self.seed)\n np.random.seed(self.seed)\n random.seed(self.seed)\n \n try:\n self.load_data()\n\n query_target_count = self.batch_size # int((self.batch_size / self.clusters_per_batch))\n\n while True:\n\n main_instances = []\n\n while len(main_instances) < self.batch_size:\n\n q_id,pos_id,neg_id,pos_score,neg_score = random.choice(self.triples)\n\n if self.concatenate_sequences:\n ret_instance = {\n \"doc_pos_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[pos_id],self.max_query_length + self.max_doc_length)),\n \"doc_neg_tokens\": CustomTransformerTextField(**self._tokenizer.tokenize(self.queries[q_id],self.collection[neg_id],self.max_query_length + self.max_doc_length))}\n else:\n ret_instance = {\n \"query_tokens\": self.get_tokenized_query(self.queries[q_id]),\n \"doc_pos_tokens\": self.get_tokenized_document(self.collection[pos_id]),\n \"doc_neg_tokens\": self.get_tokenized_document(self.collection[neg_id]),\n }\n\n if self.read_with_scores:\n ret_instance[\"pos_score\"] = ArrayField(np.array(pos_score))\n ret_instance[\"neg_score\"] = ArrayField(np.array(neg_score))\n\n main_instances.append(Instance(ret_instance))\n\n if len(main_instances) == self.batch_size:\n break\n\n main_batch = Batch(main_instances)\n main_batch = main_batch.as_tensor_dict(main_batch.get_padding_lengths())\n\n queue.put((main_batch,None))\n\n except Exception as e:\n queue.put((None, (repr(e), traceback.format_exc())))\n \n queue.put((None, None))\n # Wait until this process can safely exit.\n queue.join()\n\n def get_tokenized_query(self, text):\n query_tokenized = self._tokenizer.tokenize(text, max_length=self.max_query_length)\n if query_tokenized.get('token_type_ids') is not None:\n query_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**query_tokenized)\n\n def get_tokenized_document(self, text):\n doc_tokenized = self._tokenizer.tokenize(text, max_length=self.max_doc_length)\n if doc_tokenized.get('token_type_ids') is not None:\n doc_tokenized.pop('token_type_ids')\n return CustomTransformerTextField(**doc_tokenized)" }, { "identifier": "BlingFireTokenizer", 
"path": "matchmaker/dataloaders/bling_fire_tokenizer.py", "snippet": "class BlingFireTokenizer():\n \"\"\"\n basic tokenizer using bling fire library\n \"\"\"\n\n def tokenize(self, sentence: str) -> List[Token]:\n return [Token(t) for t in text_to_words(sentence).split()]" }, { "identifier": "FastTransformerTokenizer", "path": "matchmaker/dataloaders/transformer_tokenizer.py", "snippet": "class FastTransformerTokenizer():\n \"\"\"\n basic wrapper for an HuggingFace AutoTokenizer\n \"\"\"\n\n def __init__(self, model,add_unique_ids=False,uniqueness_type=\"lower\",create_global_id=False):\n\n if \"t5\" in model:\n self._tokenizer = T5Tokenizer.from_pretrained(model)\n # when generating, we will use the logits of right-most token to predict the next token\n # so the padding should be on the left\n self._tokenizer.padding_side = \"left\"\n self._tokenizer.pad_token = self._tokenizer.eos_token # to avoid an error\n elif \"bart\" in model:\n self._tokenizer = BartTokenizer.from_pretrained(model)\n else:\n self._tokenizer = AutoTokenizer.from_pretrained(model)\n\n self.add_unique_ids = add_unique_ids\n if self.add_unique_ids:\n self.pre_tokenzier = BertPreTokenizer()\n\n from nltk.stem.porter import PorterStemmer\n self.stemmer = PorterStemmer()\n \n self.uniqueness_type = uniqueness_type # or \"stemmed\"\n self.create_global_id = create_global_id\n\n self.stem_cache = {}\n\n def tokenize(self, sentence: str, sentence2: str = None, max_length: int = 512, padding=False, random_spans=False):\n if sentence2 != None:\n seq_tokenized = self._tokenizer(sentence, sentence2,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n\n else:\n if random_spans:\n sentences = nltk.sent_tokenize(sentence)\n sentence_ids = list(range(len(sentences)))\n random.shuffle(sentence_ids)\n sent_length = 0\n sentence = ''\n for id in sentence_ids:\n sent = sentences[id]\n if len(sent.split(' ')) + sent_length < 512:\n sentence = sentence + sent\n sent_length = len(sent.split(' '))\n\n seq_tokenized = self._tokenizer(sentence,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n else:\n seq_tokenized = self._tokenizer(sentence,\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n return_attention_mask=True,\n padding=\"max_length\" if padding else False)\n\n #\n # only used for ColBERTer model\n #\n if self.add_unique_ids:\n\n seq_tokenized.data[\"unique_input_ids\"] = torch.unique(seq_tokenized.data[\"input_ids\"])\n \n # these are the wordpiece-subwords\n tf_offsets = seq_tokenized.encodings[0].offsets\n\n # these are the whole-word offsets (subwords are not split yet), but it uses the exact same splitting mechanism\n whole_word_offsets = self.pre_tokenzier.pre_tokenize_str(sentence)\n\n # create unique_token_dict\n whole_word_unique = {}\n for i,(tok,offsets) in enumerate(whole_word_offsets):\n if self.uniqueness_type == \"stemmed\":\n lower_tok = tok.lower()\n if lower_tok not in self.stem_cache:\n tok_transformed = self.stemmer.stem(lower_tok)\n self.stem_cache[lower_tok] = tok_transformed\n else:\n tok_transformed = self.stem_cache[lower_tok]\n else:\n tok_transformed = tok.lower()\n\n whole_word_offsets[i] = (tok_transformed,offsets)\n \n if tok_transformed not in whole_word_unique:\n if self.create_global_id:\n hashed = int.from_bytes(hashlib.sha256(tok_transformed.encode('utf-8')).digest()[:4], 'little', 
signed=False) # 32-bit int\n # 0 is a reserved id for padding, don't think this will happen often though\n if hashed == 0:\n hashed = 1\n \n if hashed < 0 or hashed > 4294967295:\n #if hashed < -2147483648 or hashed > 2147483647:\n print(\"Warning: hash value is too large, will be truncated to 32-bit int\")\n whole_word_unique[tok_transformed] = hashed\n else:\n whole_word_unique[tok_transformed] = len(whole_word_unique) + 1\n\n # map tf_offsets to whole_word_unique\n tf_input_ids_to_whole_word_unique_map = torch.zeros_like(seq_tokenized.data[\"input_ids\"])\n for i,tf_offset in enumerate(tf_offsets[1:-1]): # ignore special tokens\n for whole_word_token,whole_word_offset in whole_word_offsets:\n if tf_offset[0] >= whole_word_offset[0] and tf_offset[1] <= whole_word_offset[1]:\n tf_input_ids_to_whole_word_unique_map[0][i+1] = whole_word_unique[whole_word_token]\n break\n \n # if the tokenizer cuts off the sequence, we might have some tokens that are in the pre-tokenizer, but not mapped\n # because they only appear in the end and where cut -> in this case we just remove them also from the unique list\n # as the main tokenizer is the main anchor point\n skipped_whole_word =[]\n for tok,i in whole_word_unique.items():\n if i not in tf_input_ids_to_whole_word_unique_map[0]:\n skipped_whole_word.append(tok)\n for tok in skipped_whole_word:\n del whole_word_unique[tok]\n\n #\n # this is just sanity checking to make sure that the mapping is correct\n #\n #if (tf_input_ids_to_whole_word_unique_map[0][1:-1] == 0).any():\n # missing_ids = seq_tokenized.data[\"input_ids\"][0][1:-1][tf_input_ids_to_whole_word_unique_map[0][1:-1] == 0]\n # missing_toks = self._tokenizer.convert_ids_to_tokens(missing_ids)\n # if not (len(set(missing_toks)) <= 2 and ((set(missing_toks) == set([\"[PAD]\", \"[SEP]\"])) or missing_toks[0] == \"[PAD]\")):\n # print(\"WARNING: some tokens were not found in the whole_word dictionary\",missing_toks,\"in sentence:\", sentence, \"with offset:\", whole_word_offsets,\"unique_words\", whole_word_unique)\n\n seq_tokenized.data[\"input_ids_to_words_map\"] = tf_input_ids_to_whole_word_unique_map\n seq_tokenized.data[\"unique_words\"] = torch.from_numpy(numpy.array(list(whole_word_unique.values()),dtype=numpy.int64)).unsqueeze(0)\n\n for _, d in seq_tokenized.data.items():\n d.squeeze_(0)\n return seq_tokenized.data" }, { "identifier": "PretrainedBertIndexerNoSpecialTokens", "path": "matchmaker/modules/bert_embedding_token_embedder.py", "snippet": "class PretrainedBertIndexerNoSpecialTokens(PretrainedTransformerIndexer):\n\n \"\"\"\n A ``TokenIndexer`` corresponding to a pretrained BERT model.\n Parameters\n ----------\n pretrained_model: ``str``\n Either the name of the pretrained model to use (e.g. 'bert-base-uncased'),\n or the path to the .txt file with its vocabulary.\n If the name is a key in the list of pretrained models at\n https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/tokenization.py#L33\n the corresponding path will be used; otherwise it will be interpreted as a path or URL.\n use_starting_offsets: bool, optional (default: False)\n By default, the \"offsets\" created by the token indexer correspond to the\n last wordpiece in each word. 
If ``use_starting_offsets`` is specified,\n they will instead correspond to the first wordpiece in each word.\n do_lowercase: ``bool``, optional (default = True)\n Whether to lowercase the tokens before converting to wordpiece ids.\n never_lowercase: ``List[str]``, optional\n Tokens that should never be lowercased. Default is\n ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]'].\n max_pieces: int, optional (default: 512)\n The BERT embedder uses positional embeddings and so has a corresponding\n maximum length for its input ids. Any inputs longer than this will\n either be truncated (default), or be split apart and batched using a\n sliding window.\n truncate_long_sequences : ``bool``, optional (default=``True``)\n By default, long sequences will be truncated to the maximum sequence\n length. Otherwise, they will be split apart and batched using a\n sliding window.\n \"\"\"\n\n def __init__(\n self,\n pretrained_model: str,\n use_starting_offsets: bool = False,\n do_lowercase: bool = True,\n never_lowercase: List[str] = None,\n max_pieces: int = 512,\n truncate_long_sequences: bool = True,\n ) -> None:\n\n bert_tokenizer = PretrainedTransformerTokenizer(pretrained_model, do_lower_case=do_lowercase)\n super().__init__(\n vocab=bert_tokenizer.vocab,\n wordpiece_tokenizer=bert_tokenizer.wordpiece_tokenizer.tokenize,\n namespace=\"bert\",\n use_starting_offsets=use_starting_offsets,\n max_pieces=max_pieces,\n do_lowercase=do_lowercase,\n never_lowercase=never_lowercase,\n start_tokens=[],\n end_tokens=[],\n separator_token=\"[SEP]\",\n truncate_long_sequences=truncate_long_sequences,\n )\n\n def __eq__(self, other):\n if isinstance(other, PretrainedBertIndexerNoSpecialTokens):\n for key in self.__dict__:\n if key == \"wordpiece_tokenizer\":\n # This is a reference to a function in the huggingface code, which we can't\n # really modify to make this clean. So we special-case it.\n continue\n if self.__dict__[key] != other.__dict__[key]:\n return False\n return True\n return NotImplemented" } ]
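The loaders in this context are plain Python iterables that tokenize and batch inside a worker process. A minimal usage sketch for PseudoLabelTextDatasetLoader (the file path and the distilbert-base-uncased tokenizer name are assumptions, not values from the repo):

```python
from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer
from matchmaker.dataloaders.pseudo_label_training_loader import PseudoLabelTextDatasetLoader

# Expected input: one "pos_score<tab>neg_score<tab>query<tab>pos_text<tab>neg_text" per line.
loader = PseudoLabelTextDatasetLoader(
    rankings_with_teacher_scores="teacher-scored.triples.tsv",      # hypothetical path
    batch_size=32,
    tokenizer=FastTransformerTokenizer("distilbert-base-uncased"),  # assumed model name
    max_doc_length=200,
    max_query_length=30,
    concatenate_sequences=False,  # yields separate query / doc_pos / doc_neg fields
)
for batch in loader:              # a fork/spawn worker fills a queue with padded batches
    print(batch.keys())           # query_tokens, doc_pos_tokens, doc_neg_tokens, pos_score, neg_score
    break
```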
import torch import numpy import random import torch.multiprocessing as mp from allennlp.data.samplers import BucketBatchSampler, MaxTokensBatchSampler from allennlp.data.vocabulary import Vocabulary from allennlp.data.data_loaders import MultiProcessDataLoader from transformers import T5Tokenizer from allennlp.data.token_indexers import PretrainedTransformerIndexer from allennlp.data.tokenizers import PretrainedTransformerTokenizer from matchmaker.dataloaders.concatenated_reranking_loader import * from matchmaker.dataloaders.concatenated_training_loader import * from matchmaker.dataloaders.independent_reranking_loader import * from matchmaker.dataloaders.independent_training_loader import * from matchmaker.dataloaders.id_sequence_loader import * from matchmaker.dataloaders.mlm_masked_sequence_loader import * from matchmaker.dataloaders.query_generation_inference_loader import ConditionalQueryGenerationInferenceReader from matchmaker.dataloaders.tas_balanced_training_loader import * from matchmaker.dataloaders.pseudo_label_training_loader import PseudoLabelDatasetLoader, PseudoLabelTextDatasetLoader from matchmaker.dataloaders.triple_id_training_loader import TripleIdDatasetLoader from transformers import AutoTokenizer from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer from matchmaker.modules.bert_embedding_token_embedder import PretrainedBertIndexerNoSpecialTokens from typing import Dict, Tuple, List
11844

# loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], 
mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the 
single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"]
#from tokenizers import ByteLevelBPETokenizer,CharBPETokenizer #from matchmaker.dataloaders.transformer_tokenizer import CustomTransformerTokenizer,CustomTransformerIndexer mp.set_sharing_strategy("file_system") # VERY MUCH needed for linux !! makes everything faster, but tends to break stuff def allennlp_single_sequence_loader(model_config, run_config, _input_file, sequence_type, force_exact_batch_size=False): ''' Load examples from a .tsv file in the single sequence format: id<tab>text (Using allennlp's v2 multiprocess loader) ''' if model_config.get("model_input_type", "") == "mlm": sequence_type == "single_mlm" if sequence_type == "query": max_length = run_config.get("overwrite_max_query_length", model_config["max_query_length"]) min_length = model_config.get("min_query_length",-1) batch_size = run_config["query_batch_size"] split_document=False split_document_window_size=-1 if sequence_type == "single_mlm": max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length", -1) batch_size = run_config.get("collection_batch_size", run_config["batch_size_train"]) make_multiple_of=run_config.get("make_multiple_of",8) mask_probability=run_config.get("mask_probability",0.1) mlm_mask_replace_probability=run_config.get("mlm_mask_replace_probability",0.5) mlm_mask_random_probability=run_config.get("mlm_mask_random_probability",0.5) else: # doc max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length",-1) batch_size = run_config["collection_batch_size"] split_document=run_config.get("split_document",False) split_document_window_size=run_config.get("split_document_window_size",-1) _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max_length) #if model_config.get("model_input_type", "") == "mlm": # reader = MLMMaskedSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=max_length, min_doc_length=min_length, # mask_probability=mask_probability, # mlm_mask_replace_probability=mlm_mask_replace_probability, # mlm_mask_random_probability=mlm_mask_random_probability, # make_multiple_of=make_multiple_of) reader = IdSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, split_document=split_document,split_document_window_size=split_document_window_size, max_seq_length=max_length, min_seq_length=min_length, sequence_type=sequence_type) if force_exact_batch_size: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=int(batch_size)) else: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["seq_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_triple_training_loader(model_config, run_config, _input_file,add_text_to_batch=False): ''' Load training examples (either in the re-ranking text file format or a dynamic loader) (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], 
run_config["max_query_length"])) if run_config.get("dynamic_sampler", False) == False: if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) else: reader = IndependentTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], query_augment_mask_number=run_config["query_augment_mask_number"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) else: #if run_config["dynamic_sampler_type"] == "list": # loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, 
max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, 
num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"]
reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers,
0
2023-11-21 10:38:22+00:00
16k
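The record that ends here pairs a truncated source file (cropped_code) with the single line the model is expected to produce (next_line), an in-repo import statement, and the cross-file context snippets that make that line predictable. Below is a minimal Python sketch of how these fields could be wired together for next-line prediction; the JSONL file name and the prompt layout are assumptions for illustration, not part of the dataset.

import json

def build_prompt(record: dict) -> str:
    # Concatenate the in-repo imports with the truncated file body; the model
    # is asked to continue from the end of cropped_code.
    return record["import_statement"] + "\n" + record["cropped_code"]

with open("repo_completion.jsonl") as f:  # hypothetical file name
    for line in f:
        record = json.loads(line)
        prompt = build_prompt(record)
        target = record["next_line"]
        # gold_snippet_index points into the context list at the snippet whose
        # identifier is needed to write next_line correctly.
        gold = record["context"][record["gold_snippet_index"]]["identifier"]
        print(len(prompt), repr(target[:60]), gold)
        break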
MICLab-Unicamp/medpseg
medpseg/poly_pipeline.py
[ { "identifier": "PolySeg2DModule", "path": "medpseg/poly_seg_2d_module.py", "snippet": "class PolySeg2DModule(pl.LightningModule):\n '''\n Regarding of the name, also works with 3D networks\n '''\n def __init__(self, hparams):\n '''\n Check starter.py for description of all hparams\n '''\n super().__init__()\n self.save_hyperparameters(hparams)\n\n ####### Hyperparameters used during development, ignore this its confusing #######\n self.pretraining = self.hparams.pretraining\n self.findings_only = getattr(self.hparams, \"findings_only\", False)\n self.weight_decay = getattr(self.hparams, \"weight_decay\", None)\n self.scheduling_factor = getattr(self.hparams, \"scheduling_factor\", None)\n self.scheduling = getattr(self.hparams, \"scheduling\", \"step\")\n self.scratch = getattr(self.hparams, \"scratch\", False)\n self.expand_bifpn = getattr(self.hparams, \"expand_bifpn\", \"conv\")\n self.backbone = getattr(self.hparams, \"backbone\", \"effnet\")\n self.val_3d = getattr(self.hparams, \"val_3d\", False)\n self.gdl = getattr(self.hparams, \"gdl\", False)\n self.bdl = getattr(self.hparams, \"bdl\", False)\n self.focal = getattr(self.hparams, \"focal\", False)\n self.atmbranch = getattr(self.hparams, \"atmbranch\", None)\n self.vesselbranch = getattr(self.hparams, \"vesselbranch\", None)\n self.recbranch = getattr(self.hparams, \"recbranch\", None)\n self.include_bg = getattr(self.hparams, \"include_background\", False)\n self.unet = getattr(self.hparams, \"unet\", False)\n self.unettr = getattr(self.hparams, \"unettr\", False)\n self.poly_level = getattr(self.hparams, \"poly_level\", None)\n self.flag_3d_metric = '_3d' if self.val_3d or self.unettr else ''\n self.excluded_average_metric_keys = [\"volume_similarity\", \"avg_hd\", \"hd\"]\n self.downstream_method = getattr(self.hparams, \"downstream_method\", None)\n self.perceptual_loss = getattr(self.hparams, \"perceptual_loss\", False)\n self.stem_replacement = getattr(self.hparams, \"stem_replacement\", False)\n self.new_latent_space = getattr(self.hparams, \"new_latent_space\", False)\n self.compound_coef = getattr(self.hparams, \"compound_coef\", 4)\n self.consistency = getattr(self.hparams, \"consistency\", False)\n self.imnet_norm = getattr(self.hparams, \"imnet_norm\", False)\n self.learnable_norm = getattr(self.hparams, \"learnable_norm\", False)\n self.circulatory_branch = getattr(self.hparams, \"circulatory_branch\", None)\n self.bifpn_channels = getattr(self.hparams, \"bifpn_channels\", 128)\n self.combined_loss = getattr(self.hparams, \"combined_loss\", False)\n self.sam = getattr(self.hparams, \"sam\", False)\n self.freeze_encoder = getattr(self.hparams, \"freeze_encoder\", False)\n self.batchfy_e2d = getattr(self.hparams, \"batchfy_e2d\", False)\n self.circulatory_regularization = getattr(self.hparams, \"circulatory_regularization\", False)\n self.medseg3d = getattr(self.hparams, \"medseg3d\", False)\n self.fpn_c = getattr(self.hparams, \"fpn_c\", None)\n # Post ATS ideas\n self.soft_circulatory = getattr(self.hparams, \"soft_circulatory\", False)\n self.poi_loss = getattr(self.hparams, \"poi_loss\", False)\n self.nrdice_loss = getattr(self.hparams, \"nrdice_loss\", False)\n self.polyunet25d = getattr(self.hparams, \"polyunet25d\", False)\n self.polyunet3d = getattr(self.hparams, \"polyunet3d\", False)\n self.mccl = getattr(self.hparams, \"mccl\", False)\n self.tversky = getattr(self.hparams, \"tversky\", False)\n self.airway_ths = getattr(self.hparams, \"airway_ths\", 0.5)\n self.vessel_ths = getattr(self.hparams, 
\"vessel_ths\", 0.5)\n self.self_attention = getattr(self.hparams, \"self_attention\", False)\n self.deep_supervision = getattr(self.hparams, \"deep_supervision\", False)\n self.con_detect = getattr(self.hparams, \"con_detect\", False)\n self.celoss = getattr(self.hparams, \"celoss\", False)\n self.large = getattr(self.hparams, \"large\", False)\n self.combined_gdl = getattr(self.hparams, \"combined_gdl\", False)\n self.full_silver = getattr(self.hparams, \"preprocess\", '') == \"full_silver_poly_3levels_circulatory\"\n if self.full_silver:\n print(\"Full silver mode detected, every item on batch must be fullsilver preprocess\")\n ####### Hyperparameters used during development, ignore this its confusing #######\n\n # Determine offset for polymorphic labels depending on poly level\n # Poly level:\n # None: supervised training only\n # 0: self supervised only\n # 2: lung -> unhealthy/healthy\n # 3: unhealthy -> GGO/CON\n self.nlossterms = 0\n if self.poly_level == 3: # Previous logic for this was wrong, changing to count from beginning\n self.simple_offset = 2 # BG + Lung\n self.detailed_offset = 3 # BG + Healthy + Unhealthy\n else:\n self.simple_offset = 2 # BG + Lung\n self.detailed_offset = None # Not present if not poly_level 3\n\n # Redundant argument necessary to not tie module to data preprocessing\n if \"poly_3levels\" in self.hparams.preprocess:\n assert self.poly_level == 3 or self.poly_level == 2\n\n self.two5d = True\n self.model = MEDSeg(self.hparams.nin, self.hparams.seg_nout, apply_sigmoid=False, backbone=self.backbone, expand_bifpn=self.expand_bifpn, pretrained=not self.scratch,\n num_classes_atm=self.atmbranch, num_classes_vessel=self.vesselbranch, num_classes_rec=self.recbranch, stem_replacement=self.stem_replacement, new_latent_space=self.new_latent_space,\n compound_coef=self.compound_coef, imnet_norm=self.imnet_norm, learnable_norm=self.learnable_norm, circulatory_branch=self.circulatory_branch,\n bifpn_channels=self.bifpn_channels, sam_embedding=self.sam, self_attention=self.self_attention, deep_supervision=self.deep_supervision,\n con_detecting=self.con_detect, large=self.large, soft_circulatory=self.soft_circulatory)\n \n self.pretrained_weights = self.hparams.pretrained_weights\n if self.pretrained_weights is not None:\n print(f\"Loading pretrained weights from {self.pretrained_weights}\")\n self.model = PolySeg2DModule.load_from_checkpoint(self.pretrained_weights).model\n\n # Supervised loss\n assert (not(self.combined_loss) or not(self.nrdice_loss)) and (not(self.combined_loss) or not(self.mccl)) and (not(self.nrdice_loss) or not(self.mccl)), \"Cant do combined loss and nrdice loss or combined loss and mccl at the same time\"\n \n if self.combined_loss:\n print(\"Combined Loss\")\n self.lossfn = CombinedLoss(include_background=self.include_bg, cross_entropy=self.celoss, gdl=self.combined_gdl, soft_circulatory=self.soft_circulatory)\n self.dicer = DICEMetric(per_channel_metric=True, check_bounds=False)\n\n print('-'*100 + \n f\"\\nPoly2D Module in the following configuration:\"\n f\"\\npoly_level: {self.poly_level} soft_circulatory: {self.soft_circulatory}\"\n f\"\\nnin: {self.hparams.nin} main_nout: {self.hparams.seg_nout}, DS: {self.deep_supervision}, SA: {self.self_attention}\"\n f\"\\nMEDSeg 3D? 
{self.medseg3d}\\n\" +\n '-'*100)\n\n def save_pt_model(self, path):\n torch.save(self.model.state_dict(), path)\n\n def load_pt_model(self, path):\n self.model.load_state_dict(torch.load(path))\n\n def visual_debug(self, x, y, label):\n pass\n\n def forward(self, x, stacking=False):\n if self.val_3d and not self.training and not stacking: # either training, or bein in val_3d or stacking flag avoids this branch and...\n return real_time_stack_predict(self, x, self.hparams.eval_batch_size, extended_2d=self.hparams.extended_2d, num_workers=self.hparams.nworkers, device=torch.device(\"cpu\") if self.hparams.cpu else x.device)\n else: # ...we return direct slice activations\n y_hat = self.model(x) \n if isinstance(y_hat, dict):\n for k in y_hat.keys():\n if 'atm' in k or 'vessel' in k:\n if self.soft_circulatory:\n y_hat[k] = y_hat[k].softmax(dim=1) \n else:\n y_hat[k] = y_hat[k].sigmoid()\n elif 'main' in k:\n y_hat[k] = y_hat[k].softmax(dim=1)\n else:\n raise ValueError(f\"Unexpected key in MEDSeg return: {k}\")\n if self.hparams.debug and not stacking:\n print(\"y_hat state:\")\n for k, v in y_hat.items():\n print(f\"{k}: {v.shape}\")\n else:\n y_hat = y_hat.softmax(dim=1)\n if self.hparams.debug and not stacking:\n print(f\"y_hat state: {y_hat.shape}\")\n \n return y_hat\n\n # Main branch forms ##################################\n def simple_level(self, y_hat, y, simple, ds, do_loss):\n '''\n Where we train on lung masks only. \n '''\n if self.full_silver and self.training:\n raise RuntimeError(\"Shouldn't be running simple_level on full_silver\")\n \n if isinstance(y_hat, dict):\n lung = y_hat[\"main\"][simple, 1:].sum(dim=1, keepdim=True) # lung is everything after bg summed\n y_hat_simple = torch.cat([y_hat[\"main\"][simple, :1], lung], dim=1) # 2 channel bg + lung on simple cases\n else:\n lung = y_hat[simple, 1:].sum(dim=1, keepdim=True) # lung is everything after bg summed\n y_hat_simple = torch.cat([y_hat[simple, :1], lung], dim=1) # bg + lung on simple cases\n \n # WANING: Boundary Loss deprecated, no significant difference shown \n if self.simple_offset is None: # poly simplification removes unhealthy label\n y_simple = y[simple] \n else:\n y_simple = y[simple, :self.simple_offset] \n NS = y_simple.shape[0]\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n simple_loss = self.lossfn(y_hat_simple, y_simple)\n else:\n simple_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_simple_argmax = y_hat_simple.argmax(dim=1, keepdim=True)\n y_hat_lung = y_hat_simple_argmax == 1\n for ns in range(NS):\n struct_names = [\"lung\"]\n seg_metrics(gts=y_simple[ns, 1:2].cpu().numpy().astype(np.uint8), preds=y_hat_lung.detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for simplified level not implemented\")\n \n return simple_loss\n\n def detailed_level(self, y_hat, y, detailed, ds, do_loss):\n '''\n Where we train on Healthy/Unhealthy masks\n Still supports old 2.5D validation metrics do pretraining project\n '''\n if self.full_silver and self.training:\n raise RuntimeError(\"Shouldn't be running detailed_level on full_silver\")\n \n if isinstance(y_hat, dict): \n if self.poly_level == 3: # if we have ggo and con outputs, reduce then\n 
unhealthy = y_hat[\"main\"][detailed, 2:].sum(dim=1, keepdim=True) # GGO + CON = unhealthy\n y_hat_detailed = torch.cat([y_hat[\"main\"][detailed, :2], unhealthy], dim=1) # Concating BG, Healthy with unhealthy\n else:\n y_hat_detailed = y_hat[\"main\"][detailed]\n else:\n if self.poly_level == 3: # if we have ggo and con outputs, reduce then\n unhealthy = y_hat[detailed, 2:].sum(dim=1, keepdim=True) # GGO + CON = unhealthy\n y_hat_detailed = torch.cat([y_hat[detailed, :2], unhealthy], dim=1) # Concating BG, Healthy with unhealthy\n else:\n y_hat_detailed = y_hat[detailed]\n \n # Logic to separate concatenations on x and y. Kind of complicated\n # Although boundary loss is implemented, early experiments showed it not being signifcantly better so, deprecated.\n if self.detailed_offset is None:\n y_detailed = y[detailed]\n else:\n y_detailed = y[detailed, :self.detailed_offset] \n ND = y_detailed.shape[0]\n\n # Loss can be disabled to accelerate validation\n if do_loss:\n detailed_loss = self.lossfn(y_hat_detailed, y_detailed)\n else:\n detailed_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_detailed_argmax = y_hat_detailed.argmax(dim=1, keepdim=True)\n y_hat_detailed = torch.cat((y_hat_detailed_argmax == 1, y_hat_detailed_argmax == 2), dim=1)\n for nd in range(ND):\n struct_names = [\"healthy\", \"unhealthy\"]\n seg_metrics(gts=y_detailed[nd, 1:3].cpu().numpy().astype(np.uint8), preds=y_hat_detailed[nd, :2].detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n healthy_metric, unhealthy_metric = self.dicer(y_hat_detailed[:, 1:3], y_detailed[:, 1:3])\n self.log(\"healthy_dice\", healthy_metric, on_epoch=True, on_step=False, prog_bar=False)\n self.log(\"unhealthy_dice\", unhealthy_metric, on_epoch=True, on_step=False, prog_bar=False)\n\n return detailed_loss\n\n def separation_level(self, y_hat, y, separation, ds, do_loss):\n '''\n Where we train on separating GGO and Consolidations \n (semi-supervised through threshold + unhealthy label)\n\n One day might be manual labels too\n '''\n if isinstance(y_hat, dict):\n y_hat_separation = y_hat[\"main\"][separation][:, :4]\n else:\n y_hat_separation = y_hat[separation][:, :4]\n\n y_separation = y[separation][:, :4]\n ND = y_separation.shape[0]\n\n # Loss can be disabled to accelerate validation\n if do_loss:\n separation_loss = self.lossfn(y_hat_separation, y_separation)\n else:\n separation_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n y_hat_separation_argmax = y_hat_separation.argmax(dim=1, keepdim=True)\n y_hat_separation = torch.cat((y_hat_separation_argmax == 2, y_hat_separation_argmax == 3), dim=1)\n for nd in range(ND):\n struct_names = [\"ggo\", \"con\"]\n seg_metrics(gts=y_separation[nd, 2:4].cpu().numpy().astype(np.uint8), preds=y_hat_separation[nd, :2].detach().cpu().numpy().astype(np.uint8),\n metrics=self.metrics, struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n\n return separation_loss\n ####################################################\n\n # ATM branch computations\n def atm_branch(self, y_hat, y, 
atm, ds, do_loss):\n '''\n where we optimize atm parts of the batch, binary label\n '''\n if self.full_silver and self.training:\n if self.soft_circulatory:\n bg = torch.ones_like(y[atm, 5:6]) - y[atm, 5:6]\n y_airway = torch.cat([bg, y[atm, 5:6]], dim=1)\n y_hat_airway = y_hat[\"atm\"][atm, :2] \n else:\n raise RuntimeError(\"Why are you running full_silver without SoftCirculatory\")\n else:\n if self.soft_circulatory:\n y_airway = y[atm, :2] # Taking one hot map\n y_hat_airway = y_hat[\"atm\"][atm, :2] # output has 2 channels\n else:\n y_airway = y[atm, 1:2] # 0 is BG, taking binary airway map\n y_hat_airway = y_hat[\"atm\"][atm, :1] # output has only 1 channel\n NS = y_airway.shape[0] # nsamples\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n atm_loss = self.lossfn(y_hat_airway, y_airway)\n else:\n atm_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n # Making sure to get the correct activation when softmax (soft_circulatory) is turned on.\n if self.soft_circulatory:\n # Note that this is already 0 and 1 after argmax\n binary_y_hat_airway = y_hat_airway.detach().argmax(dim=1, keepdim=True).cpu().numpy().astype(np.uint8)\n binary_y_airway = y_airway[:, 1:2].cpu().numpy().astype(np.uint8)\n else:\n # Split sigmoid on THS\n binary_y_hat_airway = (y_hat_airway.detach() > self.airway_ths).cpu().numpy().astype(np.uint8)\n binary_y_airway = y_airway[:, 0:1].cpu().numpy().astype(np.uint8)\n assert binary_y_hat_airway.shape[1] == 1 and binary_y_hat_airway.max() <= 1\n\n for ns in range(NS):\n struct_names = [\"airway\"]\n seg_metrics(gts=binary_y_airway[ns], \n preds=binary_y_hat_airway[ns],\n metrics=self.metrics, \n struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for atm not implemented\")\n \n return atm_loss\n\n # Vessel branch computations\n def vessel_branch(self, y_hat, y, vessel, ds, do_loss):\n '''\n where we optimize atm parts of the batch\n '''\n '''\n where we optimize atm parts of the batch, binary label\n '''\n if self.full_silver and self.training:\n if self.soft_circulatory:\n bg = torch.ones_like(y[vessel, 4:5]) - y[vessel, 4:5]\n y_vessel = torch.cat([bg, y[vessel, 4:5]], dim=1)\n y_hat_vessel = y_hat[\"vessel\"][vessel, :2] \n else:\n raise RuntimeError(\"Why are you running full_silver without SoftCirculatory\")\n else:\n if self.soft_circulatory:\n y_vessel = y[vessel, :2] # Taking one hot map\n y_hat_vessel = y_hat[\"vessel\"][vessel, :2] # output has 2 channels\n else:\n y_vessel = y[vessel, 1:2] # 0 is BG, taking binary airway map\n y_hat_vessel = y_hat[\"vessel\"][vessel, :1] # output has only 1 channel\n \n NS = y_vessel.shape[0] # nsamples\n \n # Loss can be disabled to accelerate validation\n if do_loss:\n vessel_loss = self.lossfn(y_hat_vessel, y_vessel)\n else:\n vessel_loss = 0\n \n # Complex metrics on real time\n if not self.training:\n if self.val_3d:\n # Making sure to get the correct activation when softmax (soft_circulatory) is turned on.\n if self.soft_circulatory:\n # Note that this is already 0 and 1 after argmax\n binary_y_hat_vessel = y_hat_vessel.detach().argmax(dim=1, keepdim=True).cpu().numpy().astype(np.uint8)\n binary_y_vessel = y_vessel[:, 1:2].cpu().numpy().astype(np.uint8)\n else:\n # Split sigmoid on THS\n binary_y_hat_vessel = 
(y_hat_vessel.detach() > self.vessel_ths).cpu().numpy().astype(np.uint8)\n binary_y_vessel = y_vessel[:, 0:1].cpu().numpy().astype(np.uint8)\n assert binary_y_hat_vessel.shape[1] == 1 and binary_y_hat_vessel.max() <= 1\n\n for ns in range(NS):\n struct_names = [\"vessel\"]\n seg_metrics(gts=binary_y_vessel[ns], \n preds=binary_y_hat_vessel[ns],\n metrics=self.metrics, \n struct_names=struct_names)\n for key, value in self.metrics.items():\n for metric, metric_value in value.items():\n if key in struct_names:\n self.log(f\"{key}_{metric}_3d\", metric_value[-1], on_epoch=True, on_step=False, prog_bar=False)\n else:\n raise NotImplementedError(\"2D validation for vessel not implemented\")\n \n return vessel_loss\n\n def debug_batch(self, simple, detailed, separation, atm, vessel, y, meta):\n if self.hparams.debug:\n print(f\"Training? {self.training}\")\n print(\"Simple\")\n print(simple)\n print(\"Detailed\")\n print(detailed)\n print(\"Separation\")\n print(separation)\n print(\"ATM\")\n print(atm)\n print(\"Vessel (parse)\")\n print(vessel)\n \n # Assuming B, C, ... format\n preprocess = meta[\"preprocess\"]\n import matplotlib.pyplot as plt\n for i, y_item in enumerate(y):\n item_preprocess = preprocess[i]\n print(y_item.max())\n display_buffer = y_item.cpu().argmax(dim=0).numpy()\n print(display_buffer.max())\n print(f\"Display buffer: {display_buffer.shape}\")\n if os.getenv(\"NSLOTS\") is None:\n if len(display_buffer.shape) == 3:\n pass\n else:\n plt.title(f\"Batch target {i} preprocess {item_preprocess}\")\n plt.imshow(display_buffer)\n plt.show()\n\n def deep_supervision_fn(self, \n loss_fn: Callable, \n key: str, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n index: np.ndarray, \n do_loss: bool):\n loss_acum = []\n \n for i in range(1, 5):\n current_size = (y_hat[key].shape[-2], y_hat[key].shape[-1])\n current_size = (current_size[0]//(2**(i)), current_size[1]//(2**(i)))\n \n transform = Resize(current_size, interpolation=InterpolationMode.NEAREST)\n \n # Craft prediction and target for deep supervision outputs\n new_y_hat = {}\n\n if key == \"main\":\n new_y_hat[key] = y_hat[f\"{key}{i}\"]\n elif key == \"vessel\" or key == \"atm\":\n new_y_hat[key] = y_hat[f\"{key}{i}\"]\n else:\n raise ValueError(f\"Key {key} not valid\")\n\n new_y = transform(y)\n loss = loss_fn(new_y_hat, new_y, index, True, do_loss)\n\n loss_acum.append(loss)\n\n return loss_acum\n\n def compute_loss(self, \n loss_fn: Callable, \n key: str, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n index: np.ndarray, \n do_loss: bool, \n deep_supervision: bool):\n if index.sum() > 0:\n loss = loss_fn(y_hat, y, index, False, do_loss)\n if deep_supervision and self.training:\n loss_acum = self.deep_supervision_fn(loss_fn, key, y_hat, y, index, do_loss)\n # Due to observing good results with only high resolution loss in poly, bumping high resolution weight in optimization\n # To 0.75, with rest of DS contributing to 0.25 of optimization\n loss = ((2**-1)+(2**-2))*loss + (2**-3)*loss_acum[0] + (2**-4)*loss_acum[1] + (2**-5)*loss_acum[2] + (2**-6)*loss_acum[3]\n for i in range(5):\n self.log(f\"{loss_fn.__name__}_deep_supervision_{i}\", loss if i == 0 else loss_acum[i-1], prog_bar=False, on_step=True, on_epoch=True)\n else:\n loss = 0\n\n return loss\n\n def loss_wrapper(self, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n y: torch.Tensor, \n indexes: Dict[str, np.ndarray], \n do_loss: bool, \n deep_supervision: bool):\n simple, detailed, separation, 
atm, vessel = indexes[\"simple\"], indexes[\"detailed\"], indexes[\"separation\"], indexes[\"atm\"], indexes[\"vessel\"]\n\n simple_loss = self.compute_loss(self.simple_level, \"main\", y_hat, y, simple, do_loss, deep_supervision)\n detailed_loss = self.compute_loss(self.detailed_level, \"main\", y_hat, y, detailed, do_loss, deep_supervision)\n separation_loss = self.compute_loss(self.separation_level, \"main\", y_hat, y, separation, do_loss, deep_supervision)\n atm_loss = self.compute_loss(self.atm_branch, \"atm\", y_hat, y, atm, do_loss, deep_supervision)\n vessel_loss = self.compute_loss(self.vessel_branch, \"vessel\", y_hat, y, vessel, do_loss, deep_supervision)\n\n if do_loss and simple_loss == 0 and detailed_loss == 0 and atm_loss == 0 and separation_loss == 0 and vessel_loss == 0:\n print(\">>>>>>>>>>>>>WARNING: Malformed batch, didn't find any level of polymorphism!<<<<<<<<<<<<<\")\n\n return simple_loss, detailed_loss, separation_loss, atm_loss, vessel_loss\n\n def polymorphic_loss_metrics(self, \n y: torch.Tensor, \n y_hat: Union[torch.Tensor, Dict[str, torch.Tensor]], \n meta: Dict[str, List[str]], \n do_loss: bool = True):\n '''\n ####### Polymorphic training #############\n # Indexes whole batch and perform loss computations separately\n '''\n detailed = np.logical_or(np.logical_or(np.logical_or(np.logical_or(np.array(meta[\"preprocess\"]) == \"seg_raw_new\", np.array(meta[\"preprocess\"]) == \"seg_raw\"), np.array(meta[\"preprocess\"]) == \"msd_seg\"), np.array(meta[\"preprocess\"]) == \"seg_raw_new_hu\"), np.array(meta[\"preprocess\"]) == \"msd_seg_hu\") # Level 2 polymorphism, healthy/unhealthy annotation, cancer\n simple = np.logical_or(np.logical_or(np.array(meta[\"preprocess\"]) == \"pretrain_preprocessing\", np.array(meta[\"preprocess\"]) == \"classification_pretrain_preprocessing\"), np.array(meta[\"preprocess\"]) == \"pretrain_preprocessing_hu\") # Level 1 polymorphism, lung annotation\n separation = np.logical_or(np.array(meta[\"preprocess\"]) == \"separation\", np.array(meta[\"preprocess\"]) == \"manual_split_msc_hu\") # Level 3 polymorphism detect artificial con/ggo separation and correction with transform\n atm = np.logical_or(np.array(meta[\"preprocess\"]) == \"new_atm\", np.array(meta[\"preprocess\"]) == \"new_atm_hu\") # Auxiliary task, airway segmentation\n vessel = np.logical_or(np.array(meta[\"preprocess\"]) == \"parse\", np.array(meta[\"preprocess\"]) == \"parse_hu\") # Auxiliary task, vessel segmentation\n\n if self.full_silver and self.training:\n # The case where every batch item has everything, from teacher network labeling\n separation = np.array([True]*y.shape[0])\n atm = np.array([True]*y.shape[0])\n vessel = np.array([True]*y.shape[0])\n\n self.debug_batch(simple, detailed, separation, atm, vessel, y, meta)\n\n indexes = {\"simple\": simple, \"detailed\": detailed, \"separation\": separation, \"atm\": atm, \"vessel\": vessel}\n\n return self.loss_wrapper(y_hat, y, indexes, do_loss, deep_supervision=self.deep_supervision)\n\n def supervised_loss(self, y, y_hat, meta, prestr):\n '''\n Does all the dozens of losses involved in this training\n This function also computes and logs metrics internally. 
Only losses are returned to compute the final loss\n '''\n simple_loss, detailed_loss, separation_loss, atm_loss, vessel_loss = self.polymorphic_loss_metrics(y=y, y_hat=y_hat, meta=meta, do_loss=True)\n \n loss = simple_loss + detailed_loss + separation_loss + atm_loss + vessel_loss\n if loss is not None:\n if self.training:\n if simple_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}simple_loss\", simple_loss, on_step=True, on_epoch=True)\n if detailed_loss > 0: \n self.nlossterms += 1\n self.log(f\"{prestr}detailed_loss\", detailed_loss, on_step=True, on_epoch=True)\n if separation_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}separation_loss\", separation_loss, on_step=True, on_epoch=True)\n if atm_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}atm_loss\", atm_loss, on_step=True, on_epoch=True)\n if vessel_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}vessel_loss\", vessel_loss, on_step=True, on_epoch=True)\n \n self.log(f\"{prestr}loss\", loss, on_step=True, on_epoch=True)\n else:\n if simple_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}simple_loss{self.flag_3d_metric}\", simple_loss, on_step=True, on_epoch=True)\n if detailed_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}detailed_loss{self.flag_3d_metric}\", detailed_loss, on_step=True, on_epoch=True)\n if separation_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}separation_loss{self.flag_3d_metric}\", separation_loss, on_step=True, on_epoch=True)\n if atm_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}atm_loss{self.flag_3d_metric}\", atm_loss, on_step=True, on_epoch=True)\n if vessel_loss > 0:\n self.nlossterms += 1\n self.log(f\"{prestr}vessel_loss\", vessel_loss, on_step=True, on_epoch=True)\n \n self.log(f\"{prestr}loss{self.flag_3d_metric}\", loss, on_step=True, on_epoch=True)\n\n return loss\n\n def training_step(self, train_batch, batch_idx):\n '''\n Training step does different things if on exclusive pretraining mode or \n doing traditional supervision.\n\n We only need to return loss for optimizer, metrics are not computed\n '''\n self.nlossterms = 0\n x, y, meta = train_batch\n self.visual_debug(x, y, \"Training\")\n \n y_hat = None\n\n if self.poly_level != 0: # zero polymorphic means pretraining only\n # Traditional supervision\n if y_hat is None:\n y_hat = self.forward(x)\n\n supervised_loss = self.supervised_loss(y=y, y_hat=y_hat, meta=meta, prestr='')\n self.log(\"supervised_loss\", supervised_loss, on_step=True, on_epoch=True)\n else:\n supervised_loss = 0\n \n final_loss = supervised_loss/self.nlossterms\n self.log(\"nlossterms\", self.nlossterms, on_step=True, on_epoch=True)\n self.log(\"loss\", final_loss, on_step=True, on_epoch=True)\n\n if final_loss == 0:\n raise ValueError(\"Loss is equal to 0. Something is misconfigured.\")\n\n return final_loss # for outside optimization\n\n def validation_step(self, val_batch, batch_idx):\n '''\n Validation step does different things if on exclusive pretraining mode or \n doing traditional supervision\n\n There is no return but metrics are computed in 3D (takes a while)\n for pretraining loss is used as a validation metric. 
\n\n When using boundary loss, we are not computing it in 3D validation.\n '''\n self.nlossterms = 0\n x, y, meta = val_batch\n self.visual_debug(x, y, \"Validation\")\n \n y_hat = None\n preproc = meta[\"preprocess\"][0]\n if preproc == \"pretrain_preprocessing\" and self.val_3d:\n print(f\"Skipping no label 3D validation {preproc}\")\n return\n \n \n if self.poly_level != 0:\n # Traditional supervision\n if y_hat is None:\n y_hat = self.forward(x)\n \n # Compute loss and metrics on CPU due to val_3d memory usage\n if self.val_3d:\n if isinstance(y_hat, dict):\n for _, value in y_hat.items():\n if value.device == torch.device(\"cpu\"):\n y = y.to(value.device)\n break\n elif y_hat.device == torch.device(\"cpu\"):\n y = y.to(y_hat.device)\n \n supervised_loss = self.supervised_loss(y=y, y_hat=y_hat, meta=meta, prestr=\"val_\")\n else:\n supervised_loss = 0\n \n # We only compute validation loss when not using val_3d, since 3D validation loss is very heavy on gpu[\n if self.nlossterms != 0:\n final_loss = supervised_loss/self.nlossterms\n self.log(\"val_nlossterms\", self.nlossterms, on_step=True, on_epoch=True)\n self.log(\"val_supervised_loss\", supervised_loss, on_step=True, on_epoch=True)\n self.log(\"val_loss\", final_loss, on_step=True, on_epoch=True)\n \n def on_validation_epoch_start(self):\n '''\n Start of validation epoch tasks:\n Initialize metric dictionary and list of IDs\n '''\n # Reset metric dict\n if self.val_3d:\n self.metrics: Dict = defaultdict(lambda: defaultdict(list))\n \n def on_validation_epoch_end(self):\n '''\n End of epoch tasks:\n - Increment BDL weights\n - Print results so far in terminal (stdout) for backup logging\n '''\n if self.bdl:\n self.lossfn.increment_weights()\n\n if self.trainer.fast_dev_run or self.trainer.sanity_checking:\n print(\"Fast dev run or sanity checking detected, not logging\")\n elif not self.pretraining and self.val_3d:\n for key, value in self.metrics.items():\n print(f\"\\n{key}\")\n selected_metrics = {\"names\": [], \"values\": []}\n for metric, metric_value in value.items():\n np_metric_value = np.array(metric_value)\n mean = np_metric_value.mean() \n std = np_metric_value.std() \n print(f\"{key} {metric}: {mean}+-{std}\")\n \n # Stopped logging std for every metric, too much not very useful data on neptune\n # self.logger.experiment[f\"training/{key}_{metric}_3d_std\"].log(std)\n \n if metric not in self.excluded_average_metric_keys:\n if \"error\" in metric:\n selected_metrics[\"names\"].append(f\"1 - {metric}\")\n selected_metrics[\"values\"].append(1 - mean)\n else:\n selected_metrics[\"names\"].append(metric)\n selected_metrics[\"values\"].append(mean)\n \n np_selected_metrics = np.array(selected_metrics[\"values\"])\n np_selected_metrics_mean = np_selected_metrics.mean()\n np_selected_metrics_std = np_selected_metrics.std()\n print(f\"Building end-of-epoch composite metric:\")\n for metric, value in zip(selected_metrics[\"names\"], selected_metrics[\"values\"]):\n print(f\"{metric}: {value}\")\n print(f\"{key}_composite_metric: {np_selected_metrics_mean} +- {np_selected_metrics_std}\")\n \n self.logger.experiment[f\"training/{key}_composite_metric\"].log(np_selected_metrics_mean)\n self.logger.experiment[f\"training/{key}_composite_metric_std\"].log(np_selected_metrics_std)\n \n\n def configure_optimizers(self):\n '''\n Select optimizer and scheduling strategy according to hparams.\n '''\n opt = getattr(self.hparams, \"opt\", \"Adam\")\n optimizer = get_optimizer(opt, self.model.parameters(), self.hparams.lr, 
wd=self.weight_decay)\n print(f\"Opt: {opt}, Weight decay: {self.weight_decay}\")\n\n if self.scheduling == \"poly\":\n print(\"Polynomial LR\")\n # scheduler = PolynomialLR(optimizer, total_iters=self.hparams.max_epochs, power=0.9, verbose=True)\n elif self.scheduling == \"step\" and self.scheduling_factor is None:\n print(\"Not using any scheduler\")\n return optimizer\n elif self.scheduling_factor is not None and self.scheduling == \"step\":\n print(f\"Using step LR {self.scheduling_factor}!\")\n scheduler = StepLR(optimizer, 1, self.scheduling_factor, verbose=True)\n return [optimizer], [scheduler]\n elif self.scheduling == \"cosine\":\n print(f\"Using CosineAnnealingLR with tmax {self.scheduling_factor}!\")\n scheduler = CosineAnnealingLR(optimizer, T_max=self.scheduling_factor, verbose=True)\n return [optimizer], [scheduler]" }, { "identifier": "E2DStackDataset", "path": "medpseg/eval_2d_utils.py", "snippet": "class E2DStackDataset():\n '''\n Speed up evaluation time slice stacking with dataloader compatible dataset\n '''\n def __init__(self, volume, extended_2d):\n self.volume = volume\n self.limits = [0, volume.shape[2] - 1 ]\n self.extended_2d = extended_2d\n \n def __len__(self):\n return self.volume.shape[2]\n\n def __getitem__(self, i):\n if self.extended_2d is None:\n input_slice = self.volume[:, :, i]\n else:\n central_slice = self.volume[:, :, i]\n input_slice = []\n for extend_i in range(-self.extended_2d, self.extended_2d + 1):\n if extend_i == 0:\n input_slice.append(central_slice)\n continue\n\n new_i = i + extend_i\n if new_i > self.limits[1]:\n new_i = self.limits[1]\n if new_i < self.limits[0]:\n new_i = self.limits[0]\n \n input_slice.append(self.volume[:, :, new_i])\n input_slice = torch.cat(input_slice, dim=1)\n '''\n plt.figure(figsize=(12, 6))\n plt.subplot(1, 3, 1)\n plt.imshow(input_slice[0, 0].detach().cpu().numpy(), cmap=\"gray\")\n plt.subplot(1, 3, 2)\n plt.imshow(input_slice[0, 1].detach().cpu().numpy(), cmap=\"gray\")\n plt.subplot(1, 3, 3)\n plt.imshow(input_slice[0, 2].detach().cpu().numpy(), cmap=\"gray\")\n plt.show()\n '''\n return input_slice[0]\n\n def get_dataloader(self, batch_size, pin_memory, num_workers):\n return DataLoader(self, batch_size=batch_size, pin_memory=pin_memory, num_workers=num_workers)" }, { "identifier": "argon_cpu_count", "path": "medpseg/eval_2d_utils.py", "snippet": "def argon_cpu_count() -> int:\n if os.getenv(\"NSLOTS\") is not None:\n return int(os.getenv(\"NSLOTS\"))\n else:\n return cpu_count()" } ]
import os
import torch
import numpy as np
import cc3d
import SimpleITK as sitk
from medpseg.poly_seg_2d_module import PolySeg2DModule
from medpseg.eval_2d_utils import E2DStackDataset, argon_cpu_count
from torch.nn import functional as F
from tqdm import tqdm
from collections import defaultdict
from operator import itemgetter
from typing import Dict, Optional
from multiprocessing import Queue
11,678
label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction ''' e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count()) outs = defaultdict(list) np_outs = {} np_means = {} np_stds = {} uncertainty_means = defaultdict(list) uncertainty_stds = defaultdict(list) for input_slice in tqdm(e2d_stack_dataloader, desc=f"Slicing with batch size {batch_size}."): if info_q is not None: package = input_slice[0].numpy().transpose(1, 2, 0).copy() # Not necessary anymore with current pre processing: September 2023 # package = (package + 1024) / (600 + 1024) info_q.image_to_front_end(package) if uncertainty is None: out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): outs[key].append(y_hat.cpu()) else: raise DeprecationWarning("Uncertainty deprecated for now, needs update") # Save outputs for each branch in buffer uncertainty_buffer = defaultdict(list) model.train() for _ in tqdm(range(uncertainty)): # 8 equivalent to (0, 1, 2) flips out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): uncertainty_buffer[key].append(y_hat.cpu()) model.eval() # Collect buffer items into means and STDs for each branch for key, buffer in uncertainty_buffer.items(): # use stack to keep batch dimension separate from acumulation dimension # statistics will take that dimension out buffer = torch.stack(buffer, dim=0) uncertainty_means[key].append(buffer.mean(dim=0)) uncertainty_stds[key].append(buffer.std(dim=0)) # Certain prediction volumes. Will no run if in uncertain mode. 
for key, y_hat in outs.items(): np_outs[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) # Compute final volume for uncertainty mean and uncertainty itself (STD) if uncertainty is not None: raise DeprecationWarning("Uncertainty deprecated for now, needs update") for key, y_hat in uncertainty_means.items(): np_means[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) for key, y_hat in uncertainty_stds.items(): np_stds[f"{key}_uncertainty"] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) if uncertainty is None: return np_outs else: return np_means, np_stds class PolySegmentationPipeline(): ''' This pipeline does all targets in a single weight ''' def __init__(self, weight="/home/diedre/diedre_phd/phd/models/medseg_25d_a100_long_silver_gold_gdl-epoch=22-step=76176-val_loss_3d=0.25-healthy_dice_3d=0.93-unhealthy_dice_3d=0.71-ggo_dice_3d=0.71-con_dice_3d=0.62-airway_dice_3d=0.90-vessel_dice_3d=0.87.ckpt", batch_size=1, # increase with high memory gpus cpu=False, output_dir=None, post=False): self.version = 'silver_gold_gdl' self.batch_size = batch_size self.device = torch.device("cpu") if cpu else torch.device("cuda:0")
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction ''' e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count()) outs = defaultdict(list) np_outs = {} np_means = {} np_stds = {} uncertainty_means = defaultdict(list) uncertainty_stds = defaultdict(list) for input_slice in tqdm(e2d_stack_dataloader, desc=f"Slicing with batch size {batch_size}."): if info_q is not None: package = input_slice[0].numpy().transpose(1, 2, 0).copy() # Not necessary anymore with current pre processing: September 2023 # package = (package + 1024) / (600 + 1024) info_q.image_to_front_end(package) if uncertainty is None: out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): outs[key].append(y_hat.cpu()) else: raise DeprecationWarning("Uncertainty deprecated for now, needs update") # Save outputs for each branch in buffer uncertainty_buffer = defaultdict(list) model.train() for _ in tqdm(range(uncertainty)): # 8 equivalent to (0, 1, 2) flips out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): uncertainty_buffer[key].append(y_hat.cpu()) model.eval() # Collect buffer items into means and STDs for each branch for key, buffer in uncertainty_buffer.items(): # use stack to keep batch dimension separate from acumulation dimension # statistics will take that dimension out buffer = torch.stack(buffer, dim=0) uncertainty_means[key].append(buffer.mean(dim=0)) 
uncertainty_stds[key].append(buffer.std(dim=0)) # Certain prediction volumes. Will no run if in uncertain mode. for key, y_hat in outs.items(): np_outs[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) # Compute final volume for uncertainty mean and uncertainty itself (STD) if uncertainty is not None: raise DeprecationWarning("Uncertainty deprecated for now, needs update") for key, y_hat in uncertainty_means.items(): np_means[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) for key, y_hat in uncertainty_stds.items(): np_stds[f"{key}_uncertainty"] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) if uncertainty is None: return np_outs else: return np_means, np_stds class PolySegmentationPipeline(): ''' This pipeline does all targets in a single weight ''' def __init__(self, weight="/home/diedre/diedre_phd/phd/models/medseg_25d_a100_long_silver_gold_gdl-epoch=22-step=76176-val_loss_3d=0.25-healthy_dice_3d=0.93-unhealthy_dice_3d=0.71-ggo_dice_3d=0.71-con_dice_3d=0.62-airway_dice_3d=0.90-vessel_dice_3d=0.87.ckpt", batch_size=1, # increase with high memory gpus cpu=False, output_dir=None, post=False): self.version = 'silver_gold_gdl' self.batch_size = batch_size self.device = torch.device("cpu") if cpu else torch.device("cuda:0")
self.model = PolySeg2DModule.load_from_checkpoint(weight, map_location="cpu").eval()
0
2023-11-21 20:03:33+00:00
16k
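Given a candidate completion, it can be checked against the record's next_line field. The sketch below applies two commonly used next-line metrics, exact match and edit similarity, to the medpseg record above; it is not an official scoring script for this dataset, and the prediction string is invented.

from difflib import SequenceMatcher

def score(prediction: str, reference: str) -> dict:
    pred, ref = prediction.strip(), reference.strip()
    return {
        "exact_match": pred == ref,
        "edit_similarity": SequenceMatcher(None, pred, ref).ratio(),
    }

reference = 'self.model = PolySeg2DModule.load_from_checkpoint(weight, map_location="cpu").eval()'
prediction = 'self.model = PolySeg2DModule.load_from_checkpoint(weight).eval()'  # made-up model output
print(score(prediction, reference))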
DLYuanGod/TinyGPT-V
minigpt4/datasets/builders/image_text_pair_builder.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "BaseDatasetBuilder", "path": "minigpt4/datasets/builders/base_dataset_builder.py", "snippet": "class BaseDatasetBuilder:\n train_dataset_cls, eval_dataset_cls = None, None\n\n def __init__(self, cfg=None):\n super().__init__()\n\n if cfg is None:\n # help to create datasets from default config.\n self.config = load_dataset_config(self.default_config_path())\n elif isinstance(cfg, str):\n self.config = load_dataset_config(cfg)\n else:\n # when called from task.build_dataset()\n self.config = cfg\n\n self.data_type = self.config.data_type\n\n self.vis_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n self.text_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n\n def build_datasets(self):\n # download, split, etc...\n # only called on 1 GPU/TPU in distributed\n\n if is_main_process():\n self._download_data()\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n # at this point, all the annotations and image/videos should be all downloaded to the specified locations.\n logging.info(\"Building datasets...\")\n datasets = self.build() # dataset['train'/'val'/'test']\n\n return datasets\n\n def build_processors(self):\n vis_proc_cfg = self.config.get(\"vis_processor\")\n txt_proc_cfg = self.config.get(\"text_processor\")\n\n if vis_proc_cfg is not None:\n vis_train_cfg = vis_proc_cfg.get(\"train\")\n vis_eval_cfg = vis_proc_cfg.get(\"eval\")\n\n self.vis_processors[\"train\"] = self._build_proc_from_cfg(vis_train_cfg)\n self.vis_processors[\"eval\"] = self._build_proc_from_cfg(vis_eval_cfg)\n\n if txt_proc_cfg is not None:\n txt_train_cfg = txt_proc_cfg.get(\"train\")\n txt_eval_cfg = txt_proc_cfg.get(\"eval\")\n\n self.text_processors[\"train\"] = self._build_proc_from_cfg(txt_train_cfg)\n self.text_processors[\"eval\"] = self._build_proc_from_cfg(txt_eval_cfg)\n\n @staticmethod\n def _build_proc_from_cfg(cfg):\n return (\n registry.get_processor_class(cfg.name).from_config(cfg)\n if cfg is not None\n else None\n )\n\n @classmethod\n def default_config_path(cls, type=\"default\"):\n return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])\n\n def _download_data(self):\n self._download_ann()\n self._download_vis()\n\n def _download_ann(self):\n \"\"\"\n Download annotation files if necessary.\n All the vision-language datasets should have annotations of unified format.\n\n storage_path can be:\n (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.\n (2) basename/dirname: will be suffixed with base name 
of URL if dirname is provided.\n\n Local annotation paths should be relative.\n \"\"\"\n anns = self.config.build_info.annotations\n\n splits = anns.keys()\n\n cache_root = registry.get_path(\"cache_root\")\n\n for split in splits:\n info = anns[split]\n\n urls, storage_paths = info.get(\"url\", None), info.storage\n\n if isinstance(urls, str):\n urls = [urls]\n if isinstance(storage_paths, str):\n storage_paths = [storage_paths]\n\n assert len(urls) == len(storage_paths)\n\n for url_or_filename, storage_path in zip(urls, storage_paths):\n # if storage_path is relative, make it full by prefixing with cache_root.\n if not os.path.isabs(storage_path):\n storage_path = os.path.join(cache_root, storage_path)\n\n dirname = os.path.dirname(storage_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if os.path.isfile(url_or_filename):\n src, dst = url_or_filename, storage_path\n if not os.path.exists(dst):\n shutil.copyfile(src=src, dst=dst)\n else:\n logging.info(\"Using existing file {}.\".format(dst))\n else:\n if os.path.isdir(storage_path):\n # if only dirname is provided, suffix with basename of URL.\n raise ValueError(\n \"Expecting storage_path to be a file path, got directory {}\".format(\n storage_path\n )\n )\n else:\n filename = os.path.basename(storage_path)\n\n download_url(url=url_or_filename, root=dirname, filename=filename)\n\n def _download_vis(self):\n\n storage_path = self.config.build_info.get(self.data_type).storage\n storage_path = utils.get_cache_path(storage_path)\n\n if not os.path.exists(storage_path):\n warnings.warn(\n f\"\"\"\n The specified path {storage_path} for visual inputs does not exist.\n Please provide a correct path to the visual inputs or\n refer to datasets/download_scripts/README.md for downloading instructions.\n \"\"\"\n )\n\n def build(self):\n \"\"\"\n Create by split datasets inheriting torch.utils.data.Datasets.\n\n # build() can be dataset-specific. 
Overwrite to customize.\n \"\"\"\n self.build_processors()\n\n build_info = self.config.build_info\n\n ann_info = build_info.annotations\n vis_info = build_info.get(self.data_type)\n\n datasets = dict()\n for split in ann_info.keys():\n if split not in [\"train\", \"val\", \"test\"]:\n continue\n\n is_train = split == \"train\"\n\n # processors\n vis_processor = (\n self.vis_processors[\"train\"]\n if is_train\n else self.vis_processors[\"eval\"]\n )\n text_processor = (\n self.text_processors[\"train\"]\n if is_train\n else self.text_processors[\"eval\"]\n )\n\n # annotation path\n ann_paths = ann_info.get(split).storage\n if isinstance(ann_paths, str):\n ann_paths = [ann_paths]\n\n abs_ann_paths = []\n for ann_path in ann_paths:\n if not os.path.isabs(ann_path):\n ann_path = utils.get_cache_path(ann_path)\n abs_ann_paths.append(ann_path)\n ann_paths = abs_ann_paths\n\n # visual data storage path\n vis_path = os.path.join(vis_info.storage, split)\n\n if not os.path.isabs(vis_path):\n # vis_path = os.path.join(utils.get_cache_path(), vis_path)\n vis_path = utils.get_cache_path(vis_path)\n\n if not os.path.exists(vis_path):\n warnings.warn(\"storage path {} does not exist.\".format(vis_path))\n\n # create datasets\n dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls\n datasets[split] = dataset_cls(\n vis_processor=vis_processor,\n text_processor=text_processor,\n ann_paths=ann_paths,\n vis_root=vis_path,\n )\n\n return datasets" }, { "identifier": "LaionDataset", "path": "minigpt4/datasets/datasets/laion_dataset.py", "snippet": "class LaionDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUAlignDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUAlignDataset(CaptionDataset):\n\n def __getitem__(self, index):\n\n # TODO this assumes image input, not general enough\n ann = self.annotation[index]\n\n img_file = '{}.jpg'.format(ann[\"image_id\"])\n image_path = os.path.join(self.vis_root, img_file)\n image = 
Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n caption = ann[\"caption\"]\n\n return {\n \"image\": image,\n \"answer\": caption,\n \"image_id\": self.img_ids[ann[\"image_id\"]],\n }" }, { "identifier": "TextCapDataset", "path": "minigpt4/datasets/datasets/text_caps.py", "snippet": "class TextCapDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n 'Briefly describe this image.',\n 'Provide a concise depiction of this image.',\n 'Present a short description of this image.',\n 'Summarize this image in a few words.',\n 'A short image caption:',\n 'A short image description:',\n 'A photo of ',\n 'An image that shows ',\n 'Write a short description for the image. ',\n 'Write a description for the photo.',\n 'Provide a description of what is presented in the photo.',\n 'Briefly describe the content of the image.',\n 'Can you briefly explain what you see in the image?',\n 'Could you use a few words to describe what you perceive in the photo?',\n 'Please provide a short depiction of the picture.',\n 'Using language, provide a short account of the image.',\n 'Use a few words to illustrate what is happening in the picture.',\n ]\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n\n def __len__(self):\n return len(self.ann[\"data\"])\n\n\n def __getitem__(self, index):\n info = self.ann[\"data\"][index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n caption = info[\"caption_str\"]\n caption = self.text_processor(caption)\n instruction = \"<Img><ImageHere></Img> [caption] {} \".format(random.choice(self.instruction_pool))\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": caption,\n }" }, { "identifier": "LlavaDetailDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n \n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaReasonDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaReasonDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n\n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaConversationDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.ann=[]\n\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "UnnaturalDataset", "path": "minigpt4/datasets/datasets/unnatural_instruction.py", "snippet": "class UnnaturalDataset(Dataset):\n def __init__(self, text_processor, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index][\"instances\"][0]\n instruction = info[\"instruction_with_input\"]\n constraints = info[\"constraints\"]\n answer = info[\"output\"]\n if constraints != None:\n instruction = instruction+\" \"+constraints\n\n return {\n \"instruction_input\": self.text_processor(instruction),\n \"answer\": self.text_processor(answer),\n }" }, { "identifier": "MultiTaskConversationDataset", "path": "minigpt4/datasets/datasets/multitask_conversation.py", "snippet": "class MultiTaskConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "GroundedDetailDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class GroundedDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[grounding] please describe this image in details',\n '[grounding] describe this image as detailed as possible',\n '[grounding] summarize this image in details',\n '[grounding] give a thorough description of what you see in this image',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n # image_file = 'COCO_train2014_{}.jpg'.format(info['image_id'])\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['grounded_caption']\n instruction = random.choice(self.instruction_pool)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "CaptionToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class CaptionToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"caption\"]\n answer = info[\"output\"]\n\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"CaptionToObject instruction\", instruction)\n print(\"CaptionToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "PhraseToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class PhraseToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"phrase\"]\n answer = \"<p>\"+input+\"</p> \"+info[\"bbox\"]\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"PhraseToObject instruction\", instruction)\n print(\"PhraseToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "ReferVisualGenomeDataset", "path": "minigpt4/datasets/datasets/vg_dataset.py", "snippet": "class ReferVisualGenomeDataset(Dataset):\n def __init__(self, vis_processor, text_processor, data_dir):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.data_dir = data_dir\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n all_regions = local.get_all_region_descriptions(self.data_dir)\n all_regions = [region for regions in all_regions for region in regions]\n\n # follow OFA practice, only regions smaller than 16384 pixels are used for refer\n self.regions = [region for region in all_regions if region.width * region.height < 16384]\n\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.regions)\n\n def preprocess(self, index):\n region = self.regions[index]\n image_file = region.image.url.split('/')[-2:]\n image_path = os.path.join(self.data_dir, *image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [100,100]\n\n sample_sentence = region.phrase\n refer_sentence = self.text_processor(sample_sentence)\n\n bbox = [region.x, region.y, region.width, region.height]\n\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": region.image.id,\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "ReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class ReferCOCODataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path, dataset='refcoco', splitBy='unc'):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.refer = REFER(ann_path, vis_root, dataset, splitBy)\n self.ref_ids = self.refer.getRefIds(split=\"train\")\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.ref_ids)\n\n def preprocess(self, index):\n ref_id = self.ref_ids[index]\n ref = self.refer.loadRefs(ref_id)[0]\n\n image_file = 'COCO_train2014_{:0>12}.jpg'.format(ref[\"image_id\"])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [image.shape[1], image.shape[2]]\n\n image_new_size = [100,100]\n\n sample_sentence = random.choice(ref['sentences'])['raw']\n refer_sentence = self.text_processor(sample_sentence)\n\n\n bbox = self.refer.getRefBox(ref['ref_id'])\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": ref['image_id'],\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "InvReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class InvReferCOCODataset(ReferCOCODataset):\n def __init__(self, *args, **kwargs):\n super(InvReferCOCODataset, self).__init__(*args, **kwargs)\n\n self.instruction_pool = [\n \"[identify] {}\",\n \"[identify] what object is in this location {}\",\n \"[identify] identify the object present at this location {}\",\n \"[identify] what is it in {}\",\n \"[identify] describe this object in {}\",\n \"[identify] this {} is\",\n \"[identify] the object in {} is\",\n ]\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n\n instruction = random.choice(self.instruction_pool).format(data['bbox'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n \n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['refer_sentence']),\n \"image_id\": data['image_id'],\n }" }, { "identifier": "GQADataset", "path": "minigpt4/datasets/datasets/gqa_datasets.py", "snippet": "class GQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def __getitem__(self, index):\n ann = self.annotation[index]\n\n image_path = 
os.path.join(self.vis_root, ann[\"image\"])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n instruction = random.choice(self.instruction_pool).format(question)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n answers = self.text_processor(ann[\"answer\"])\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answers,\n }" }, { "identifier": "AOKVQADataset", "path": "minigpt4/datasets/datasets/aok_vqa_datasets.py", "snippet": "class AOKVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n answer_key = \"direct_answers\"\n\n answer_weight = {}\n for answer in ann[answer_key]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[answer_key])\n else:\n answer_weight[answer] = 1 / len(ann[answer_key])\n\n answers = list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n return {\n \"image\": image,\n \"question\": question,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n question = self.text_processor(data[\"question\"])\n instruction = random.choice(self.instruction_pool).format(question)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n answer = self.text_processor(data['answer'])\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": answer,\n }" }, { "identifier": "COCOVQADataset", "path": "minigpt4/datasets/datasets/coco_vqa_datasets.py", "snippet": "class COCOVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n question_id = ann[\"question_id\"]\n\n answer_weight = {}\n for answer in ann[\"answer\"]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[\"answer\"])\n else:\n answer_weight[answer] = 1 / len(ann[\"answer\"])\n\n answers = 
list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n\n return {\n \"image\": image,\n \"question\": question,\n \"question_id\": question_id,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n instruction = random.choice(self.instruction_pool).format(data['question'])\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"question_id\": data[\"question_id\"],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['answer']),\n }" }, { "identifier": "OCRVQADataset", "path": "minigpt4/datasets/datasets/ocrvqa_dataset.py", "snippet": "class OCRVQADataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n self.data = self.create_data(ann_path)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def create_data(self, ann_path):\n processed_data = []\n with open(ann_path, 'r') as f:\n data = json.load(f)\n for k in data.keys():\n if data[k]['split'] != 1: continue # 1 for training, 2 for validation, 3 for test\n ext = os.path.splitext(data[k]['imageURL'])[1]\n imageFile = k + ext\n assert len(data[k]['questions']) == len(data[k]['answers'])\n for q, a in zip(data[k]['questions'], data[k]['answers']):\n processed_data.append(\n {'question': q,\n 'answer': a,\n 'image_path': imageFile,\n 'image_id': k,\n 'title': data[k]['title'],\n 'genre': data[k]['genre'],\n }\n )\n return processed_data\n\n def __len__(self):\n return len(self.data)" }, { "identifier": "COCOCapDataset", "path": "minigpt4/datasets/datasets/coco_caption.py", "snippet": "class COCOCapEvalDataset(CaptionEvalDataset):\nclass NoCapsEvalDataset(CaptionEvalDataset):\nclass RefCOCOEvalData(torch.utils.data.Dataset):\nclass EvalCaptionData(torch.utils.data.Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __new__(cls, *args, **kwargs):\n def __len__(self):\n def __getitem__(self, idx):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __len__(self):\n def __getitem__(self, idx):" } ]
import os import logging import warnings from minigpt4.common.registry import registry from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder from minigpt4.datasets.datasets.laion_dataset import LaionDataset from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset from minigpt4.datasets.datasets.text_caps import TextCapDataset from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset from minigpt4.datasets.datasets.gqa_datasets import GQADataset from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset from minigpt4.datasets.datasets.coco_caption import COCOCapDataset
11462
self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder):
@registry.register_builder("multitask_conversation") class MultitaskConversationBuilder(BaseDatasetBuilder): train_dataset_cls = MultiTaskConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/multitask_conversation/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("unnatural_instruction") class UnnaturalInstructionBuilder(BaseDatasetBuilder): train_dataset_cls = UnnaturalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nlp/unnatural_instruction.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( text_processor=self.text_processors["train"], ann_path=build_info.ann_path, ) return datasets @registry.register_builder("llava_detail") class LlavaDetailBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/detail.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_reason") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaReasonDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/reason.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder):
train_dataset_cls = ReferVisualGenomeDataset
14
2023-12-28 05:47:18+00:00
16k
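A note for readers scanning the raw rows: in the record that ends here, the held-out next line is train_dataset_cls = ReferVisualGenomeDataset, and the gold snippet index 14 is the zero-based position of the ReferVisualGenomeDataset entry in that record's context list. The Python sketch below is a minimal, hypothetical illustration of how such a row could be assembled into a next-line-prediction prompt; the dictionary keys are assumed names for a loaded row, not values taken from the dump itself.

# Minimal sketch (not part of the dump): assemble one row into a prompt and target.
# The keys used here (context, import_statement, cropped_code, next_line,
# gold_snippet_index) are assumptions about how a loaded row is keyed; adjust
# them to match whatever loader actually produces the records.
def build_prompt(record: dict) -> tuple:
    gold = record["context"][record["gold_snippet_index"]]
    # Sanity check: the gold snippet's identifier should appear in the target line,
    # e.g. ReferVisualGenomeDataset in "train_dataset_cls = ReferVisualGenomeDataset".
    assert gold["identifier"] in record["next_line"]
    prompt = "\n\n".join([
        "# " + gold["path"] + "\n" + gold["snippet"],  # retrieved cross-file snippet
        record["import_statement"],                    # in-file imports
        record["cropped_code"],                        # code preceding the held-out line
    ])
    return prompt, record["next_line"]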
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/pipelines/pipeline_flax_utils.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "FLAX_WEIGHTS_NAME", "path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "snippet": "class FlaxModelMixin(PushToHubMixin):\n def _from_config(cls, config, **kwargs):\n def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n def conditional_cast(param):\n def to_bf16(self, params: 
Union[Dict, FrozenDict], mask: Any = None):\n def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def init_weights(self, rng: jax.Array) -> Dict:\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n dtype: jnp.dtype = jnp.float32,\n *model_args,\n **kwargs,\n ):\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n params: Union[Dict, FrozenDict],\n is_main_process: bool = True,\n push_to_hub: bool = False,\n **kwargs,\n ):" }, { "identifier": "SCHEDULER_CONFIG_NAME", "path": "diffusers/src/diffusers/schedulers/scheduling_utils_flax.py", "snippet": "SCHEDULER_CONFIG_NAME = \"scheduler_config.json\"" }, { "identifier": "FlaxSchedulerMixin", "path": "diffusers/src/diffusers/schedulers/scheduling_utils_flax.py", "snippet": "class FlaxSchedulerMixin(PushToHubMixin):\n \"\"\"\n Mixin containing common functions for the schedulers.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that\n `from_config` can be used from a class different than the one used to save the config (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n ignore_for_config = [\"dtype\"]\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a Scheduler class from a pre-defined JSON-file.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`],\n e.g., `./my_model_directory/`.\n subfolder (`str`, *optional*):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. 
The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n **kwargs,\n )\n scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)\n\n if hasattr(scheduler, \"create_state\") and getattr(scheduler, \"has_state\", False):\n state = scheduler.create_state()\n\n if return_unused_kwargs:\n return scheduler, state, unused_kwargs\n\n return scheduler, state\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~FlaxSchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes" }, { "identifier": "logging", "path": "diffusers/src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level() -> int:\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict() -> Dict[str, int]:\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info() -> None:\ndef set_verbosity_warning() -> None:\ndef set_verbosity_debug() -> None:\ndef set_verbosity_error() -> None:\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs) -> None:\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar() -> None:\ndef disable_progress_bar() -> None:\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "CONFIG_NAME", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "CONFIG_NAME = \"config.json\"" }, { "identifier": "DIFFUSERS_CACHE", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "DIFFUSERS_CACHE = default_cache_path" }, { "identifier": "PushToHubMixin", "path": "diffusers/src/diffusers/utils/hub_utils.py", "snippet": "class PushToHubMixin:\n \"\"\"\n A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub.\n \"\"\"\n\n def _upload_folder(\n self,\n working_dir: Union[str, os.PathLike],\n repo_id: str,\n token: Optional[str] = None,\n commit_message: Optional[str] = None,\n create_pr: bool = False,\n ):\n \"\"\"\n Uploads all files in `working_dir` to `repo_id`.\n \"\"\"\n if commit_message is None:\n if \"Model\" in self.__class__.__name__:\n commit_message = \"Upload model\"\n elif \"Scheduler\" in self.__class__.__name__:\n commit_message = \"Upload scheduler\"\n else:\n commit_message = f\"Upload {self.__class__.__name__}\"\n\n logger.info(f\"Uploading the files of {working_dir} to {repo_id}.\")\n return 
upload_folder(\n repo_id=repo_id, folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr\n )\n\n def push_to_hub(\n self,\n repo_id: str,\n commit_message: Optional[str] = None,\n private: Optional[bool] = None,\n token: Optional[str] = None,\n create_pr: bool = False,\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n ) -> str:\n \"\"\"\n Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub.\n\n Parameters:\n repo_id (`str`):\n The name of the repository you want to push your model, scheduler, or pipeline files to. It should\n contain your organization name when pushing to an organization. `repo_id` can also be a path to a local\n directory.\n commit_message (`str`, *optional*):\n Message to commit while pushing. Default to `\"Upload {object}\"`.\n private (`bool`, *optional*):\n Whether or not the repository created should be private.\n token (`str`, *optional*):\n The token to use as HTTP bearer authorization for remote files. The token generated when running\n `huggingface-cli login` (stored in `~/.huggingface`).\n create_pr (`bool`, *optional*, defaults to `False`):\n Whether or not to create a PR with the uploaded files or directly commit.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether or not to convert the model weights to the `safetensors` format.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n\n Examples:\n\n ```python\n from diffusers import UNet2DConditionModel\n\n unet = UNet2DConditionModel.from_pretrained(\"stabilityai/stable-diffusion-2\", subfolder=\"unet\")\n\n # Push the `unet` to your namespace with the name \"my-finetuned-unet\".\n unet.push_to_hub(\"my-finetuned-unet\")\n\n # Push the `unet` to an organization with the name \"my-finetuned-unet\".\n unet.push_to_hub(\"your-org/my-finetuned-unet\")\n ```\n \"\"\"\n repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id\n\n # Save all files.\n save_kwargs = {\"safe_serialization\": safe_serialization}\n if \"Scheduler\" not in self.__class__.__name__:\n save_kwargs.update({\"variant\": variant})\n\n with tempfile.TemporaryDirectory() as tmpdir:\n self.save_pretrained(tmpdir, **save_kwargs)\n\n return self._upload_folder(\n tmpdir,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )" }, { "identifier": "http_user_agent", "path": "diffusers/src/diffusers/utils/hub_utils.py", "snippet": "def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n \"\"\"\n Formats a user-agent string with basic info about a request.\n \"\"\"\n ua = f\"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n return ua + \"; telemetry/off\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_flax_available():\n ua += f\"; jax/{_jax_version}\"\n ua += f\"; flax/{_flax_version}\"\n if is_onnx_available():\n ua += f\"; onnxruntime/{_onnxruntime_version}\"\n # CI will set this value to True\n if os.environ.get(\"DIFFUSERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua" }, { "identifier": "is_transformers_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_transformers_available():\n 
return _transformers_available" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __init_subclass__(cls) -> None:\n \"\"\"Register subclasses as pytree nodes.\n\n This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with\n `static_graph=True` with modules that output `ModelOutput` subclasses.\n \"\"\"\n if is_torch_available():\n import torch.utils._pytree\n\n torch.utils._pytree._register_pytree_node(\n cls,\n torch.utils._pytree._dict_flatten,\n lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),\n )\n\n def __post_init__(self) -> None:\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k: Any) -> Any:\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name: Any, value: Any) -> None:\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any, ...]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" } ]
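For reference, a minimal sketch of the two-value return documented in the `FlaxSchedulerMixin.from_pretrained` snippet above; the scheduler class and model id below are illustrative placeholders, not values taken from this record.

```python
# Minimal sketch: FlaxSchedulerMixin.from_pretrained returns (scheduler, state),
# because Flax schedulers keep their mutable state outside the scheduler object.
from diffusers import FlaxPNDMScheduler  # illustrative FlaxSchedulerMixin subclass

scheduler, scheduler_state = FlaxPNDMScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # illustrative model id
    subfolder="scheduler",
)
print(type(scheduler).__name__, type(scheduler_state).__name__)
```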
import importlib
import inspect
import os
import flax
import numpy as np
import PIL.Image
from typing import Any, Dict, List, Optional, Union
from flax.core.frozen_dict import FrozenDict
from huggingface_hub import create_repo, snapshot_download
from PIL import Image
from tqdm.auto import tqdm
from ..configuration_utils import ConfigMixin
from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin
from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin
from ..utils import (
    CONFIG_NAME,
    DIFFUSERS_CACHE,
    BaseOutput,
    PushToHubMixin,
    http_user_agent,
    is_transformers_available,
    logging,
)
from transformers import FlaxPreTrainedModel
from diffusers import pipelines
from diffusers import pipelines
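As a hedged illustration of the `ConfigMixin.load_config` resolution logic shown in the first context snippet (local file, then local directory, then Hub download), with a placeholder model id and subfolder:

```python
# Minimal sketch: load_config returns the raw config dict that ConfigMixin
# resolves from a local path or the Hub, before extract_init_dict filters it.
from diffusers import UNet2DConditionModel  # any ConfigMixin subclass

config = UNet2DConditionModel.load_config(
    "runwayml/stable-diffusion-v1-5",  # placeholder model id
    subfolder="unet",
)
print(config["_class_name"])  # e.g. "UNet2DConditionModel"
```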
11032
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. """ images: Union[List[PIL.Image.Image], np.ndarray]
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. """ images: Union[List[PIL.Image.Image], np.ndarray]
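The `import_flax_or_no_model` helper above prefers a `Flax`-prefixed class and falls back to the plain class name. A small sketch follows; the transformers classes are illustrative, and the module path is assumed from this record's file path.

```python
# Minimal sketch of the fallback behaviour of import_flax_or_no_model.
import transformers

from diffusers.pipelines.pipeline_flax_utils import import_flax_or_no_model

text_encoder_cls = import_flax_or_no_model(transformers, "CLIPTextModel")
# -> transformers.FlaxCLIPTextModel, because the Flax variant exists

tokenizer_cls = import_flax_or_no_model(transformers, "CLIPTokenizer")
# -> transformers.CLIPTokenizer, since there is no "FlaxCLIPTokenizer"
```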
class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
7
2023-12-28 08:17:40+00:00
16k
FoundationVision/UniRef
detectron2/evaluation/coco_evaluation.py
[ { "identifier": "CfgNode", "path": "detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n load a config file from untrusted sources before manually inspecting\n the content of the file.\n 2. Support config versioning.\n When attempting to merge an old config, it will convert the old config automatically.\n\n .. automethod:: clone\n .. automethod:: freeze\n .. automethod:: defrost\n .. automethod:: is_frozen\n .. automethod:: load_yaml_with_base\n .. automethod:: merge_from_list\n .. automethod:: merge_from_other_cfg\n \"\"\"\n\n @classmethod\n def _open_cfg(cls, filename):\n return PathManager.open(filename, \"r\")\n\n # Note that the default value of allow_unsafe is changed to True\n def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:\n \"\"\"\n Load content from the given config file and merge it into self.\n\n Args:\n cfg_filename: config filename\n allow_unsafe: allow unsafe yaml syntax\n \"\"\"\n assert PathManager.isfile(cfg_filename), f\"Config file '{cfg_filename}' does not exist!\"\n loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)\n loaded_cfg = type(self)(loaded_cfg)\n\n # defaults.py needs to import CfgNode\n from .defaults import _C\n\n latest_ver = _C.VERSION\n assert (\n latest_ver == self.VERSION\n ), \"CfgNode.merge_from_file is only allowed on a config object of latest version!\"\n\n logger = logging.getLogger(__name__)\n\n loaded_ver = loaded_cfg.get(\"VERSION\", None)\n if loaded_ver is None:\n from .compat import guess_version\n\n loaded_ver = guess_version(loaded_cfg, cfg_filename)\n assert loaded_ver <= self.VERSION, \"Cannot merge a v{} config into a v{} config.\".format(\n loaded_ver, self.VERSION\n )\n\n if loaded_ver == self.VERSION:\n self.merge_from_other_cfg(loaded_cfg)\n else:\n # compat.py needs to import CfgNode\n from .compat import upgrade_config, downgrade_config\n\n logger.warning(\n \"Loading an old v{} config file '{}' by automatically upgrading to v{}. 
\"\n \"See docs/CHANGELOG.md for instructions to update your files.\".format(\n loaded_ver, cfg_filename, self.VERSION\n )\n )\n # To convert, first obtain a full config at an old version\n old_self = downgrade_config(self, to_version=loaded_ver)\n old_self.merge_from_other_cfg(loaded_cfg)\n new_config = upgrade_config(old_self)\n self.clear()\n self.update(new_config)\n\n def dump(self, *args, **kwargs):\n \"\"\"\n Returns:\n str: a yaml string representation of the config\n \"\"\"\n # to make it show up in docs\n return super().dump(*args, **kwargs)" }, { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "convert_to_coco_json", "path": "detectron2/data/datasets/coco.py", "snippet": "def convert_to_coco_json(dataset_name, output_file, allow_cached=True):\n \"\"\"\n Converts dataset into COCO format and saves it to a json file.\n dataset_name must be registered in DatasetCatalog and in detectron2's standard format.\n\n Args:\n dataset_name:\n reference from the config file to the catalogs\n must be registered in DatasetCatalog and in detectron2's standard format\n output_file: path of json file that will be saved to\n allow_cached: if json file is already present then skip conversion\n \"\"\"\n\n # TODO: The dataset or the conversion script *may* change,\n # a checksum would be useful for validating the cached data\n\n PathManager.mkdirs(os.path.dirname(output_file))\n with file_lock(output_file):\n if PathManager.exists(output_file) and allow_cached:\n logger.warning(\n f\"Using previously cached COCO format annotations at '{output_file}'. \"\n \"You need to clear the cache file if your dataset has been modified.\"\n )\n else:\n logger.info(f\"Converting annotations of dataset '{dataset_name}' to COCO format ...)\")\n coco_dict = convert_to_coco_dict(dataset_name)\n\n logger.info(f\"Caching COCO format annotations at '{output_file}' ...\")\n tmp_file = output_file + \".tmp\"\n with PathManager.open(tmp_file, \"w\") as f:\n json.dump(coco_dict, f)\n shutil.move(tmp_file, output_file)" }, { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. 
Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "pairwise_iou", "path": "detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "create_small_table", "path": "detectron2/utils/logger.py", "snippet": "def create_small_table(small_dict):\n \"\"\"\n Create a small table using the keys of small_dict as headers. This is only\n suitable for small dictionaries.\n\n Args:\n small_dict (dict): a result dictionary of only a few items.\n\n Returns:\n str: the table as a string.\n \"\"\"\n keys, values = tuple(zip(*small_dict.items()))\n table = tabulate(\n [values],\n headers=keys,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n stralign=\"center\",\n numalign=\"center\",\n )\n return table" }, { "identifier": "DatasetEvaluator", "path": "detectron2/evaluation/evaluator.py", "snippet": "class DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, inputs, outputs):\n \"\"\"\n Process the pair of inputs and outputs.\n If they contain batches, the pairs can be consumed one-by-one using `zip`:\n\n .. code-block:: python\n\n for input_, output in zip(inputs, outputs):\n # do evaluation on single input/output pair\n ...\n\n Args:\n inputs (list): the inputs that's used to call the model.\n outputs (list): the return value of `model(inputs)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass" }, { "identifier": "RefCOCOeval", "path": "detectron2/evaluation/refcocoeval.py", "snippet": "class RefCOCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... 
# load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n # for computing overall iou\n self.total_intersection_area = 0\n self.total_union_area = 0\n self.iou_list = []\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n # evaluateImg = self.evaluateImg\n # maxDet = p.maxDets[-1]\n # self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n # for catId in catIds\n # for areaRng in p.areaRng\n # for imgId in p.imgIds\n # ]\n # self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n\n # for computing overall iou\n # there is only one bbox and segm\n if p.iouType == 'bbox':\n g, d = g[0], d[0]\n g_bbox = [g[0], g[1], g[2] + g[0], g[3] + g[1]] # x1y1wh -> x1y1x2y2\n d_bbox = [d[0], d[1], d[2] + d[0], d[3] + d[1]] # x1y1wh -> x1y1x2y2\n g_bbox = torch.tensor(g_bbox).unsqueeze(0)\n d_bbox = torch.tensor(d_bbox).unsqueeze(0)\n iou, intersection, union = compute_bbox_iou(d_bbox, g_bbox)\n elif p.iouType == 'segm':\n g_segm = decode(g[0])\n d_segm = decode(d[0])\n g_segm = torch.tensor(g_segm).unsqueeze(0)\n d_segm = torch.tensor(d_segm).unsqueeze(0)\n iou, intersection, union = compute_mask_iou(d_segm, g_segm)\n else:\n raise Exception('unknown iouType for iou computation')\n iou, intersection, union = iou.item(), intersection.item(), union.item()\n self.total_intersection_area += intersection\n self.total_union_area += union\n self.iou_list.append(iou)\n return ious\n\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load 
computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] 
for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def 
_summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()" } ]
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.evaluation.refcocoeval import RefCOCOeval
13,051
try: except ImportError: COCOeval_opt = COCOeval class COCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), allow_cached_coco=True, force_tasks=None, refcoco=False ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (int): limit on the maximum number of detections per image. By default in COCO, this limit is to 100, but this can be customized to be greater, as is needed in evaluation metrics AP fixed and AP pool (see https://arxiv.org/pdf/2102.01066.pdf) This doesn't affect keypoint evaluation. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval When empty, it will use the defaults in COCO. Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. allow_cached_coco (bool): Whether to use cached coco json from previous validation runs. You should set this to False if you need to use different validation data. Defaults to True. """ self.dataset_name = dataset_name self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self.force_tasks = force_tasks self.refcoco = refcoco if use_fast_impl and (COCOeval_opt is COCOeval): self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") use_fast_impl = False self._use_fast_impl = use_fast_impl # COCOeval requires the limit on the number of detections per image (maxDets) to be a list # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the # 3rd element (100) is used as the limit on the number of detections per image when # evaluating AP. 
COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. if max_dets_per_image is None: max_dets_per_image = [1, 10, 100] else: max_dets_per_image = [1, 10, max_dets_per_image] self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): if output_dir is None: raise ValueError( "output_dir must be provided to COCOEvaluator " "for datasets not in COCO format." ) self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path
# Copyright (c) Facebook, Inc. and its affiliates. try: except ImportError: COCOeval_opt = COCOeval class COCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), allow_cached_coco=True, force_tasks=None, refcoco=False ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (int): limit on the maximum number of detections per image. By default in COCO, this limit is to 100, but this can be customized to be greater, as is needed in evaluation metrics AP fixed and AP pool (see https://arxiv.org/pdf/2102.01066.pdf) This doesn't affect keypoint evaluation. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval When empty, it will use the defaults in COCO. Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. allow_cached_coco (bool): Whether to use cached coco json from previous validation runs. You should set this to False if you need to use different validation data. Defaults to True. """ self.dataset_name = dataset_name self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self.force_tasks = force_tasks self.refcoco = refcoco if use_fast_impl and (COCOeval_opt is COCOeval): self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") use_fast_impl = False self._use_fast_impl = use_fast_impl # COCOeval requires the limit on the number of detections per image (maxDets) to be a list # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the # 3rd element (100) is used as the limit on the number of detections per image when # evaluating AP. 
COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. if max_dets_per_image is None: max_dets_per_image = [1, 10, 100] else: max_dets_per_image = [1, 10, max_dets_per_image] self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self._metadata = MetadataCatalog.get(dataset_name) if not hasattr(self._metadata, "json_file"): if output_dir is None: raise ValueError( "output_dir must be provided to COCOEvaluator " "for datasets not in COCO format." ) self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)
2
2023-12-22 13:31:33+00:00
16k
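Aside (not part of the record above): the record reproduces a COCOEvaluator variant (with extra force_tasks/refcoco arguments) together with the maxDets reformatting described in its docstring. A minimal usage sketch against stock detectron2 follows; the dataset name "my_coco_val", the cfg/model objects, and the output directory are placeholder assumptions, not taken from the source.

# Minimal sketch (assumptions: detectron2 installed, a COCO-format dataset
# registered under the placeholder name "my_coco_val", and a trained `model`).
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

def evaluate_on_coco(cfg, model, dataset_name="my_coco_val", output_dir="./eval_out"):
    # Leaving max_dets_per_image unset keeps the default [1, 10, 100] maxDets list,
    # matching the reformatting logic shown in the record above.
    evaluator = COCOEvaluator(dataset_name, output_dir=output_dir)
    loader = build_detection_test_loader(cfg, dataset_name)
    # inference_on_dataset drives evaluator.reset()/process()/evaluate() and returns
    # a dict of metrics, e.g. {"bbox": {"AP": ..., "AP50": ...}, ...}.
    return inference_on_dataset(model, loader, evaluator)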
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n # improve the resolution of DMTET at these steps\n progressive_resolution_steps: Optional[int] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if 
self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n self.cached_sdf = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # adjust the position of mesh\n if \"full_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.3\n elif \"half_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.1\n elif \"head_only\" in mesh_path:\n mesh.vertices[:,2] = mesh.vertices[:,2] + 0.15\n elif \"t-pose\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.4\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = 
np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(2000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((40000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n\n sdf_loss: Optional[Float[Tensor, \"*N 1\"]] = None\n if self.cfg.use_sdf_loss and self.cached_sdf is not None:\n selected_points_idx = torch.LongTensor(random.sample(range(points_unscaled.shape[0]), 100000))\n gt_sdf = torch.from_numpy(-self.cached_sdf(points_unscaled[selected_points_idx].cpu().numpy())).to(\n points_unscaled\n )[..., None]\n sdf_loss = F.mse_loss(gt_sdf, sdf[selected_points_idx], reduction='sum')\n return sdf, deformation, sdf_loss\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n\n if global_step >= 
(self.cfg.start_sdf_loss_step + 1) and self.cached_sdf is None:\n\n from pysdf import SDF\n import trimesh\n\n mesh_v_pos = np.load('.threestudio_cache/mesh_v_pos.npy')\n mesh_t_pos_idx = np.load('.threestudio_cache/mesh_t_pos_idx.npy')\n cached_mesh = trimesh.Trimesh(\n vertices=mesh_v_pos,\n faces=mesh_t_pos_idx,\n )\n self.cached_sdf = SDF(cached_mesh.vertices, cached_mesh.faces)\n\n if self.cfg.progressive_resolution_steps is not None:\n if global_step >= self.cfg.progressive_resolution_steps[0] and self.cfg.isosurface_resolution < 256:\n self.cfg.isosurface_resolution = 256\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n if global_step >= self.cfg.progressive_resolution_steps[1] and self.cfg.isosurface_resolution < 512:\n self.cfg.isosurface_resolution = 512\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n 
self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n 
[4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = 
edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n 
components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # 
Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n \n setattr(co, 'max_cost', 2.0)\n setattr(po, 'resolution', 4096)\n \n atlas.generate(co, po, verbose=True)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = 
ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF from tqdm import tqdm import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh
14,266
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config )
self.feature_network = get_mlp(
8
2023-12-23 12:37:48+00:00
16k
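Aside (not part of the record above): the record centers on a tetrahedral SDF grid. MarchingTetrahedraHelper loads a precomputed tets file (vertex positions plus 4-vertex tetrahedra indices) and TetrahedraSDFGrid keeps one SDF value per grid vertex, with an optional per-vertex deformation. The standalone sketch below shows those buffer shapes; it assumes a local load/tets/128_tets.npz with the same keys as in the code above.

# Standalone sketch of the tetrahedra-grid buffers referenced above.
# Assumption: "load/tets/128_tets.npz" exists locally and stores
# "vertices" as an [Nv, 3] float array and "indices" as an [Nt, 4] integer array.
import numpy as np
import torch

tets = np.load("load/tets/128_tets.npz")
grid_vertices = torch.from_numpy(tets["vertices"]).float()  # [Nv, 3] tet-grid vertex positions
indices = torch.from_numpy(tets["indices"]).long()          # [Nt, 4] vertex ids per tetrahedron

# One scalar SDF value per grid vertex, zero-initialized as in TetrahedraSDFGrid.configure().
sdf = torch.zeros((grid_vertices.shape[0], 1), dtype=torch.float32)
# Optional per-vertex deformation of the grid, same shape as the vertex positions.
deformation = torch.zeros_like(grid_vertices)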
Con6924/SPM
evaluate_task.py
[ { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:" }, { "identifier": "RootConfig", "path": "src/configs/config.py", "snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None" }, { "identifier": "GenerationConfig", "path": "src/configs/generation_config.py", "snippet": "class GenerationConfig(BaseModel):\n prompts: list[str] = []\n negative_prompt: str = \"bad anatomy,watermark,extra digit,signature,worst quality,jpeg artifacts,normal quality,low quality,long neck,lowres,error,blurry,missing fingers,fewer digits,missing arms,text,cropped,Humpbacked,bad hands,username\"\n unconditional_prompt: str = \"\"\n width: int = 512\n height: int = 512\n num_inference_steps: int = 30\n guidance_scale: float = 7.5\n seed: int = 2024\n generate_num: int = 1\n\n save_path: str = None # can be a template, e.g. \"path/to/img_{}.png\",\n # then the generated images will be saved as \"path/to/img_0.png\", \"path/to/img_1.png\", ...\n\n def dict(self):\n results = {}\n for attr in vars(self):\n if not attr.startswith(\"_\"):\n results[attr] = getattr(self, attr)\n return results\n \n @staticmethod\n def fix_format(cfg):\n for k, v in cfg.items():\n if isinstance(v, list):\n cfg[k] = v[0]\n elif isinstance(v, torch.Tensor):\n cfg[k] = v.item()" }, { "identifier": "train_util", "path": "src/engine/train_util.py", "snippet": "UNET_IN_CHANNELS = 4 # Stable Diffusion の in_channels は 4 で固定。XLも同じ。\nVAE_SCALE_FACTOR = 8 # 2 ** (len(vae.config.block_out_channels) - 1) = 8\nUNET_ATTENTION_TIME_EMBED_DIM = 256 # XL\nTEXT_ENCODER_2_PROJECTION_DIM = 1280\nUNET_PROJECTION_CLASS_EMBEDDING_INPUT_DIM = 2816\ndef get_random_noise(\n batch_size: int, height: int, width: int, generator: torch.Generator = None\n) -> torch.Tensor:\ndef apply_noise_offset(latents: torch.FloatTensor, noise_offset: float):\ndef get_initial_latents(\n scheduler: SchedulerMixin,\n n_imgs: int,\n height: int,\n width: int,\n n_prompts: int,\n generator=None,\n) -> torch.Tensor:\ndef text_tokenize(\n tokenizer: CLIPTokenizer, # 普通ならひとつ、XLならふたつ!\n prompts: list[str],\n):\ndef text_encode(text_encoder: CLIPTextModel, tokens):\ndef encode_prompts(\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTokenizer,\n prompts: list[str],\n return_tokens: bool = False,\n):\ndef text_encode_xl(\n text_encoder: SDXL_TEXT_ENCODER_TYPE,\n tokens: torch.FloatTensor,\n num_images_per_prompt: int = 1,\n):\ndef encode_prompts_xl(\n tokenizers: list[CLIPTokenizer],\n text_encoders: list[SDXL_TEXT_ENCODER_TYPE],\n prompts: list[str],\n num_images_per_prompt: int = 1,\n) -> tuple[torch.FloatTensor, torch.FloatTensor]:\ndef concat_embeddings(\n unconditional: torch.FloatTensor,\n conditional: torch.FloatTensor,\n n_imgs: int,\n):\ndef predict_noise(\n unet: 
UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n guidance_scale=7.5,\n) -> torch.FloatTensor:\ndef diffusion(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: torch.FloatTensor,\n total_timesteps: int = 1000,\n start_timesteps=0,\n **kwargs,\n):\ndef rescale_noise_cfg(\n noise_cfg: torch.FloatTensor, noise_pred_text, guidance_rescale=0.0\n):\ndef predict_noise_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale=7.5,\n guidance_rescale=0.7,\n) -> torch.FloatTensor:\ndef diffusion_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: tuple[torch.FloatTensor, torch.FloatTensor],\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale: float = 1.0,\n total_timesteps: int = 1000,\n start_timesteps=0,\n):\ndef get_add_time_ids(\n height: int,\n width: int,\n dynamic_crops: bool = False,\n dtype: torch.dtype = torch.float32,\n):\ndef get_optimizer(config, trainable_params):\ndef get_scheduler_fix(config, optimizer: Optimizer, num_processes: int = 1):\n def wrap_check_needless_num_warmup_steps(return_vals):\ndef get_random_resolution_in_bucket(bucket_resolution: int = 512) -> tuple[int, int]:\ndef text2img(pipe: DiffusionPipeline,\n prompts: Union[str, list[str]], \n negative_prompt: Union[str, list[str]] = \"\", \n width: int = 512, \n height: int = 512,\n num_inference_steps: int = 30,\n guidance_scale: int = 7.5,\n seed: int = None,\n generate_num: int = 1,\n tag: str = \"\",\n **kwargs):\ndef latent2img(pipe: DiffusionPipeline,\n scheduler,\n noise_pred: torch.FloatTensor,\n latents: torch.FloatTensor,\n timestep: int,\n tag: str = \"ori\",\n **kwargs):" }, { "identifier": "model_util", "path": "src/models/model_util.py", "snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: 
str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:" }, { "identifier": "SPMLayer", "path": "src/models/spm.py", "snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )" }, { "identifier": "SPMNetwork", "path": "src/models/spm.py", "snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: 
{len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0" }, { "identifier": "load_state_dict", "path": "src/models/merge_spm.py", "snippet": "def load_state_dict(file_name, dtype):\n if os.path.splitext(file_name)[1] == \".safetensors\":\n sd = load_file(file_name)\n metadata = load_metadata_from_safetensors(file_name)\n else:\n sd = torch.load(file_name, map_location=\"cpu\")\n metadata = {}\n\n for key in list(sd.keys()):\n if type(sd[key]) == torch.Tensor:\n sd[key] = sd[key].to(dtype)\n\n return sd, metadata" }, { "identifier": "SLDPipeline", "path": "src/misc/sld_pipeline.py", "snippet": "class SLDPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for text-to-image generation using Safe Latent Diffusion.\n\n The implementation is based on the [`StableDiffusionPipeline`]\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. 
Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n ],\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n ):\n super().__init__()\n safety_concept: Optional[str] = 'hate, harassment, violence, suffering, humiliation, harm, suicide, ' \\\n 'sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, ' \\\n 'drug use, theft, vandalism, weapons, child abuse, brutality, cruelty'\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n if safety_checker is None:\n logger.warn(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self._safety_text_concept = safety_concept\n\n @property\n def safety_concept(self):\n r\"\"\"\n Getter method for the safety concept used with SLD\n\n Returns:\n `str`:\n The text describing the safety concept\n \"\"\"\n return self._safety_text_concept\n\n @safety_concept.setter\n def safety_concept(self, concept):\n r\"\"\"\n Setter method for the safety concept used with SLD\n\n Args:\n concept (`str`):\n The text of the new safety concept\n \"\"\"\n self._safety_text_concept = concept\n\n def enable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Enable memory efficient attention as implemented in xformers.\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference\n time. Speed up at training time is not guaranteed.\n\n Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention\n is used.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(True)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention as implemented in xformers.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(False)\n\n def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = \"auto\"):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,\n `attention_head_dim` must be a multiple of `slice_size`.\n \"\"\"\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = self.unet.config.attention_head_dim // 2\n self.unet.set_attention_slice(slice_size)\n\n def disable_attention_slicing(self):\n r\"\"\"\n Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go\n back to computing attention in one step.\n \"\"\"\n # set slice_size = `None` to disable `attention slicing`\n self.enable_attention_slicing(None)\n\n def enable_sequential_cpu_offload(self):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet,\n text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(\"cuda\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n height: int = 512,\n width: int = 512,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[torch.Generator] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n sld_guidance_scale: Optional[float] = 1000,\n sld_warmup_steps: Optional[int] = 10,\n sld_threshold: Optional[float] = 0.01,\n sld_momentum_scale: Optional[float] = 0.3,\n sld_mom_beta: Optional[float] = 0.4,\n **kwargs,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n height (`int`, *optional*, defaults to 512):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to 512):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator`, *optional*):\n A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation\n deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. 
Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n sld_guidance_scale (`float`, *optional*, defaults to 1000):\n The guidance scale of safe latent diffusion. If set to be less than 1, safety guidance will be disabled.\n sld_warmup_steps (`int`, *optional*, defaults to 10):\n Number of warmup steps for safety guidance. SLD will only be applied for diffusion steps greater\n than `sld_warmup_steps`.\n sld_threshold (`float`, *optional*, defaults to 0.01):\n Threshold that separates the hyperplane between appropriate and inappropriate images.\n sld_momentum_scale (`float`, *optional*, defaults to 0.3):\n Scale of the SLD momentum to be added to the safety guidance at each diffusion step.\n If set to 0.0 momentum will be disabled. Momentum is already built up during warmup,\n i.e. for diffusion steps smaller than `sld_warmup_steps`.\n sld_mom_beta (`float`, *optional*, defaults to 0.4):\n Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous\n momentum will be kept. Momentum is already built up during warmup, i.e. for diffusion steps smaller than\n `sld_warmup_steps`.\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n if isinstance(prompt, str):\n batch_size = 1\n elif isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n enable_safety_guidance = True\n if sld_guidance_scale < 1:\n enable_safety_guidance = False\n logger.warn('You have disabled safety guidance.')\n\n # get prompt text embeddings\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n\n if text_input_ids.shape[-1] > self.tokenizer.model_max_length:\n removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" 
{self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]\n text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # Encode the safety concept text\n if enable_safety_guidance:\n safety_concept_input = self.tokenizer(\n [self._safety_text_concept],\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0]\n\n # duplicate safety embeddings for each generation per prompt, using mps friendly method\n seq_len = safety_embeddings.shape[1]\n safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1)\n safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings, safety_embeddings])\n\n else:\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n # get the initial random noise unless the user supplied it\n\n # Unlike in other pipelines, latents need to be generated in the target device\n # for 1-to-1 results reproducibility with the CompVis implementation.\n # However this currently doesn't work in `mps`.\n latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)\n latents_dtype = 
text_embeddings.dtype\n if latents is None:\n if self.device.type == \"mps\":\n # randn does not work reproducibly on mps\n latents = torch.randn(latents_shape, generator=generator, device=\"cpu\", dtype=latents_dtype).to(\n self.device\n )\n else:\n latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)\n else:\n if latents.shape != latents_shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {latents_shape}\")\n latents = latents.to(self.device)\n\n # set timesteps\n self.scheduler.set_timesteps(num_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # Some schedulers like PNDM have timesteps as arrays\n # It's more optimized to move all timesteps to correct device beforehand\n timesteps_tensor = self.scheduler.timesteps.to(self.device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n\n safety_momentum = None\n\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * (3 if enable_safety_guidance else 2)) \\\n if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))\n noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]\n\n # default classifier free guidance\n noise_guidance = (noise_pred_text - noise_pred_uncond)\n\n # Perform SLD guidance\n if enable_safety_guidance:\n if safety_momentum is None:\n safety_momentum = torch.zeros_like(noise_guidance)\n noise_pred_safety_concept = noise_pred_out[2]\n\n # Equation 6\n scale = torch.clamp(\n torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.)\n\n # Equation 6\n safety_concept_scale = torch.where(\n (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,\n torch.zeros_like(scale), scale)\n\n # Equation 4\n noise_guidance_safety = torch.mul(\n (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)\n\n # Equation 7\n noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum\n\n # Equation 8\n safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety\n\n if i >= sld_warmup_steps: # Warmup\n # Equation 3\n noise_guidance = noise_guidance - noise_guidance_safety\n\n noise_pred = noise_pred_uncond + guidance_scale * 
noise_guidance\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents).sample\n\n image = (image / 2 + 0.5).clamp(0, 1)\n\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n\n if self.safety_checker is not None:\n safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors=\"pt\").to(\n self.device\n )\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)\n )\n else:\n has_nsfw_concept = None\n\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return SLDPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept,\n applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None)" } ]
import argparse
import gc
import warnings
import torch
from pathlib import Path
from typing import Literal
from torch.utils.data import DataLoader
from accelerate import PartialState, Accelerator
from src.configs import config
from src.configs.config import RootConfig
from src.configs.generation_config import GenerationConfig
from src.engine import train_util
from src.evaluation import *
from src.models import model_util
from src.models.spm import SPMLayer, SPMNetwork
from src.models.merge_spm import load_state_dict
from src.misc.sld_pipeline import SLDPipeline
11603
def parse_extra_args(extra_args): if extra_args is None or extra_args == ['']: return {} extra_args_dict = {} for extra_arg in extra_args: key, value = extra_arg.split("=") # convert value to various types if value.isdigit(): value = int(value) elif value.replace(".", "", 1).isdigit(): value = float(value) elif value[0] == "[" and value[-1] == "]": value = [i.replace('+', ' ') for i in value[1:-1].split(",")] value = [v.strip() for v in value] if value[0].isdigit(): value = [int(v) for v in value] elif value[0].replace(".", "", 1).isdigit(): value = [float(v) for v in value] extra_args_dict[key] = value return extra_args_dict def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD
DIFFUSERS_CACHE_DIR = ".cache/" UNET_NAME = "unet" TEXT_ENCODER_NAME = "text_encoder" MATCHING_METRICS = Literal[ "clipcos", "clipcos_tokenuni", "tokenuni", "allone", ] distributed_state = PartialState() accelerator = Accelerator() def flush(): torch.cuda.empty_cache() gc.collect() def parse_extra_args(extra_args): if extra_args is None or extra_args == ['']: return {} extra_args_dict = {} for extra_arg in extra_args: key, value = extra_arg.split("=") # convert value to various types if value.isdigit(): value = int(value) elif value.replace(".", "", 1).isdigit(): value = float(value) elif value[0] == "[" and value[-1] == "]": value = [i.replace('+', ' ') for i in value[1:-1].split(",")] value = [v.strip() for v in value] if value[0].isdigit(): value = [int(v) for v in value] elif value[0].replace(".", "", 1).isdigit(): value = [float(v) for v in value] extra_args_dict[key] = value return extra_args_dict def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path 
is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD
tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model(
4
2023-12-26 03:19:16+00:00
16k
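Note: for this record, gold_snippet_index 4 points at the model_util context entry, whose load_checkpoint_model(checkpoint_path, v2, clip_skip, weight_dtype, device) returns (tokenizer, text_encoder, unet, pipe), matching the four names bound by next_line. A minimal sketch of how that call plausibly completes; the argument values are assumptions inferred from the record's own cropped code, not ground truth stored in the dataset:

tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model(
    base_model,                  # checkpoint path / model id selected earlier in the cropped code
    v2=v2,                       # assumed: forwarded from infer_with_spm(..., v2=...)
    weight_dtype=weight_dtype,   # assumed: obtained via config.parse_precision(precision)
)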
dakpinaroglu/Frame2seq
frame2seq/openfold/utils/feats.py
[ { "identifier": "protein", "path": "frame2seq/openfold/np/protein.py", "snippet": "PICO_TO_ANGSTROM = 0.01\nclass Protein:\ndef from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:\ndef from_proteinnet_string(proteinnet_str: str) -> Protein:\ndef get_pdb_headers(prot: Protein, chain_id: int = 0) -> Sequence[str]:\ndef add_pdb_headers(prot: Protein, pdb_str: str) -> str:\ndef to_pdb(prot: Protein) -> str:\ndef ideal_atom_mask(prot: Protein) -> np.ndarray:\ndef from_prediction(\n features: FeatureDict,\n result: ModelOutput,\n b_factors: Optional[np.ndarray] = None,\n chain_index: Optional[np.ndarray] = None,\n remark: Optional[str] = None,\n parents: Optional[Sequence[str]] = None,\n parents_chain_index: Optional[Sequence[int]] = None\n) -> Protein:" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "batched_gather", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def add(m1, m2, inplace):\ndef permute_final_dims(tensor: torch.Tensor, inds: List[int]):\ndef flatten_final_dims(t: torch.Tensor, no_dims: int):\ndef masked_mean(mask, value, dim, eps=1e-4):\ndef pts_to_distogram(pts, min_bin=2.3125, max_bin=21.6875, no_bins=64):\ndef dict_multimap(fn, dicts):\ndef one_hot(x, v_bins):\ndef batched_gather(data, inds, dim=0, no_batch_dims=0):\ndef dict_map(fn, dic, leaf_type):\ndef tree_map(fn, tree, leaf_type):" } ]
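The context snippets above define OpenFold-style Rotation/Rigid wrappers for batched rigid transformations. As a quick orientation only (not part of the dataset record), here is a minimal usage sketch; the coordinate tensors are made-up placeholders and the import assumes the frame2seq.openfold package named in the snippet paths is importable.

# Minimal usage sketch of the Rigid utilities shown in the context snippets.
# Coordinates below are random placeholders, not data from the record.
import torch
from frame2seq.openfold.utils.rigid_utils import Rigid

# Build one backbone frame per residue from three atoms (Algorithm 21 style).
n_xyz = torch.randn(10, 3)    # hypothetical N-atom coordinates
ca_xyz = torch.randn(10, 3)   # hypothetical CA-atom coordinates (frame origins)
c_xyz = torch.randn(10, 3)    # hypothetical C-atom coordinates

frames = Rigid.from_3_points(p_neg_x_axis=n_xyz, origin=ca_xyz, p_xy_plane=c_xyz)
print(frames.shape)           # torch.Size([10]) -- shared batch dims of rots/trans

# apply() rotates then translates; invert_apply() undoes it (up to float error).
pts = torch.randn(10, 3)
roundtrip = frames.invert_apply(frames.apply(pts))
print(torch.allclose(roundtrip, pts, atol=1e-4))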
import math
import numpy as np
import torch
import torch.nn as nn
import frame2seq.openfold.np.residue_constants as rc
from typing import Dict
from frame2seq.openfold.np import protein
from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid
from frame2seq.openfold.utils.tensor_utils import (
    batched_gather,
    one_hot,
    tree_map,
    tensor_tree_map,
)
11,342
def build_template_pair_feat( batch, min_bin, max_bin, no_bins, use_unit_vector=False, eps=1e-20, inf=1e8 ): template_mask = batch["template_pseudo_beta_mask"] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] # Compute distogram (this seems to differ slightly from Alg. 5) tpb = batch["template_pseudo_beta"] dgram = torch.sum( (tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True ) lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2 upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1) dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype) to_concat = [dgram, template_mask_2d[..., None]] aatype_one_hot = nn.functional.one_hot( batch["template_aatype"], rc.restype_num + 2, ) n_res = batch["template_aatype"].shape[-1] to_concat.append( aatype_one_hot[..., None, :, :].expand( *aatype_one_hot.shape[:-2], n_res, -1, -1 ) ) to_concat.append( aatype_one_hot[..., None, :].expand( *aatype_one_hot.shape[:-2], -1, n_res, -1 ) ) n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]] rigids = Rigid.make_transform_from_reference( n_xyz=batch["template_all_atom_positions"][..., n, :], ca_xyz=batch["template_all_atom_positions"][..., ca, :], c_xyz=batch["template_all_atom_positions"][..., c, :], eps=eps, ) points = rigids.get_trans()[..., None, :, :] rigid_vec = rigids[..., None].invert_apply(points) inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec ** 2, dim=-1)) t_aa_masks = batch["template_all_atom_mask"] template_mask = ( t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c] ) template_mask_2d = template_mask[..., None] * template_mask[..., None, :] inv_distance_scalar = inv_distance_scalar * template_mask_2d unit_vector = rigid_vec * inv_distance_scalar[..., None] if(not use_unit_vector): unit_vector = unit_vector * 0. to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1)) to_concat.append(template_mask_2d[..., None]) act = torch.cat(to_concat, dim=-1) act = act * template_mask_2d[..., None] return act def build_extra_msa_feat(batch): msa_1hot = nn.functional.one_hot(batch["extra_msa"], 23) msa_feat = [ msa_1hot, batch["extra_has_deletion"].unsqueeze(-1), batch["extra_deletion_value"].unsqueeze(-1), ] return torch.cat(msa_feat, dim=-1) def torsion_angles_to_frames( r: Rigid, alpha: torch.Tensor, aatype: torch.Tensor, rrgdf: torch.Tensor, ): # [*, N, 8, 4, 4] default_4x4 = rrgdf[aatype, ...] # [*, N, 8] transformations, i.e. # One [*, N, 8, 3, 3] rotation matrix and # One [*, N, 8, 3] translation matrix default_r = r.from_tensor_4x4(default_4x4) bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2)) bb_rot[..., 1] = 1 # [*, N, 8, 2] alpha = torch.cat( [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2 ) # [*, N, 8, 3, 3] # Produces rotation matrices of the form: # [ # [1, 0 , 0 ], # [0, a_2,-a_1], # [0, a_1, a_2] # ] # This follows the original code rather than the supplement, which uses # different indices. all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape) all_rots[..., 0, 0] = 1 all_rots[..., 1, 1] = alpha[..., 1] all_rots[..., 1, 2] = -alpha[..., 0] all_rots[..., 2, 1:] = alpha
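The cropped_code above one-hot encodes squared pairwise pseudo-beta distances into no_bins buckets whose edges are squared, evenly spaced thresholds. A stand-alone sketch of just that binning step, with illustrative bin settings rather than values taken from the record:

# Distogram binning as in build_template_pair_feat above, on toy coordinates.
import torch

min_bin, max_bin, no_bins, inf = 3.25, 50.75, 39, 1e8   # illustrative settings
xyz = torch.randn(5, 3) * 10.0                           # toy pseudo-beta positions

d2 = torch.sum((xyz[..., None, :] - xyz[..., None, :, :]) ** 2, dim=-1, keepdim=True)
lower = torch.linspace(min_bin, max_bin, no_bins, device=xyz.device) ** 2
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
dgram = ((d2 > lower) * (d2 < upper)).type(d2.dtype)     # [5, 5, no_bins]

# Each residue pair lands in at most one bin (pairs closer than min_bin get none).
print(dgram.shape, int(dgram.sum(dim=-1).max()))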
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks): is_gly = aatype == rc.restype_order["G"] ca_idx = rc.atom_order["CA"] cb_idx = rc.atom_order["CB"] pseudo_beta = torch.where( is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3), all_atom_positions[..., ca_idx, :], all_atom_positions[..., cb_idx, :], ) if all_atom_masks is not None: pseudo_beta_mask = torch.where( is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx], ) return pseudo_beta, pseudo_beta_mask else: return pseudo_beta def atom14_to_atom37(atom14, batch): atom37_data = batched_gather( atom14, batch["residx_atom37_to_atom14"], dim=-2, no_batch_dims=len(atom14.shape[:-2]), ) atom37_data = atom37_data * batch["atom37_atom_exists"][..., None] return atom37_data def build_template_angle_feat(template_feats): template_aatype = template_feats["template_aatype"] torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"] alt_torsion_angles_sin_cos = template_feats[ "template_alt_torsion_angles_sin_cos" ] torsion_angles_mask = template_feats["template_torsion_angles_mask"] template_angle_feat = torch.cat( [ nn.functional.one_hot(template_aatype, 22), torsion_angles_sin_cos.reshape( *torsion_angles_sin_cos.shape[:-2], 14 ), alt_torsion_angles_sin_cos.reshape( *alt_torsion_angles_sin_cos.shape[:-2], 14 ), torsion_angles_mask, ], dim=-1, ) return template_angle_feat def build_template_pair_feat( batch, min_bin, max_bin, no_bins, use_unit_vector=False, eps=1e-20, inf=1e8 ): template_mask = batch["template_pseudo_beta_mask"] template_mask_2d = template_mask[..., None] * template_mask[..., None, :] # Compute distogram (this seems to differ slightly from Alg. 
5) tpb = batch["template_pseudo_beta"] dgram = torch.sum( (tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True ) lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2 upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1) dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype) to_concat = [dgram, template_mask_2d[..., None]] aatype_one_hot = nn.functional.one_hot( batch["template_aatype"], rc.restype_num + 2, ) n_res = batch["template_aatype"].shape[-1] to_concat.append( aatype_one_hot[..., None, :, :].expand( *aatype_one_hot.shape[:-2], n_res, -1, -1 ) ) to_concat.append( aatype_one_hot[..., None, :].expand( *aatype_one_hot.shape[:-2], -1, n_res, -1 ) ) n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]] rigids = Rigid.make_transform_from_reference( n_xyz=batch["template_all_atom_positions"][..., n, :], ca_xyz=batch["template_all_atom_positions"][..., ca, :], c_xyz=batch["template_all_atom_positions"][..., c, :], eps=eps, ) points = rigids.get_trans()[..., None, :, :] rigid_vec = rigids[..., None].invert_apply(points) inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec ** 2, dim=-1)) t_aa_masks = batch["template_all_atom_mask"] template_mask = ( t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c] ) template_mask_2d = template_mask[..., None] * template_mask[..., None, :] inv_distance_scalar = inv_distance_scalar * template_mask_2d unit_vector = rigid_vec * inv_distance_scalar[..., None] if(not use_unit_vector): unit_vector = unit_vector * 0. to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1)) to_concat.append(template_mask_2d[..., None]) act = torch.cat(to_concat, dim=-1) act = act * template_mask_2d[..., None] return act def build_extra_msa_feat(batch): msa_1hot = nn.functional.one_hot(batch["extra_msa"], 23) msa_feat = [ msa_1hot, batch["extra_has_deletion"].unsqueeze(-1), batch["extra_deletion_value"].unsqueeze(-1), ] return torch.cat(msa_feat, dim=-1) def torsion_angles_to_frames( r: Rigid, alpha: torch.Tensor, aatype: torch.Tensor, rrgdf: torch.Tensor, ): # [*, N, 8, 4, 4] default_4x4 = rrgdf[aatype, ...] # [*, N, 8] transformations, i.e. # One [*, N, 8, 3, 3] rotation matrix and # One [*, N, 8, 3] translation matrix default_r = r.from_tensor_4x4(default_4x4) bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2)) bb_rot[..., 1] = 1 # [*, N, 8, 2] alpha = torch.cat( [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2 ) # [*, N, 8, 3, 3] # Produces rotation matrices of the form: # [ # [1, 0 , 0 ], # [0, a_2,-a_1], # [0, a_1, a_2] # ] # This follows the original code rather than the supplement, which uses # different indices. all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape) all_rots[..., 0, 0] = 1 all_rots[..., 1, 1] = alpha[..., 1] all_rots[..., 1, 2] = -alpha[..., 0] all_rots[..., 2, 1:] = alpha
all_rots = Rigid(Rotation(rot_mats=all_rots), None)
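For context on the completion line above: just before it, torsion_angles_to_frames fills all_rots with per-angle rotation matrices of the form [[1, 0, 0], [0, a_2, -a_1], [0, a_1, a_2]] built from each (sin, cos) pair, and the predicted line wraps them into a Rigid with no translation. A small check, using a made-up angle and plain torch, that this construction is a proper rotation about the x axis:

# Verify the x-axis rotation form used for all_rots, with an illustrative angle.
import math
import torch

angle = 0.3                                    # illustrative torsion angle
a1, a2 = math.sin(angle), math.cos(angle)      # alpha[..., 0], alpha[..., 1]

rot = torch.tensor([
    [1.0, 0.0, 0.0],
    [0.0,  a2, -a1],
    [0.0,  a1,  a2],
])
# Orthonormal with determinant +1, i.e. a proper rotation.
print(torch.allclose(rot @ rot.T, torch.eye(3), atol=1e-6), round(torch.det(rot).item(), 6))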
1
2023-12-25 09:29:36+00:00
16k
KyanChen/TTP
mmpretrain/models/multimodal/clip/clip.py
[ { "identifier": "CIFAR100_CATEGORIES", "path": "mmpretrain/datasets/categories.py", "snippet": "CIFAR100_CATEGORIES = (\n 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',\n 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',\n 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock',\n 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur',\n 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster',\n 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion',\n 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain',\n 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree',\n 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy',\n 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket',\n 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail',\n 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper',\n 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train',\n 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf',\n 'woman', 'worm')" }, { "identifier": "IMAGENET_SIMPLE_CATEGORIES", "path": "mmpretrain/datasets/categories.py", "snippet": "IMAGENET_SIMPLE_CATEGORIES = (\n 'tench', 'goldfish', 'great white shark', 'tiger shark',\n 'hammerhead shark', 'electric ray', 'stingray', 'rooster', 'hen',\n 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco',\n 'indigo bunting', 'American robin', 'bulbul', 'jay', 'magpie', 'chickadee',\n 'American dipper', 'kite (bird of prey)', 'bald eagle', 'vulture',\n 'great grey owl', 'fire salamander', 'smooth newt', 'newt',\n 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog',\n 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle',\n 'mud turtle', 'terrapin', 'box turtle', 'banded gecko', 'green iguana',\n 'Carolina anole', 'desert grassland whiptail lizard', 'agama',\n 'frilled-necked lizard', 'alligator lizard', 'Gila monster',\n 'European green lizard', 'chameleon', 'Komodo dragon', 'Nile crocodile',\n 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake',\n 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake',\n 'garter snake', 'water snake', 'vine snake', 'night snake',\n 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba',\n 'sea snake', 'Saharan horned viper', 'eastern diamondback rattlesnake',\n 'sidewinder rattlesnake', 'trilobite', 'harvestman', 'scorpion',\n 'yellow garden spider', 'barn spider', 'European garden spider',\n 'southern black widow', 'tarantula', 'wolf spider', 'tick', 'centipede',\n 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peafowl',\n 'quail', 'partridge', 'african grey parrot', 'macaw',\n 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', 'hornbill',\n 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser',\n 'goose', 'black swan', 'tusker', 'echidna', 'platypus', 'wallaby', 'koala',\n 'wombat', 'jellyfish', 'sea anemone', 'brain coral', 'flatworm',\n 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton',\n 'chambered nautilus', 'Dungeness crab', 'rock crab', 'fiddler crab',\n 'red king crab', 'American lobster', 'spiny lobster', 'crayfish',\n 'hermit crab', 'isopod', 'white stork', 'black stork', 'spoonbill',\n 'flamingo', 'little blue heron', 'great egret', 'bittern bird',\n 'crane bird', 'limpkin', 'common gallinule', 'American coot', 'bustard',\n 'ruddy turnstone', 
'dunlin', 'common redshank', 'dowitcher',\n 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale',\n 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin',\n 'Maltese', 'Pekingese', 'Shih Tzu', 'King Charles Spaniel', 'Papillon',\n 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound',\n 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound',\n 'Treeing Walker Coonhound', 'English foxhound', 'Redbone Coonhound',\n 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet',\n 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki',\n 'Scottish Deerhound', 'Weimaraner', 'Staffordshire Bull Terrier',\n 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier',\n 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier',\n 'Norwich Terrier', 'Yorkshire Terrier', 'Wire Fox Terrier',\n 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier',\n 'Cairn Terrier', 'Australian Terrier', 'Dandie Dinmont Terrier',\n 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer',\n 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier',\n 'Australian Silky Terrier', 'Soft-coated Wheaten Terrier',\n 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever',\n 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever',\n 'Chesapeake Bay Retriever', 'German Shorthaired Pointer', 'Vizsla',\n 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany dog',\n 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel',\n 'Cocker Spaniel', 'Sussex Spaniel', 'Irish Water Spaniel', 'Kuvasz',\n 'Schipperke', 'Groenendael dog', 'Malinois', 'Briard', 'Australian Kelpie',\n 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie',\n 'Border Collie', 'Bouvier des Flandres dog', 'Rottweiler',\n 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher',\n 'Greater Swiss Mountain Dog', 'Bernese Mountain Dog',\n 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff',\n 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky',\n 'Alaskan Malamute', 'Siberian Husky', 'Dalmatian', 'Affenpinscher',\n 'Basenji', 'pug', 'Leonberger', 'Newfoundland dog', 'Great Pyrenees dog',\n 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'brussels griffon',\n 'Pembroke Welsh Corgi', 'Cardigan Welsh Corgi', 'Toy Poodle',\n 'Miniature Poodle', 'Standard Poodle',\n 'Mexican hairless dog (xoloitzcuintli)', 'grey wolf',\n 'Alaskan tundra wolf', 'red wolf or maned wolf', 'coyote', 'dingo',\n 'dhole', 'African wild dog', 'hyena', 'red fox', 'kit fox', 'Arctic fox',\n 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat',\n 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar',\n 'lion', 'tiger', 'cheetah', 'brown bear', 'American black bear',\n 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle',\n 'ladybug', 'ground beetle', 'longhorn beetle', 'leaf beetle',\n 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', 'ant',\n 'grasshopper', 'cricket insect', 'stick insect', 'cockroach',\n 'praying mantis', 'cicada', 'leafhopper', 'lacewing', 'dragonfly',\n 'damselfly', 'red admiral butterfly', 'ringlet butterfly',\n 'monarch butterfly', 'small white butterfly', 'sulphur butterfly',\n 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber',\n 'cottontail rabbit', 'hare', 'Angora rabbit', 'hamster', 'porcupine',\n 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel horse',\n 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox',\n 'water buffalo', 'bison', 'ram (adult male sheep)', 'bighorn sheep',\n 'Alpine ibex', 'hartebeest', 'impala (antelope)', 'gazelle',\n 'arabian camel', 'llama', 'weasel', 'mink', 'European polecat',\n 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo',\n 'three-toed sloth', 'orangutan', 'gorilla', 'chimpanzee', 'gibbon',\n 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur',\n 'black-and-white colobus', 'proboscis monkey', 'marmoset',\n 'white-headed capuchin', 'howler monkey', 'titi monkey',\n \"Geoffroy's spider monkey\", 'common squirrel monkey', 'ring-tailed lemur',\n 'indri', 'Asian elephant', 'African bush elephant', 'red panda',\n 'giant panda', 'snoek fish', 'eel', 'silver salmon', 'rock beauty fish',\n 'clownfish', 'sturgeon', 'gar fish', 'lionfish', 'pufferfish', 'abacus',\n 'abaya', 'academic gown', 'accordion', 'acoustic guitar',\n 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance',\n 'amphibious vehicle', 'analog clock', 'apiary', 'apron', 'trash can',\n 'assault rifle', 'backpack', 'bakery', 'balance beam', 'balloon',\n 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster / handrail', 'barbell',\n 'barber chair', 'barbershop', 'barn', 'barometer', 'barrel', 'wheelbarrow',\n 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap',\n 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker',\n 'military hat (bearskin or shako)', 'beer bottle', 'beer glass',\n 'bell tower', 'baby bib', 'tandem bicycle', 'bikini', 'ring binder',\n 'binoculars', 'birdhouse', 'boathouse', 'bobsleigh', 'bolo tie',\n 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'hunting bow',\n 'bow tie', 'brass memorial plaque', 'bra', 'breakwater', 'breastplate',\n 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train',\n 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe',\n 'can opener', 'cardigan', 'car mirror', 'carousel', 'tool kit',\n 'cardboard box / carton', 'car wheel', 'automated teller machine',\n 'cassette', 'cassette player', 'castle', 
'catamaran', 'CD player', 'cello',\n 'mobile phone', 'chain', 'chain-link fence', 'chain mail', 'chainsaw',\n 'storage chest', 'chiffonier', 'bell or wind chime', 'china cabinet',\n 'Christmas stocking', 'church', 'movie theater', 'cleaver',\n 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug',\n 'coffeemaker', 'spiral or coil', 'combination lock', 'computer keyboard',\n 'candy store', 'container ship', 'convertible', 'corkscrew', 'cornet',\n 'cowboy boot', 'cowboy hat', 'cradle', 'construction crane',\n 'crash helmet', 'crate', 'infant bed', 'Crock Pot', 'croquet ball',\n 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer',\n 'rotary dial telephone', 'diaper', 'digital clock', 'digital watch',\n 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock',\n 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick',\n 'dumbbell', 'Dutch oven', 'electric fan', 'electric guitar',\n 'electric locomotive', 'entertainment center', 'envelope',\n 'espresso machine', 'face powder', 'feather boa', 'filing cabinet',\n 'fireboat', 'fire truck', 'fire screen', 'flagpole', 'flute',\n 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen',\n 'four-poster bed', 'freight car', 'French horn', 'frying pan', 'fur coat',\n 'garbage truck', 'gas mask or respirator', 'gas pump', 'goblet', 'go-kart',\n 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano',\n 'greenhouse', 'radiator grille', 'grocery store', 'guillotine',\n 'hair clip', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer',\n 'hand-held computer', 'handkerchief', 'hard disk drive', 'harmonica',\n 'harp', 'combine harvester', 'hatchet', 'holster', 'home theater',\n 'honeycomb', 'hook', 'hoop skirt', 'gymnastic horizontal bar',\n 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron',\n 'carved pumpkin', 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'rickshaw',\n 'joystick', 'kimono', 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade',\n 'laptop computer', 'lawn mower', 'lens cap', 'letter opener', 'library',\n 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick',\n 'slip-on shoe', 'lotion', 'music speaker', 'loupe magnifying glass',\n 'sawmill', 'magnetic compass', 'messenger bag', 'mailbox', 'tights',\n 'one-piece bathing suit', 'manhole cover', 'maraca', 'marimba', 'mask',\n 'matchstick', 'maypole', 'maze', 'measuring cup', 'medicine cabinet',\n 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can',\n 'minibus', 'miniskirt', 'minivan', 'missile', 'mitten', 'mixing bowl',\n 'mobile home', 'ford model t', 'modem', 'monastery', 'monitor', 'moped',\n 'mortar and pestle', 'graduation cap', 'mosque', 'mosquito net', 'vespa',\n 'mountain bike', 'tent', 'computer mouse', 'mousetrap', 'moving van',\n 'muzzle', 'metal nail', 'neck brace', 'necklace', 'baby pacifier',\n 'notebook computer', 'obelisk', 'oboe', 'ocarina', 'odometer',\n 'oil filter', 'pipe organ', 'oscilloscope', 'overskirt', 'bullock cart',\n 'oxygen mask', 'product packet / packaging', 'paddle', 'paddle wheel',\n 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', 'paper towel',\n 'parachute', 'parallel bars', 'park bench', 'parking meter',\n 'railroad car', 'patio', 'payphone', 'pedestal', 'pencil case',\n 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum',\n 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank',\n 'pill bottle', 'pillow', 'ping-pong ball', 'pinwheel', 'pirate ship',\n 'drink pitcher', 'block plane', 'planetarium', 'plastic 
bag', 'plate rack',\n 'farm plow', 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho',\n 'pool table', 'soda bottle', 'plant pot', \"potter's wheel\", 'power drill',\n 'prayer rug', 'printer', 'prison', 'missile', 'projector', 'hockey puck',\n 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket',\n 'radiator', 'radio', 'radio telescope', 'rain barrel',\n 'recreational vehicle', 'fishing casting reel', 'reflex camera',\n 'refrigerator', 'remote control', 'restaurant', 'revolver', 'rifle',\n 'rocking chair', 'rotisserie', 'eraser', 'rugby ball',\n 'ruler measuring stick', 'sneaker', 'safe', 'safety pin', 'salt shaker',\n 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale',\n 'school bus', 'schooner', 'scoreboard', 'CRT monitor', 'screw',\n 'screwdriver', 'seat belt', 'sewing machine', 'shield', 'shoe store',\n 'shoji screen / room divider', 'shopping basket', 'shopping cart',\n 'shovel', 'shower cap', 'shower curtain', 'ski', 'balaclava ski mask',\n 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel',\n 'snowmobile', 'snowplow', 'soap dispenser', 'soccer ball', 'sock',\n 'solar thermal collector', 'sombrero', 'soup bowl', 'keyboard space bar',\n 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web',\n 'spindle', 'sports car', 'spotlight', 'stage', 'steam locomotive',\n 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall',\n 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa',\n 'submarine', 'suit', 'sundial', 'sunglasses', 'sunglasses', 'sunscreen',\n 'suspension bridge', 'mop', 'sweatshirt', 'swim trunks / shorts', 'swing',\n 'electrical switch', 'syringe', 'table lamp', 'tank', 'tape player',\n 'teapot', 'teddy bear', 'television', 'tennis ball', 'thatched roof',\n 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof',\n 'toaster', 'tobacco shop', 'toilet seat', 'torch', 'totem pole',\n 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', 'tray',\n 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch',\n 'trolleybus', 'trombone', 'hot tub', 'turnstile', 'typewriter keyboard',\n 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase',\n 'vaulted or arched ceiling', 'velvet fabric', 'vending machine',\n 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock',\n 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine',\n 'water bottle', 'water jug', 'water tower', 'whiskey jug', 'whistle',\n 'hair wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle',\n 'airplane wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence',\n 'shipwreck', 'sailboat', 'yurt', 'website', 'comic book', 'crossword',\n 'traffic or street sign', 'traffic light', 'dust jacket', 'menu', 'plate',\n 'guacamole', 'consomme', 'hot pot', 'trifle', 'ice cream', 'popsicle',\n 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog',\n 'mashed potatoes', 'cabbage', 'broccoli', 'cauliflower', 'zucchini',\n 'spaghetti squash', 'acorn squash', 'butternut squash', 'cucumber',\n 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith apple',\n 'strawberry', 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit',\n 'cherimoya (custard apple)', 'pomegranate', 'hay', 'carbonara',\n 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito',\n 'red wine', 'espresso', 'tea cup', 'eggnog', 'mountain', 'bubble', 'cliff',\n 'coral reef', 'geyser', 'lakeshore', 'promontory', 'sandbar', 'beach',\n 'valley', 'volcano', 
'baseball player', 'bridegroom', 'scuba diver',\n 'rapeseed', 'daisy', \"yellow lady's slipper\", 'corn', 'acorn', 'rose hip',\n 'horse chestnut seed', 'coral fungus', 'agaric', 'gyromitra',\n 'stinkhorn mushroom', 'earth star fungus', 'hen of the woods mushroom',\n 'bolete', 'corn cob', 'toilet paper')" }, { "identifier": "MODELS", "path": "mmpretrain/registry.py", "snippet": "MODELS = Registry(\n 'model',\n parent=MMENGINE_MODELS,\n locations=['mmpretrain.models'],\n)" }, { "identifier": "TOKENIZER", "path": "mmpretrain/registry.py", "snippet": "TOKENIZER = Registry(\n 'tokenizer',\n locations=['mmpretrain.models'],\n)" }, { "identifier": "DataSample", "path": "mmpretrain/structures/data_sample.py", "snippet": "class DataSample(BaseDataElement):\n \"\"\"A general data structure interface.\n\n It's used as the interface between different components.\n\n The following fields are convention names in MMPretrain, and we will set or\n get these fields in data transforms, models, and metrics if needed. You can\n also set any new fields for your need.\n\n Meta fields:\n img_shape (Tuple): The shape of the corresponding input image.\n ori_shape (Tuple): The original shape of the corresponding image.\n sample_idx (int): The index of the sample in the dataset.\n num_classes (int): The number of all categories.\n\n Data fields:\n gt_label (tensor): The ground truth label.\n gt_score (tensor): The ground truth score.\n pred_label (tensor): The predicted label.\n pred_score (tensor): The predicted score.\n mask (tensor): The mask used in masked image modeling.\n\n Examples:\n >>> import torch\n >>> from mmpretrain.structures import DataSample\n >>>\n >>> img_meta = dict(img_shape=(960, 720), num_classes=5)\n >>> data_sample = DataSample(metainfo=img_meta)\n >>> data_sample.set_gt_label(3)\n >>> print(data_sample)\n <DataSample(\n META INFORMATION\n num_classes: 5\n img_shape: (960, 720)\n DATA FIELDS\n gt_label: tensor([3])\n ) at 0x7ff64c1c1d30>\n >>>\n >>> # For multi-label data\n >>> data_sample = DataSample().set_gt_label([0, 1, 4])\n >>> print(data_sample)\n <DataSample(\n DATA FIELDS\n gt_label: tensor([0, 1, 4])\n ) at 0x7ff5b490e100>\n >>>\n >>> # Set one-hot format score\n >>> data_sample = DataSample().set_pred_score([0.1, 0.1, 0.6, 0.1])\n >>> print(data_sample)\n <DataSample(\n META INFORMATION\n num_classes: 4\n DATA FIELDS\n pred_score: tensor([0.1000, 0.1000, 0.6000, 0.1000])\n ) at 0x7ff5b48ef6a0>\n >>>\n >>> # Set custom field\n >>> data_sample = DataSample()\n >>> data_sample.my_field = [1, 2, 3]\n >>> print(data_sample)\n <DataSample(\n DATA FIELDS\n my_field: [1, 2, 3]\n ) at 0x7f8e9603d3a0>\n >>> print(data_sample.my_field)\n [1, 2, 3]\n \"\"\"\n\n def set_gt_label(self, value: LABEL_TYPE) -> 'DataSample':\n \"\"\"Set ``gt_label``.\"\"\"\n self.set_field(format_label(value), 'gt_label', dtype=torch.Tensor)\n return self\n\n def set_gt_score(self, value: SCORE_TYPE) -> 'DataSample':\n \"\"\"Set ``gt_score``.\"\"\"\n score = format_score(value)\n self.set_field(score, 'gt_score', dtype=torch.Tensor)\n if hasattr(self, 'num_classes'):\n assert len(score) == self.num_classes, \\\n f'The length of score {len(score)} should be '\\\n f'equal to the num_classes {self.num_classes}.'\n else:\n self.set_field(\n name='num_classes', value=len(score), field_type='metainfo')\n return self\n\n def set_pred_label(self, value: LABEL_TYPE) -> 'DataSample':\n \"\"\"Set ``pred_label``.\"\"\"\n self.set_field(format_label(value), 'pred_label', dtype=torch.Tensor)\n return self\n\n def 
set_pred_score(self, value: SCORE_TYPE):\n \"\"\"Set ``pred_label``.\"\"\"\n score = format_score(value)\n self.set_field(score, 'pred_score', dtype=torch.Tensor)\n if hasattr(self, 'num_classes'):\n assert len(score) == self.num_classes, \\\n f'The length of score {len(score)} should be '\\\n f'equal to the num_classes {self.num_classes}.'\n else:\n self.set_field(\n name='num_classes', value=len(score), field_type='metainfo')\n return self\n\n def set_mask(self, value: Union[torch.Tensor, np.ndarray]):\n if isinstance(value, np.ndarray):\n value = torch.from_numpy(value)\n elif not isinstance(value, torch.Tensor):\n raise TypeError(f'Invalid mask type {type(value)}')\n self.set_field(value, 'mask', dtype=torch.Tensor)\n return self\n\n def __repr__(self) -> str:\n \"\"\"Represent the object.\"\"\"\n\n def dump_items(items, prefix=''):\n return '\\n'.join(f'{prefix}{k}: {v}' for k, v in items)\n\n repr_ = ''\n if len(self._metainfo_fields) > 0:\n repr_ += '\\n\\nMETA INFORMATION\\n'\n repr_ += dump_items(self.metainfo_items(), prefix=' ' * 4)\n if len(self._data_fields) > 0:\n repr_ += '\\n\\nDATA FIELDS\\n'\n repr_ += dump_items(self.items(), prefix=' ' * 4)\n\n repr_ = f'<{self.__class__.__name__}({repr_}\\n\\n) at {hex(id(self))}>'\n return repr_" }, { "identifier": "track_on_main_process", "path": "mmpretrain/utils/progress.py", "snippet": "def track_on_main_process(sequence, description='', total=None):\n if not dist.is_main_process() or disable_progress_bar:\n yield from sequence\n else:\n yield from track(sequence, total=total, description=description)" }, { "identifier": "OPENAI_CIFAR100_PROMPT", "path": "mmpretrain/models/multimodal/clip/utils.py", "snippet": "OPENAI_CIFAR100_PROMPT = [\n lambda c: f'a photo of a {c}.',\n lambda c: f'a blurry photo of a {c}.',\n lambda c: f'a black and white photo of a {c}.',\n lambda c: f'a low contrast photo of a {c}.',\n lambda c: f'a high contrast photo of a {c}.',\n lambda c: f'a bad photo of a {c}.',\n lambda c: f'a good photo of a {c}.',\n lambda c: f'a photo of a small {c}.',\n lambda c: f'a photo of a big {c}.',\n lambda c: f'a photo of the {c}.',\n lambda c: f'a blurry photo of the {c}.',\n lambda c: f'a black and white photo of the {c}.',\n lambda c: f'a low contrast photo of the {c}.',\n lambda c: f'a high contrast photo of the {c}.',\n lambda c: f'a bad photo of the {c}.',\n lambda c: f'a good photo of the {c}.',\n lambda c: f'a photo of the small {c}.',\n lambda c: f'a photo of the big {c}.',\n]" }, { "identifier": "OPENAI_IMAGENET_PROMPT", "path": "mmpretrain/models/multimodal/clip/utils.py", "snippet": "OPENAI_IMAGENET_PROMPT = [\n lambda c: f'a bad photo of a {c}.',\n lambda c: f'a photo of many {c}.',\n lambda c: f'a sculpture of a {c}.',\n lambda c: f'a photo of the hard to see {c}.',\n lambda c: f'a low resolution photo of the {c}.',\n lambda c: f'a rendering of a {c}.',\n lambda c: f'graffiti of a {c}.',\n lambda c: f'a bad photo of the {c}.',\n lambda c: f'a cropped photo of the {c}.',\n lambda c: f'a tattoo of a {c}.',\n lambda c: f'the embroidered {c}.',\n lambda c: f'a photo of a hard to see {c}.',\n lambda c: f'a bright photo of a {c}.',\n lambda c: f'a photo of a clean {c}.',\n lambda c: f'a photo of a dirty {c}.',\n lambda c: f'a dark photo of the {c}.',\n lambda c: f'a drawing of a {c}.',\n lambda c: f'a photo of my {c}.',\n lambda c: f'the plastic {c}.',\n lambda c: f'a photo of the cool {c}.',\n lambda c: f'a close-up photo of a {c}.',\n lambda c: f'a black and white photo of the {c}.',\n lambda c: f'a painting of 
the {c}.',\n lambda c: f'a painting of a {c}.',\n lambda c: f'a pixelated photo of the {c}.',\n lambda c: f'a sculpture of the {c}.',\n lambda c: f'a bright photo of the {c}.',\n lambda c: f'a cropped photo of a {c}.',\n lambda c: f'a plastic {c}.',\n lambda c: f'a photo of the dirty {c}.',\n lambda c: f'a jpeg corrupted photo of a {c}.',\n lambda c: f'a blurry photo of the {c}.',\n lambda c: f'a photo of the {c}.',\n lambda c: f'a good photo of the {c}.',\n lambda c: f'a rendering of the {c}.',\n lambda c: f'a {c} in a video game.',\n lambda c: f'a photo of one {c}.',\n lambda c: f'a doodle of a {c}.',\n lambda c: f'a close-up photo of the {c}.',\n lambda c: f'a photo of a {c}.',\n lambda c: f'the origami {c}.',\n lambda c: f'the {c} in a video game.',\n lambda c: f'a sketch of a {c}.',\n lambda c: f'a doodle of the {c}.',\n lambda c: f'a origami {c}.',\n lambda c: f'a low resolution photo of a {c}.',\n lambda c: f'the toy {c}.',\n lambda c: f'a rendition of the {c}.',\n lambda c: f'a photo of the clean {c}.',\n lambda c: f'a photo of a large {c}.',\n lambda c: f'a rendition of a {c}.',\n lambda c: f'a photo of a nice {c}.',\n lambda c: f'a photo of a weird {c}.',\n lambda c: f'a blurry photo of a {c}.',\n lambda c: f'a cartoon {c}.',\n lambda c: f'art of a {c}.',\n lambda c: f'a sketch of the {c}.',\n lambda c: f'a embroidered {c}.',\n lambda c: f'a pixelated photo of a {c}.',\n lambda c: f'itap of the {c}.',\n lambda c: f'a jpeg corrupted photo of the {c}.',\n lambda c: f'a good photo of a {c}.',\n lambda c: f'a plushie {c}.',\n lambda c: f'a photo of the nice {c}.',\n lambda c: f'a photo of the small {c}.',\n lambda c: f'a photo of the weird {c}.',\n lambda c: f'the cartoon {c}.',\n lambda c: f'art of the {c}.',\n lambda c: f'a drawing of the {c}.',\n lambda c: f'a photo of the large {c}.',\n lambda c: f'a black and white photo of a {c}.',\n lambda c: f'the plushie {c}.',\n lambda c: f'a dark photo of a {c}.',\n lambda c: f'itap of a {c}.',\n lambda c: f'graffiti of the {c}.',\n lambda c: f'a toy {c}.',\n lambda c: f'itap of my {c}.',\n lambda c: f'a photo of a cool {c}.',\n lambda c: f'a photo of a small {c}.',\n lambda c: f'a tattoo of the {c}.',\n]" }, { "identifier": "OPENAI_IMAGENET_PROMPT_SUB", "path": "mmpretrain/models/multimodal/clip/utils.py", "snippet": "OPENAI_IMAGENET_PROMPT_SUB = [\n lambda c: f'itap of a {c}.',\n lambda c: f'a bad photo of the {c}.',\n lambda c: f'a origami {c}.',\n lambda c: f'a photo of the large {c}.',\n lambda c: f'a {c} in a video game.',\n lambda c: f'art of the {c}.',\n lambda c: f'a photo of the small {c}.',\n]" } ]
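The context above ends with the OpenAI-style prompt template lists used to build zero-shot class prototypes. As a hedged sketch of the general idea only: each class name is expanded through every template, the per-prompt text embeddings are L2-normalized and averaged, and the mean is re-normalized into one prototype per class. The encode_text below is a random stand-in, not the model's real text tower, and the template subset and class names are illustrative.

# Prompt-ensembling sketch; encode_text is a placeholder, not CLIP's encoder.
import torch

templates = [
    lambda c: f'a photo of a {c}.',
    lambda c: f'a blurry photo of a {c}.',
    lambda c: f'a bad photo of a {c}.',
]
classnames = ['goldfish', 'castle']            # illustrative class names

def encode_text(texts, dim=512):
    # placeholder encoder: one random vector per prompt string
    return torch.randn(len(texts), dim)

prototypes = []
for name in classnames:
    feats = encode_text([t(name) for t in templates])
    feats = feats / feats.norm(dim=-1, keepdim=True)   # normalize per prompt
    mean = feats.mean(dim=0)
    prototypes.append(mean / mean.norm())              # one prototype per class

text_prototypes = torch.stack(prototypes)              # [num_classes, dim]
print(text_prototypes.shape)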
from abc import abstractmethod
from typing import List, Optional, Tuple, Union
from mmengine.model import BaseModel
from torch import nn
from mmpretrain.datasets.categories import (CIFAR100_CATEGORIES,
                                            IMAGENET_SIMPLE_CATEGORIES)
from mmpretrain.registry import MODELS, TOKENIZER
from mmpretrain.structures import DataSample
from mmpretrain.utils import track_on_main_process
from .utils import (OPENAI_CIFAR100_PROMPT, OPENAI_IMAGENET_PROMPT,
                    OPENAI_IMAGENET_PROMPT_SUB)
import numpy as np
import torch
import torch.nn.functional as F
10,842
def initialize_parameters(self) -> None: """Initialize the parameters. The pretrained weight will override the initialized parameters by this function. """ nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.positional_embedding, std=0.01) proj_std = (self.transformer.width**-0.5) * ( (2 * self.transformer.layers)**-0.5) attn_std = self.transformer.width**-0.5 fc_std = (2 * self.transformer.width)**-0.5 for block in self.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std) nn.init.normal_(block.attn.out_proj.weight, std=proj_std) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) if self.text_projection is not None: nn.init.normal_( self.text_projection, std=self.transformer.width**-0.5) def build_attention_mask(self): # lazily create causal attention mask, # with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(self.context_length, self.context_length) mask.fill_(float('-inf')) mask.triu_(1) # zero out the lower diagonal return mask def forward( self, images: torch.Tensor, data_samples: Optional[list] = None, mode: str = 'predict', **kwargs, ): """The unified entry for a forward process in both training and test. The method accepts the following modes: - "predict": Forward and return a list of data samples contain the predict results. Args: images (torch.Tensor): the preprocessed image tensor of shape ``(N, C, H, W)``. data_samples (List[DataSample], optional): The annotation data of every samples. Defaults to None. mode (str): Return what kind of value. Defaults to 'predict'. """ if mode == 'predict': return self.predict(images, data_samples, **kwargs) else: raise RuntimeError(f'Invalid mode "{mode}".') def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor: """The function to extract image latent features.""" return self.visual_proj(self.visual(images))[0] def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor: """The function to extract text latent features.""" x = self.token_embedding(texts) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x)[0] x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # x.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding # (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), texts.argmax(dim=-1)] @ self.text_projection return x def extract_feat( self, images: torch.Tensor, texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]: """The function to extract image and text latent features, the input image or text can not both be None.""" assert images is not None or texts is not None, \ 'text and image cannot both be None!' 
if images is None: return self.extract_text_feat(texts) elif texts is None: return self.extract_image_feat(images) image_features = self.extract_image_feat(images) text_features = self.extract_text_feat(texts) image_features = image_features / image_features.norm( dim=-1, keepdim=True) text_features = text_features / text_features.norm( dim=-1, keepdim=True) return image_features, text_features def compute_similarity(self, images, texts): """Extract images and texts features and compute cosine similarity.""" image_features, text_features = self.extract_feat( images=images, texts=texts) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_image = logit_scale * image_features @ text_features.t() logits_per_text = logits_per_image.t() # shape (N, N) return logits_per_image, logits_per_text @abstractmethod def predict(self, images: torch.Tensor,
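The cropped_code above builds the text transformer's causal attention mask by filling a square matrix with -inf and keeping only the strict upper triangle. A tiny stand-alone illustration with a toy context length (the record itself uses 77):

# Causal mask construction as in build_attention_mask above, toy size.
import torch

context_length = 5
mask = torch.empty(context_length, context_length)
mask.fill_(float('-inf'))
mask.triu_(1)   # keep -inf strictly above the diagonal, zero elsewhere
print(mask)
# row i can attend to positions 0..i; later positions are masked with -inf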
# Copyright (c) OpenMMLab. All rights reserved. CIFAR100_CATEGORIES = [' '.join(c.split('_')) for c in CIFAR100_CATEGORIES] PROTOTYPE_MAP = { 'imagenet': IMAGENET_SIMPLE_CATEGORIES, 'cifar100': CIFAR100_CATEGORIES, } PROMPT_MAP = { 'openai_imagenet': OPENAI_IMAGENET_PROMPT, 'openai_cifar100': OPENAI_CIFAR100_PROMPT, 'vanilla': [lambda c: f'a photo of a {c}'], 'openai_imagenet_sub': OPENAI_IMAGENET_PROMPT_SUB } class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward function.""" orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) class CLIP(BaseModel): """The implementation of `CLIP <https://arxiv.org/abs/2103.00020>`_. Args: vision_backbone (dict): Config dict for vision backbone. text_backbone (dict): Config dict for text backbone. tokenizer (dict): Config dict for text tokenizer. proj_dim (int): Projection dimension for similarity computation. text_prototype (str): Text prototype, which can be a key in `PROTOTYPE_MAP` or list of text. text_prompt (str): The prompt for text prototype. Defaults to 'vanilla',which refers to "a photo of {cls}". context_length (int): The context length to use. Defaults to 77. data_preprocessor (Union[dict, nn.Module], optional): The config for preprocessing input data. If None or no specified type, it will use "MultiModalDataPreprocessor" as type. See :class:`MultiModalDataPreprocessor` for more details. Defaults to None. init_cfg (dict, optional): The config to control the initialization. Defaults to None. """ def __init__(self, vision_backbone: dict, projection: dict, text_backbone: dict, tokenizer: dict, vocab_size: int, transformer_width: int, proj_dim: int, context_length: int = 77, data_preprocessor: Optional[dict] = None, init_cfg: Optional[dict] = None): if data_preprocessor is None: data_preprocessor = {} data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') data_preprocessor = MODELS.build(data_preprocessor) super().__init__( data_preprocessor=data_preprocessor, init_cfg=init_cfg) self.context_length = context_length # build the vision transformer self.visual = MODELS.build(vision_backbone) # build the visual projection self.visual_proj = MODELS.build(projection) # build attn_mask for casual-attn text_backbone['attn_mask'] = self.build_attention_mask() # build the text transformer self.transformer = MODELS.build(text_backbone) self.vocab_size = vocab_size self.token_embedding = nn.Embedding(vocab_size, transformer_width) self.positional_embedding = nn.Parameter( torch.empty(self.context_length, transformer_width)) self.ln_final = LayerNorm(transformer_width) self.text_projection = nn.Parameter( torch.empty(transformer_width, proj_dim)) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) self.initialize_parameters() self.tokenizer = TOKENIZER.build(tokenizer) self.tokenizer.vocab = self.tokenizer.get_vocab( ) # CLIPTokenizer has no attribute named 'vocab', so manually def initialize_parameters(self) -> None: """Initialize the parameters. The pretrained weight will override the initialized parameters by this function. 
""" nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.positional_embedding, std=0.01) proj_std = (self.transformer.width**-0.5) * ( (2 * self.transformer.layers)**-0.5) attn_std = self.transformer.width**-0.5 fc_std = (2 * self.transformer.width)**-0.5 for block in self.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std) nn.init.normal_(block.attn.out_proj.weight, std=proj_std) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) if self.text_projection is not None: nn.init.normal_( self.text_projection, std=self.transformer.width**-0.5) def build_attention_mask(self): # lazily create causal attention mask, # with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(self.context_length, self.context_length) mask.fill_(float('-inf')) mask.triu_(1) # zero out the lower diagonal return mask def forward( self, images: torch.Tensor, data_samples: Optional[list] = None, mode: str = 'predict', **kwargs, ): """The unified entry for a forward process in both training and test. The method accepts the following modes: - "predict": Forward and return a list of data samples contain the predict results. Args: images (torch.Tensor): the preprocessed image tensor of shape ``(N, C, H, W)``. data_samples (List[DataSample], optional): The annotation data of every samples. Defaults to None. mode (str): Return what kind of value. Defaults to 'predict'. """ if mode == 'predict': return self.predict(images, data_samples, **kwargs) else: raise RuntimeError(f'Invalid mode "{mode}".') def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor: """The function to extract image latent features.""" return self.visual_proj(self.visual(images))[0] def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor: """The function to extract text latent features.""" x = self.token_embedding(texts) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x)[0] x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # x.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding # (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), texts.argmax(dim=-1)] @ self.text_projection return x def extract_feat( self, images: torch.Tensor, texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]: """The function to extract image and text latent features, the input image or text can not both be None.""" assert images is not None or texts is not None, \ 'text and image cannot both be None!' 
if images is None: return self.extract_text_feat(texts) elif texts is None: return self.extract_image_feat(images) image_features = self.extract_image_feat(images) text_features = self.extract_text_feat(texts) image_features = image_features / image_features.norm( dim=-1, keepdim=True) text_features = text_features / text_features.norm( dim=-1, keepdim=True) return image_features, text_features def compute_similarity(self, images, texts): """Extract images and texts features and compute cosine similarity.""" image_features, text_features = self.extract_feat( images=images, texts=texts) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_image = logit_scale * image_features @ text_features.t() logits_per_text = logits_per_image.t() # shape (N, N) return logits_per_image, logits_per_text @abstractmethod def predict(self, images: torch.Tensor,
data_samples: DataSample = None) -> DataSample:
4
2023-12-23 08:36:47+00:00
16k
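The compute_similarity method in the snippet above reduces to scaled cosine similarity between L2-normalized image and text features. A minimal self-contained sketch of that computation follows; the batch size, embedding dimension and logit-scale value are illustrative assumptions, and random tensors stand in for the real encoder outputs.

import torch

# stand-ins for extract_image_feat / extract_text_feat outputs (assumed shapes)
image_features = torch.randn(4, 512)   # (N, embed_dim)
text_features = torch.randn(4, 512)    # (N, embed_dim)

# L2-normalize, as done in extract_feat before computing similarity
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)

# cosine similarity scaled by a learned temperature (logit_scale.exp() in the model)
logit_scale = torch.tensor(100.0)       # assumed value; CLIP-style models learn this
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()  # shape (N, N)
print(logits_per_image.shape)           # torch.Size([4, 4])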
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n 
audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n # from https://github.com/YYuX-1145/Bert-VITS2-Integration-package\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.randn(1024, len(phone))\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.randn(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.randn(1024, len(phone))\n ja_bert = torch.randn(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, 
sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "AudioVisemesLoader", "path": "data_utils.py", "snippet": "class AudioVisemesLoader(torch.utils.data.Dataset):\n \"\"\"\n loads audio, visemes torch variable pairs from visemes list file .\n file is like: \n ./records/date_time.z.npy|./records/date_time.npy\n \"\"\"\n \n def __init__(self, audio_visemes_list_file, hparams):\n 
self.audio_visemes_list_items = load_filepaths_and_text(audio_visemes_list_file)\n print('audio_visemes_list_items: ', len(self.audio_visemes_list_items))\n random.seed(1234)\n random.shuffle(self.audio_visemes_list_items)\n self.max_visemes_len = 1210\n self.min_visemes_len = 1190\n self._filter()\n\n\n def _filter(self):\n # check if the file exists, and can parse as torch tensor\n audio_visemes_list_items_new = []\n for audio_file, visemes_file in self.audio_visemes_list_items:\n if os.path.exists(audio_file) and os.path.exists(visemes_file):\n # check using torch.load\n try:\n audio = torch.load(audio_file)\n visemes = np.load(visemes_file)\n if visemes.shape[0] < self.min_visemes_len:\n print('drop this data: --------- visemes.shape[0] < self.min_visemes_len: ', visemes.shape[0], visemes_file)\n continue\n audio_visemes_list_items_new.append([audio_file, visemes_file])\n except Exception as e:\n print('error: ', audio_file, visemes_file)\n print(e)\n self.audio_visemes_list_items = audio_visemes_list_items_new\n print('audio_visemes_list_items after filter: ', len(self.audio_visemes_list_items))\n\n def __getitem__(self, index):\n # read these two torch.tensor\n audio_file, visemes_file = self.audio_visemes_list_items[index]\n audio_z = torch.load(audio_file).squeeze(0).detach()\n # [192, seq_len(1722)]\n\n visemes = np.load(visemes_file)\n visemes = torch.from_numpy(visemes)\n #[seq_len(1194), 61]\n visemes = visemes.transpose(0, 1)\n #[61, seq_len(1194)]\n if visemes.shape[1] > self.max_visemes_len:\n # cut the extra part\n # print('__getitem__ 1 cut visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n visemes = visemes[:, :self.max_visemes_len]\n elif visemes.shape[1] < self.max_visemes_len:\n # padding to max_visemes_len with last frame\n # print('__getitem__ 2 padding visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n # last_frame = visemes[-1]\n # visemes = np.concatenate([visemes, np.tile(last_frame, (self.max_visemes_len - visemes.shape[0], 1))], axis=0)\n # visemes = torch.from_numpy(visemes)\n pass\n\n visemes_offset = 0.08 # 将visemes延迟n s\n visemes_offset_frames = int(visemes_offset * const_map.ARKIT_FPS)\n visemes = visemes[:, visemes_offset_frames:]\n\n audio_z_offset = 0.0\n audio_z_offset_frames = int(audio_z_offset * const_map.Z_FPS)\n audio_z = audio_z[:, audio_z_offset_frames:]\n\n # 获取二者的时长,将过长的一方多的部分丢弃\n visemes_duration = visemes.shape[1] / const_map.ARKIT_FPS\n audio_z_duration = audio_z.shape[1] / const_map.Z_FPS\n if visemes_duration > audio_z_duration:\n visemes = visemes[:, :int(audio_z_duration * const_map.ARKIT_FPS)]\n elif visemes_duration < audio_z_duration:\n audio_z = audio_z[:, :int(visemes_duration * const_map.Z_FPS)]\n\n\n # print('__getitem__ 3 audio.shape: ', audio.shape, 'visemes.shape: ', visemes.shape,'file: ', visemes_file)\n return audio_z, visemes\n\n def __len__(self):\n return len(self.audio_visemes_list_items)" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n 
flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * 
s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n logw_sdp = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n l_length_sdp += torch.sum((logw_sdp - logw_) ** 2, [1, 2]) / torch.sum(x_mask)\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_, logw_sdp),\n g,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n def get_post_enc_dec(self):\n return self.enc_q, self.dec" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = 
d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.LSTM = nn.LSTM(\n 2 * filter_channels, filter_channels, batch_first=True, bidirectional=True\n )\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(2 * filter_channels, 1), nn.Sigmoid()\n )\n\n def forward_probability(self, x, dur):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = x.transpose(1, 2)\n x, _ = self.LSTM(x)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, dur)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "WavLMDiscriminator", "path": "models.py", "snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(\n self, slm_hidden=768, slm_layers=13, initial_channel=64, use_spectral_norm=False\n ):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(\n Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)\n )\n\n self.convs = nn.ModuleList(\n [\n norm_f(\n nn.Conv1d(\n initial_channel, initial_channel * 2, kernel_size=5, padding=2\n )\n ),\n norm_f(\n nn.Conv1d(\n initial_channel * 2,\n initial_channel * 4,\n kernel_size=5,\n padding=2,\n )\n ),\n norm_f(\n nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)\n ),\n ]\n )\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n\n def forward(self, x):\n x = self.pre(x)\n\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, modules.LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, 
enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # 初始化权重\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n 
nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "WavLMLoss", "path": "losses.py", "snippet": "class WavLMLoss(torch.nn.Module):\n def __init__(self, model, wd, model_sr, slm_sr=16000):\n super(WavLMLoss, self).__init__()\n self.wavlm = AutoModel.from_pretrained(model)\n self.wd = wd\n self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)\n self.wavlm.eval()\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def forward(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16.squeeze(), output_hidden_states=True\n ).hidden_states\n\n floss = 0\n for er, eg in zip(wav_embeddings, y_rec_embeddings):\n floss += torch.mean(torch.abs(er - eg))\n\n return floss.mean()\n\n def generator(self, y_rec):\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_df_hat_g = self.wd(y_rec_embeddings)\n loss_gen = torch.mean((1 - y_df_hat_g) ** 2)\n\n return loss_gen\n\n def discriminator(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n 
input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n y_d_gs = self.wd(y_rec_embeddings)\n\n y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs\n\n r_loss = torch.mean((1 - y_df_hat_r) ** 2)\n g_loss = torch.mean((y_df_hat_g) ** 2)\n\n loss_disc_f = r_loss + g_loss\n\n return loss_disc_f.mean()\n\n def discriminator_forward(self, wav):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n\n return y_d_rs" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
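The three data_utils classes in the context list above are designed to be combined: TextAudioSpeakerLoader produces (phones, spec, wav, sid, ...) tuples, DistributedBucketSampler groups indices of similar spectrogram length into batches, and TextAudioSpeakerCollate zero-pads each batch. A hedged sketch of the usual wiring follows; the hps object, bucket boundaries and worker count are placeholders, not values taken from this record.

from torch.utils.data import DataLoader
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)

def build_train_loader(hps, rank=0, n_gpus=1):
    dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
    sampler = DistributedBucketSampler(
        dataset,
        hps.train.batch_size,
        [32, 300, 400, 500, 600, 700, 800, 900, 1000],  # assumed length boundaries
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    # batch_sampler already yields whole batches, so no batch_size/shuffle here
    return DataLoader(
        dataset,
        num_workers=2,
        pin_memory=True,
        collate_fn=TextAudioSpeakerCollate(),
        batch_sampler=sampler,
    )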
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, AudioVisemesLoader, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, WavLMDiscriminator, VisemesNet, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss, WavLMLoss, ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
13,247
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If you encounter training problems, please try disabling TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # simplest single-machine mode: train only the parameters of the fully connected VisemesFCNet that maps the latent z to visemes (expressions) global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False)
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If you encounter training problems, please try disabling TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # simplest single-machine mode: train only the parameters of the fully connected VisemesFCNet that maps the latent z to visemes (expressions) global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False)
net_v = VisemesNet(hps.model.hidden_channels).cuda()
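The next_line above constructs the VisemesNet that run_only_visemes trains. Per the AudioVisemesLoader snippet, each item pairs an audio latent of shape (192, T) with a viseme target of shape (61, T), and VisemesNet maps (batch, hidden_channels, T) to (batch, 61, T). A minimal single-step training sketch under those assumptions; the MSE objective, optimizer choice and tensor sizes are illustrative, not taken from the record.

import torch
from models import VisemesNet

hidden_channels = 192                      # assumed to match hps.model.hidden_channels
net_v = VisemesNet(hidden_channels)        # CPU here; the record moves it to .cuda()
optim_v = torch.optim.AdamW(net_v.parameters(), lr=1e-4)

audio_z = torch.randn(1, hidden_channels, 1190)  # (batch, hidden_channels, seq_len)
visemes = torch.randn(1, 61, 1190)               # (batch, 61, seq_len) target blendshapes

pred = net_v(audio_z)                            # (batch, 61, seq_len)
loss = torch.nn.functional.mse_loss(pred, visemes)
loss.backward()
optim_v.step()
optim_v.zero_grad()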
9
2023-12-27 03:09:11+00:00
16k
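The losses.py helpers listed in this record's context follow the least-squares GAN recipe used in VITS-style training: discriminator_loss pushes real outputs toward 1 and generated outputs toward 0, generator_loss pushes generated outputs toward 1, and feature_loss matches intermediate discriminator feature maps. A small runnable sketch with placeholder tensors; in train_ms.py the real inputs would come from MultiPeriodDiscriminator(y, y_hat), and the shapes below are arbitrary.

import torch
from losses import discriminator_loss, generator_loss, feature_loss

# placeholder outputs for two sub-discriminators
y_d_real = [torch.rand(2, 100), torch.rand(2, 80)]
y_d_fake = [torch.rand(2, 100), torch.rand(2, 80)]
fmap_real = [[torch.randn(2, 32, 50)], [torch.randn(2, 32, 40)]]
fmap_fake = [[torch.randn(2, 32, 50)], [torch.randn(2, 32, 40)]]

# discriminator update: (1 - D(real))^2 + D(fake)^2
loss_disc, real_losses, fake_losses = discriminator_loss(y_d_real, y_d_fake)

# generator update: (1 - D(fake))^2 plus feature-map matching
loss_gen, gen_losses = generator_loss(y_d_fake)
loss_fm = feature_loss(fmap_real, fmap_fake)
print(loss_disc.item(), loss_gen.item(), loss_fm.item())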
chinhsuanwu/ifusion-threestudio
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
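The p_sample_ddim snippet in the context above interleaves classifier-free guidance with the DDIM update rule. The following is a minimal stand-alone sketch of just that arithmetic, not the repository's API: x is the current latent, e_t_cond / e_t_uncond are the conditional and unconditional model outputs, and a_t, a_prev, sigma_t are the per-step schedule values already broadcast to (b, 1, 1, 1) as in the snippet.

import torch

def guided_ddim_step(x, e_t_cond, e_t_uncond, a_t, a_prev, sigma_t,
                     guidance_scale=3.0, temperature=1.0):
    # Classifier-free guidance: push the conditional prediction away from the
    # unconditional one.
    e_t = e_t_uncond + guidance_scale * (e_t_cond - e_t_uncond)
    # DDIM update: estimate x0, add the "direction pointing to x_t", then
    # sigma-scaled noise (sigma_t == 0 gives the deterministic DDIM step).
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    noise = sigma_t * torch.randn_like(x) * temperature
    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
    return x_prev, pred_x0

With guidance_scale = 1.0 the unconditional branch cancels out, which is why the snippet skips the doubled forward pass in that case.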
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
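make_beta_schedule and extract_into_tensor, imported above, are reproduced in full in the context list. A small usage sketch, assuming those two helpers are in scope and with arbitrary batch size and resolution:

import numpy as np
import torch

# Linear beta schedule -> cumulative alpha products, then gather per-sample
# coefficients shaped for broadcasting against an image batch.
betas = make_beta_schedule("linear", n_timestep=1000, linear_start=1e-4, linear_end=2e-2)
alphas_cumprod = np.cumprod(1.0 - betas, axis=0)
sqrt_ac = torch.tensor(np.sqrt(alphas_cumprod), dtype=torch.float32)

x0 = torch.randn(4, 3, 64, 64)                    # toy latent/image batch
t = torch.randint(0, 1000, (4,))                  # one timestep index per sample
coef = extract_into_tensor(sqrt_ac, t, x0.shape)  # shape (4, 1, 1, 1)

This is exactly the pattern register_schedule and q_sample in the code below rely on.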
12,692
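The cropped_code field below resumes inside LatentDiffusion.get_fold_unfold, which cuts a latent into overlapping crops and later stitches per-crop outputs back together. A simplified round trip with uniform weighting (the model instead uses the border-distance weighting from get_weighting, but the normalization idea is the same):

import torch

x = torch.randn(1, 4, 64, 64)
ks, stride = (32, 32), (16, 16)
unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

patches = unfold(x)                              # (1, 4*32*32, L) overlapping crops
normalization = fold(torch.ones_like(patches))   # per-pixel overlap count
recon = fold(patches) / normalization            # averaging the overlaps recovers x
assert torch.allclose(recon, x, atol=1e-5)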
padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
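The get_input method shown above implements the three-way conditioning dropout used to train for classifier-free guidance. An isolated sketch of just the mask logic, with an arbitrary batch of 8:

import torch
from einops import rearrange

uncond = 0.05
random = torch.rand(8)

# True where random < 0.10: the CLIP/pose embedding is swapped for the null prompt.
prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
# Zero where 0.05 <= random < 0.15: the concatenated image latent is zeroed out.
input_mask = 1 - rearrange(
    (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1"
)
# Net effect per sample: ~5% drop only the prompt, ~5% drop only the image,
# ~5% drop both, matching the comment in get_input.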
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
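DDPM.q_sample and p_losses in the all_code above define the training objective: noise a clean input with the closed-form forward process and regress the network output against the added noise (eps parameterization). A compact restatement, where sqrt_ac and sqrt_one_minus_ac are the precomputed 1-D schedule buffers and model is any eps-predicting network taking (x_t, t):

import torch

def q_sample(x0, t, sqrt_ac, sqrt_one_minus_ac, noise):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    shape = (x0.shape[0],) + (1,) * (x0.dim() - 1)
    return (sqrt_ac.gather(0, t).reshape(shape) * x0
            + sqrt_one_minus_ac.gather(0, t).reshape(shape) * noise)

def simple_loss(model, x0, t, sqrt_ac, sqrt_one_minus_ac):
    noise = torch.randn_like(x0)
    x_t = q_sample(x0, t, sqrt_ac, sqrt_one_minus_ac, noise)
    return torch.nn.functional.mse_loss(model(x_t, t), noise)  # target is the noise

LatentDiffusion layers the frozen first-stage autoencoder and the cc_projection conditioning on top of this same simple loss.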
if isinstance(self.first_stage_model, VQModelInterface):
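The gold next_line above opens the per-crop decode branch of decode_first_stage. For orientation only: in the upstream CompVis latent-diffusion code this ddpm.py is derived from, the block typically continues roughly as below; the actual continuation in this repository is not shown here and may differ. z, weighting, fold and normalization are the local variables from decode_first_stage above.

# continuation sketch, not verified against this repository
if isinstance(self.first_stage_model, VQModelInterface):
    output_list = [
        self.first_stage_model.decode(
            z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize
        )
        for i in range(z.shape[-1])
    ]
else:
    output_list = [
        self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])
    ]
o = torch.stack(output_list, axis=-1) * weighting  # re-apply the crop weighting
o = o.view((o.shape[0], -1, o.shape[-1]))          # back to (bn, nc * ks[0] * ks[1], L)
decoded = fold(o) / normalization                  # stitch crops, normalize the overlap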
2
2023-12-27 20:30:33+00:00
16k