Column               Type     Values
repo_name            string   length 7-71
file_path            string   length 5-118
context              list     -
import_statement     string   length 45-12.5k
token_num            int64    641-99.4k
cropped_code         string   length 44-17k
all_code             string   length 43-754k
next_line            string   length 2-330
gold_snippet_index   int64    0-68
created_at           string   length 25 (fixed)
level                string   9 classes
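Each record below fills these columns in order, one value per column. As a minimal sketch of how such a split could be loaded and inspected with the Hugging Face datasets library (the dataset identifier used here is a hypothetical placeholder, not the real one):

from datasets import load_dataset

# Hypothetical identifier; substitute the actual dataset path.
ds = load_dataset("org/repo-level-code-completion", split="train")

example = ds[0]
print(example["repo_name"])           # e.g. "DLYuanGod/TinyGPT-V"
print(example["file_path"])           # file in which the completion happens
print(len(example["context"]))        # number of retrieved cross-file snippets
print(example["token_num"])           # prompt size in tokens
print(example["next_line"])           # ground-truth line to predict
print(example["gold_snippet_index"])  # index of the gold snippet in `context`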
DLYuanGod/TinyGPT-V
minigpt4/models/minigpt4.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "disabled_train", "path": "minigpt4/models/base_model.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "MiniGPTBase", "path": "minigpt4/models/minigpt_base.py", "snippet": "class MiniGPTBase(BaseModel):\n \"\"\"\n Base class for MiniGPT-4 and MiniGPT-v2\n \"\"\"\n\n def __init__(\n self,\n vit_model=\"eva_clip_g\",\n img_size=224,\n drop_path_rate=0,\n use_grad_checkpoint=False,\n vit_precision=\"fp16\",\n freeze_vit=True,\n llama_model=\"\",\n max_txt_len=32,\n max_context_len=3800,\n prompt_template=\"\",\n end_sym='\\n',\n low_resource=False, # use 8 bit and put vit in cpu\n device_8bit=0, # the device of 8bit model should be set when loading and cannot be changed anymore.\n lora_r=0, # lora_r means lora is not used\n lora_target_modules=[\"q_proj\", \"v_proj\"],\n lora_alpha=16,\n lora_dropout=0.05,\n ):\n super().__init__()\n\n self.llama_model, self.llama_tokenizer = self.init_llm(\n llama_model_path=llama_model,\n low_resource=low_resource,\n low_res_device=device_8bit,\n lora_r=lora_r,\n lora_target_modules=lora_target_modules,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n )\n\n self.visual_encoder, self.ln_vision = self.init_vision_encoder(\n vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision, freeze_vit\n )\n\n self.max_txt_len = max_txt_len\n self.max_context_len = max_context_len\n self.end_sym = end_sym\n\n self.prompt_template = prompt_template\n self.prompt_list = []\n\n def vit_to_cpu(self):\n self.ln_vision.to(\"cpu\")\n self.ln_vision.float()\n self.visual_encoder.to(\"cpu\")\n self.visual_encoder.float()\n\n def get_context_emb(self, prompt, img_list):\n device = img_list[0].device\n prompt_segs = prompt.split('<ImageHere>')\n assert len(prompt_segs) == len(img_list) + 1, \"Unmatched numbers of image placeholders and images.\"\n seg_tokens = [\n self.llama_tokenizer(\n seg, return_tensors=\"pt\", add_special_tokens=i==0).to(device).input_ids # only add bos to the first seg\n for i, seg in enumerate(prompt_segs)\n ]\n seg_embs = [self.embed_tokens(seg_t) for seg_t in seg_tokens]\n\n mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]]\n mixed_embs = torch.cat(mixed_embs, dim=1)\n return mixed_embs\n\n def prompt_wrap(self, img_embeds, atts_img, prompts, lengths=None):\n if prompts is None or len(prompts) == 0:\n # prompts is not provided, just 
return the original image embedding\n return img_embeds, atts_img\n elif img_embeds is None:\n # prompt is provided but there is no image embedding. return the prompt embedding in right padding\n self.llama_tokenizer.padding_side = \"right\"\n prompt_tokens = self.llama_tokenizer(\n prompts,\n return_tensors=\"pt\",\n padding=\"longest\",\n add_special_tokens=False\n ).to(self.device)\n prompt_embeds = self.embed_tokens(prompt_tokens.input_ids)\n atts_prompt = prompt_tokens.attention_mask\n return prompt_embeds, atts_prompt\n else:\n # return the multi-modal embedding in right padding\n emb_lists = []\n if isinstance(prompts, str):\n prompts = [prompts] * len(img_embeds)\n\n for idx, (each_img_embed, each_prompt) in enumerate(zip(img_embeds, prompts)):\n pn = each_img_embed.shape[-2]\n if lengths is not None:\n each_img_embed = each_img_embed.reshape(-1, each_img_embed.shape[-1])\n each_img_embed = each_img_embed[:lengths[idx] * pn]\n p_segs = each_prompt.split('<ImageHere>')\n interleave_emb = []\n for idx, seg in enumerate(p_segs[:-1]):\n p_tokens = self.llama_tokenizer(\n seg, return_tensors=\"pt\", add_special_tokens=False).to(img_embeds.device)\n p_embed = self.embed_tokens(p_tokens.input_ids)\n interleave_emb.append(torch.cat([p_embed, each_img_embed[None][:, idx * pn:(idx + 1) * pn]], dim=1))\n wrapped_emb = torch.cat(interleave_emb, dim=1)\n p_tokens = self.llama_tokenizer(\n p_segs[-1], return_tensors=\"pt\", add_special_tokens=False).to(img_embeds.device)\n p_embed = self.embed_tokens(p_tokens.input_ids)\n wrapped_emb = torch.cat([wrapped_emb, p_embed], dim=1)\n emb_lists.append(wrapped_emb)\n\n emb_lens = [emb.shape[1] for emb in emb_lists]\n pad_emb = self.embed_tokens(torch.tensor(self.llama_tokenizer.pad_token_id, device=img_embeds.device))\n\n max_length = max(emb_lens) if max(emb_lens) < self.max_context_len else self.max_context_len\n wrapped_embs = pad_emb.expand(len(emb_lens), max_length, -1).clone()\n wrapped_atts = torch.zeros([len(emb_lens), max_length], dtype=torch.int, device=img_embeds.device)\n \n for i, emb in enumerate(emb_lists):\n length = emb_lens[i] if emb_lens[i] < self.max_context_len else self.max_context_len\n wrapped_embs[i, :length] = emb[:, :length]\n wrapped_atts[i, :length] = 1\n return wrapped_embs, wrapped_atts\n\n def concat_emb_input_output(self, input_embs, input_atts, output_embs, output_atts):\n \"\"\"\n Concatenate the batched input embedding and batched output embedding together.\n Both the input and the output embedding should be right padded.\n \"\"\"\n input_lens = []\n cat_embs = []\n cat_atts = []\n for i in range(input_embs.size(0)):\n input_len = input_atts[i].sum()\n input_lens.append(input_len)\n cat_embs.append(\n torch.cat([\n input_embs[i][:input_len],\n output_embs[i],\n input_embs[i][input_len:]\n ])\n )\n cat_atts.append(\n torch.cat([\n input_atts[i][:input_len],\n output_atts[i],\n input_atts[i][input_len:]\n ])\n )\n cat_embs = torch.stack(cat_embs)\n cat_atts = torch.stack(cat_atts)\n return cat_embs, cat_atts, input_lens\n\n def tokenize_conversation(self, conv_q, conv_a):\n \"\"\"concatenate conversation and make sure the model is only trained to regress the answer\"\"\"\n\n to_regress_token_ids_list = []\n targets_list = []\n\n batch_size = len(conv_q)\n for batch_idx in range(batch_size):\n questions, answers = conv_q[batch_idx], conv_a[batch_idx]\n questions = [self.llama_tokenizer(self.llama_tokenizer.bos_token + q,\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) for q in questions[1:]] # the 
first question is handled in the prompt wrap function, skip it\n answers = [self.llama_tokenizer(a + self.end_sym,\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) for a in answers]\n cur_id = []\n cur_target = []\n for i in range(len(questions)):\n cur_id.append(answers[i].input_ids)\n cur_target.append(answers[i].input_ids)\n cur_id.append(questions[i].input_ids)\n cur_target.append(torch.ones_like(questions[i].input_ids) * -100)\n\n cur_id.append(answers[-1].input_ids)\n cur_target.append(answers[-1].input_ids)\n\n cur_id = torch.cat(cur_id, dim=1)\n cur_target = torch.cat(cur_target, dim=1)\n to_regress_token_ids_list.append(cur_id)\n targets_list.append(cur_target)\n\n max_len = min(max([target.shape[1] for target in targets_list]), self.max_txt_len)\n to_regress_token_ids = torch.ones([batch_size, max_len],\n dtype=cur_id.dtype, device=self.device) * self.llama_tokenizer.pad_token_id\n targets = torch.ones([batch_size, max_len],\n dtype=cur_id.dtype, device=self.device) * -100\n for batch_idx in range(batch_size):\n cur_len = to_regress_token_ids_list[batch_idx].shape[1]\n to_regress_token_ids[batch_idx, :cur_len] = to_regress_token_ids_list[batch_idx][0, :max_len]\n targets[batch_idx, :cur_len] = targets_list[batch_idx][0, :max_len]\n\n to_regress_token_attn = (to_regress_token_ids != self.llama_tokenizer.pad_token_id).to(torch.int)\n\n return to_regress_token_ids, to_regress_token_attn, targets\n\n def preparing_embedding(self, samples):\n ### prepare input tokens\n if 'image' in samples:\n img_embeds, img_atts = self.encode_img(samples[\"image\"])\n else:\n img_embeds = img_atts = None\n\n if 'conv_q' in samples:\n # handeling conversation datasets\n conv_q, conv_a = samples['conv_q'], samples['conv_a']\n\n connect_sym = samples['connect_sym'][0]\n conv_q = [q.split(connect_sym)for q in conv_q]\n conv_a = [a.split(connect_sym) for a in conv_a]\n\n conv_q = [[self.prompt_template.format(item) for item in items] for items in conv_q]\n\n cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, [q[0] for q in conv_q])\n regress_token_ids, regress_atts, part_targets = self.tokenize_conversation(conv_q, conv_a)\n\n else:\n if \"instruction_input\" in samples:\n instruction = samples[\"instruction_input\"]\n elif self.prompt_list:\n instruction = random.choice(self.prompt_list)\n else:\n instruction = None\n\n if hasattr(self, 'chat_template') and self.chat_template:\n instruction = [self.prompt_template.format(instruct) for instruct in instruction]\n\n if 'length' in samples:\n # the input is a image train (like videos)\n bsz, pn, hs = img_embeds.shape\n img_embeds = img_embeds.reshape(len(samples['image']), -1, pn, hs)\n cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction, samples['length'])\n else:\n cond_embeds, cond_atts = self.prompt_wrap(img_embeds, img_atts, instruction)\n\n ### prepare target tokens\n self.llama_tokenizer.padding_side = \"right\"\n text = [t + self.end_sym for t in samples[\"answer\"]]\n\n regress_tokens = self.llama_tokenizer(\n text,\n return_tensors=\"pt\",\n padding=\"longest\",\n truncation=True,\n max_length=self.max_txt_len,\n add_special_tokens=False\n ).to(self.device)\n\n regress_token_ids = regress_tokens.input_ids\n regress_atts = regress_tokens.attention_mask\n part_targets = regress_token_ids.masked_fill(\n regress_token_ids == self.llama_tokenizer.pad_token_id, -100\n )\n\n regress_embeds = self.embed_tokens(regress_token_ids)\n\n return cond_embeds, cond_atts, regress_embeds, regress_atts, 
part_targets\n\n def forward(self, samples,):\n # prepare the embedding to condition and the embedding to regress\n cond_embeds, cond_atts, regress_embeds, regress_atts, part_targets = \\\n self.preparing_embedding(samples)\n\n # concat the embedding to condition and the embedding to regress\n inputs_embeds, attention_mask, input_lens = \\\n self.concat_emb_input_output(cond_embeds, cond_atts, regress_embeds, regress_atts)\n\n # get bos token embedding\n bos = torch.ones_like(part_targets[:, :1]) * self.llama_tokenizer.bos_token_id\n bos_embeds = self.embed_tokens(bos)\n bos_atts = cond_atts[:, :1]\n\n # add bos token at the begining\n inputs_embeds = torch.cat([bos_embeds, inputs_embeds], dim=1)\n attention_mask = torch.cat([bos_atts, attention_mask], dim=1)\n\n # ensemble the final targets\n targets = torch.ones([inputs_embeds.shape[0], inputs_embeds.shape[1]],\n dtype=torch.long).to(self.device).fill_(-100)\n\n for i, target in enumerate(part_targets):\n targets[i, input_lens[i]+1:input_lens[i]+len(target)+1] = target # plus 1 for bos\n\n with self.maybe_autocast():\n outputs = self.llama_model(\n input_ids = None,\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n return_dict=True,\n labels=targets,\n )\n loss = outputs.loss\n\n return {\"loss\": loss}\n\n def embed_tokens(self, token_ids):\n if hasattr(self.llama_model.base_model, 'model'): ## lora wrapped model\n embeds = self.llama_model.model.model.embed_tokens(token_ids)\n else:\n embeds = self.llama_model.model.embed_tokens(token_ids)\n return embeds\n\n @torch.no_grad()\n def generate(\n self,\n images,\n texts,\n num_beams=1,\n max_new_tokens=20,\n min_length=1,\n top_p=0.9,\n repetition_penalty=1,\n length_penalty=1,\n temperature=1,\n do_sample=False,\n stop_words_ids=[2],\n ):\n '''\n function for generate test use\n '''\n\n stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(\n stops=[torch.tensor([i]).to(self.device) for i in stop_words_ids])])\n\n img_embeds, atts_img = self.encode_img(images.to(self.device))\n image_lists = [[image_emb[None]] for image_emb in img_embeds]\n\n batch_embs = [self.get_context_emb(text, img_list) for text, img_list in zip(texts, image_lists)]\n\n batch_size = len(batch_embs)\n max_len = max([emb.shape[1] for emb in batch_embs])\n emb_dim = batch_embs[0].shape[2]\n dtype = batch_embs[0].dtype\n device = batch_embs[0].device\n\n embs = torch.zeros([batch_size, max_len, emb_dim], dtype=dtype, device=device)\n attn_mask = torch.zeros([batch_size, max_len], dtype=torch.int, device=device)\n for i, emb in enumerate(batch_embs):\n emb_len = emb.shape[1]\n embs[i, -emb_len:] = emb[0]\n attn_mask[i, -emb_len:] = 1\n\n with self.maybe_autocast():\n outputs = self.llama_model.generate(\n inputs_embeds=embs,\n attention_mask=attn_mask,\n max_new_tokens=max_new_tokens,\n num_beams=num_beams,\n length_penalty=length_penalty,\n temperature=temperature,\n do_sample=do_sample,\n min_length=min_length,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n bos_token_id = 50256\n # stopping_criteria=stopping_criteria,\n )\n\n # with self.maybe_autocast():\n # outputs = self.llama_model.generate(\n # inputs_embeds=embs,\n # attention_mask=attn_mask,\n # max_new_tokens=max_new_tokens,\n # num_beams=num_beams,\n # do_sample=do_sample,\n # # stopping_criteria=stopping_criteria,\n # )\n answers = []\n for output_token in outputs:\n if output_token[0] == 0:\n output_token = output_token[1:]\n output_texts = self.llama_tokenizer.decode(output_token, skip_special_tokens=True)\n output_texts 
= output_texts.split('</s>')[0] # remove the stop sign </s>\n output_texts = output_texts.replace(\"<s>\", \"\")\n output_texts = output_texts.split(r'[/INST]')[-1].strip()\n answers.append(output_texts)\n\n return answers\n\n @torch.no_grad()\n def multi_select(self, images, texts, answers, num_cand=None):\n all_losses = []\n for answer in answers:\n choice_samples = {\n 'image': images,\n 'instruction_input': texts,\n 'answer': answer\n }\n loss = self.forward(choice_samples)['loss'].reshape(-1, 1)\n all_losses.append(loss)\n torch.cuda.empty_cache()\n all_losses = torch.cat(all_losses, dim=-1)\n if num_cand is not None:\n for i in range(all_losses.shape[0]):\n all_losses[i, num_cand[i]:] = 9999\n output_class_ranks = torch.argsort(all_losses, dim=-1)\n return output_class_ranks.tolist()" }, { "identifier": "BertConfig", "path": "minigpt4/models/Qformer.py", "snippet": "class BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertModel(BertPreTrainedModel):\nclass BertLMHeadModel(BertPreTrainedModel):\nclass BertForMaskedLM(BertPreTrainedModel):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n query_embeds=None,\n past_key_values_length=0,\n ):\n def __init__(self, config, is_cross_attention):\n def save_attn_gradients(self, attn_gradients):\n def get_attn_gradients(self):\n def save_attention_map(self, attention_map):\n def get_attention_map(self):\n def transpose_for_scores(self, x):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, is_cross_attention=False):\n def prune_heads(self, heads):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, layer_num):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n query_length=0,\n ):\n def feed_forward_chunk(self, attention_output):\n def feed_forward_chunk_query(self, attention_output):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n query_length=0,\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def 
forward(self, sequence_output):\n def _init_weights(self, module):\n def __init__(self, config, add_pooling_layer=False):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prune_heads(self, heads_to_prune):\n def get_extended_attention_mask(\n self,\n attention_mask: Tensor,\n input_shape: Tuple[int],\n device: device,\n is_decoder: bool,\n has_query: bool = False,\n ) -> Tensor:\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n ):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=True,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=True,\n reduction=\"mean\",\n ):\n def prepare_inputs_for_generation(\n self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs\n ):\n def _reorder_cache(self, past, beam_idx):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=False,\n ):" } ]
import logging
import random

import torch
import torch.nn as nn
from torch.cuda.amp import autocast as autocast

from minigpt4.common.registry import registry
from minigpt4.models.base_model import disabled_train
from minigpt4.models.minigpt_base import MiniGPTBase
from minigpt4.models.Qformer import BertConfig, BertLMHeadModel
6,834
@registry.register_model("minigpt4") class MiniGPT4(MiniGPTBase): """ MiniGPT-4 model """ PRETRAINED_MODEL_CONFIG_DICT = { "pretrain_vicuna0": "configs/models/minigpt4_vicuna0.yaml", "pretrain_llama2": "configs/models/minigpt4_llama2.yaml", } def __init__( self, vit_model="eva_clip_g", q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, has_qformer=True, freeze_qformer=True, num_query_token=32, llama_model="", prompt_path="", prompt_template="", max_txt_len=32, end_sym='\n', low_resource=False, # use 8 bit and put vit in cpu device_8bit=0, # the device of 8bit model should be set when loading and cannot be changed anymore. lora_r=64, lora_target_modules=['query_key_value','dense'], lora_alpha=16, lora_dropout=0.05, ): super().__init__( vit_model=vit_model, img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, llama_model=llama_model, max_txt_len=max_txt_len, end_sym=end_sym, low_resource=low_resource, device_8bit=device_8bit, lora_r=lora_r, lora_target_modules=lora_target_modules, lora_alpha=lora_alpha, lora_dropout=lora_dropout, ) self.has_qformer = True if self.has_qformer: print('Loading Q-Former') self.Qformer, self.query_tokens = self.init_Qformer( num_query_token, self.visual_encoder.num_features, freeze_qformer ) self.load_from_pretrained(url_or_filename=q_former_model) # load q-former weights here img_f_dim = self.Qformer.config.hidden_size print('Loading Q-Former Done') else: img_f_dim = self.visual_encoder.num_features * 4 print('Do not use Q-Former here.') print(img_f_dim,self.llama_model.config.hidden_size) self.llama_proj = nn.Linear( self.Qformer.config.hidden_size, 4096 ) self.llama_proj2 = nn.Linear( 4096, self.llama_model.config.hidden_size ) if prompt_path: with open(prompt_path, 'r') as f: raw_prompts = f.read().splitlines() filted_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt] self.prompt_list = [prompt_template.format(p) for p in filted_prompts] print('Load {} training prompts'.format(len(self.prompt_list))) print('Prompt Example \n{}'.format(random.choice(self.prompt_list))) else: self.prompt_list = [] @classmethod def init_Qformer(cls, num_query_token, vision_width, freeze): encoder_config = BertConfig.from_pretrained("bert-base-uncased") encoder_config.encoder_width = vision_width # insert cross-attention layer every other block encoder_config.add_cross_attention = True encoder_config.cross_attention_freq = 2 encoder_config.query_length = num_query_token Qformer = BertLMHeadModel(config=encoder_config) query_tokens = nn.Parameter( torch.zeros(1, num_query_token, encoder_config.hidden_size) ) query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) Qformer.cls = None Qformer.bert.embeddings.word_embeddings = None Qformer.bert.embeddings.position_embeddings = None for layer in Qformer.bert.encoder.layer: layer.output = None layer.intermediate = None if freeze: for name, param in Qformer.named_parameters(): param.requires_grad = False Qformer = Qformer.eval()
@registry.register_model("minigpt4") class MiniGPT4(MiniGPTBase): """ MiniGPT-4 model """ PRETRAINED_MODEL_CONFIG_DICT = { "pretrain_vicuna0": "configs/models/minigpt4_vicuna0.yaml", "pretrain_llama2": "configs/models/minigpt4_llama2.yaml", } def __init__( self, vit_model="eva_clip_g", q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, has_qformer=True, freeze_qformer=True, num_query_token=32, llama_model="", prompt_path="", prompt_template="", max_txt_len=32, end_sym='\n', low_resource=False, # use 8 bit and put vit in cpu device_8bit=0, # the device of 8bit model should be set when loading and cannot be changed anymore. lora_r=64, lora_target_modules=['query_key_value','dense'], lora_alpha=16, lora_dropout=0.05, ): super().__init__( vit_model=vit_model, img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, llama_model=llama_model, max_txt_len=max_txt_len, end_sym=end_sym, low_resource=low_resource, device_8bit=device_8bit, lora_r=lora_r, lora_target_modules=lora_target_modules, lora_alpha=lora_alpha, lora_dropout=lora_dropout, ) self.has_qformer = True if self.has_qformer: print('Loading Q-Former') self.Qformer, self.query_tokens = self.init_Qformer( num_query_token, self.visual_encoder.num_features, freeze_qformer ) self.load_from_pretrained(url_or_filename=q_former_model) # load q-former weights here img_f_dim = self.Qformer.config.hidden_size print('Loading Q-Former Done') else: img_f_dim = self.visual_encoder.num_features * 4 print('Do not use Q-Former here.') print(img_f_dim,self.llama_model.config.hidden_size) self.llama_proj = nn.Linear( self.Qformer.config.hidden_size, 4096 ) self.llama_proj2 = nn.Linear( 4096, self.llama_model.config.hidden_size ) if prompt_path: with open(prompt_path, 'r') as f: raw_prompts = f.read().splitlines() filted_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt] self.prompt_list = [prompt_template.format(p) for p in filted_prompts] print('Load {} training prompts'.format(len(self.prompt_list))) print('Prompt Example \n{}'.format(random.choice(self.prompt_list))) else: self.prompt_list = [] @classmethod def init_Qformer(cls, num_query_token, vision_width, freeze): encoder_config = BertConfig.from_pretrained("bert-base-uncased") encoder_config.encoder_width = vision_width # insert cross-attention layer every other block encoder_config.add_cross_attention = True encoder_config.cross_attention_freq = 2 encoder_config.query_length = num_query_token Qformer = BertLMHeadModel(config=encoder_config) query_tokens = nn.Parameter( torch.zeros(1, num_query_token, encoder_config.hidden_size) ) query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) Qformer.cls = None Qformer.bert.embeddings.word_embeddings = None Qformer.bert.embeddings.position_embeddings = None for layer in Qformer.bert.encoder.layer: layer.output = None layer.intermediate = None if freeze: for name, param in Qformer.named_parameters(): param.requires_grad = False Qformer = Qformer.eval()
Qformer.train = disabled_train
1
2023-12-28 05:47:18+00:00
8k
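Reading the record above column by column: context holds the retrieved cross-file snippets (each with an identifier, a path, and a snippet), import_statement and cropped_code form the in-file prefix, next_line is the completion target, and gold_snippet_index marks which context entry contains the definition the completion depends on. A minimal sketch of how these fields might be assembled into a single prompt; the template is an assumption for illustration, not a format prescribed by the dataset:

def build_prompt(row: dict) -> str:
    # Hypothetical template: cross-file snippets first, then the in-file prefix.
    context_block = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in row["context"]
    )
    return f"{context_block}\n\n{row['import_statement']}\n\n{row['cropped_code']}\n"

# A model is then scored on whether it generates row["next_line"];
# row["gold_snippet_index"] points to the context entry that actually
# supplies the needed definition.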
ali-vilab/dreamtalk
core/networks/generator.py
[ { "identifier": "TransformerEncoder", "path": "core/networks/transformer.py", "snippet": "class TransformerEncoder(nn.Module):\r\n\r\n def __init__(self, encoder_layer, num_layers, norm=None):\r\n super().__init__()\r\n self.layers = _get_clones(encoder_layer, num_layers)\r\n self.num_layers = num_layers\r\n self.norm = norm\r\n\r\n def forward(self, src, mask = None, src_key_padding_mask = None, pos = None):\r\n output = src+pos\r\n\r\n for layer in self.layers:\r\n output = layer(output, src_mask=mask,\r\n src_key_padding_mask=src_key_padding_mask, pos=pos)\r\n\r\n if self.norm is not None:\r\n output = self.norm(output)\r\n\r\n return output\r" }, { "identifier": "TransformerEncoderLayer", "path": "core/networks/transformer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\r\n\r\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\r\n activation=\"relu\", normalize_before=False):\r\n super().__init__()\r\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\r\n # Implementation of Feedforward model\r\n self.linear1 = nn.Linear(d_model, dim_feedforward)\r\n self.dropout = nn.Dropout(dropout)\r\n self.linear2 = nn.Linear(dim_feedforward, d_model)\r\n\r\n self.norm1 = nn.LayerNorm(d_model)\r\n self.norm2 = nn.LayerNorm(d_model)\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.dropout2 = nn.Dropout(dropout)\r\n\r\n self.activation = _get_activation_fn(activation)\r\n self.normalize_before = normalize_before\r\n\r\n def with_pos_embed(self, tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n def forward_post(self,\r\n src,\r\n src_mask = None,\r\n src_key_padding_mask = None,\r\n pos = None):\r\n # q = k = self.with_pos_embed(src, pos)\r\n src2 = self.self_attn(src, src, value=src, attn_mask=src_mask,\r\n key_padding_mask=src_key_padding_mask)[0]\r\n src = src + self.dropout1(src2)\r\n src = self.norm1(src)\r\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\r\n src = src + self.dropout2(src2)\r\n src = self.norm2(src)\r\n return src\r\n\r\n def forward_pre(self, src,\r\n src_mask = None,\r\n src_key_padding_mask = None,\r\n pos = None):\r\n src2 = self.norm1(src)\r\n # q = k = self.with_pos_embed(src2, pos)\r\n src2 = self.self_attn(src2, src2, value=src2, attn_mask=src_mask,\r\n key_padding_mask=src_key_padding_mask)[0]\r\n src = src + self.dropout1(src2)\r\n src2 = self.norm2(src)\r\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\r\n src = src + self.dropout2(src2)\r\n return src\r\n\r\n def forward(self, src,\r\n src_mask = None,\r\n src_key_padding_mask = None,\r\n pos = None):\r\n if self.normalize_before:\r\n return self.forward_pre(src, src_mask, src_key_padding_mask, pos)\r\n return self.forward_post(src, src_mask, src_key_padding_mask, pos)\r" }, { "identifier": "PositionalEncoding", "path": "core/networks/transformer.py", "snippet": "class PositionalEncoding(nn.Module):\r\n\r\n def __init__(self, d_hid, n_position=200):\r\n super(PositionalEncoding, self).__init__()\r\n\r\n # Not a parameter\r\n self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))\r\n\r\n def _get_sinusoid_encoding_table(self, n_position, d_hid):\r\n ''' Sinusoid position encoding table '''\r\n # TODO: make it with torch instead of numpy\r\n\r\n def get_position_angle_vec(position):\r\n return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]\r\n\r\n sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in 
range(n_position)])\r\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\r\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\r\n\r\n return torch.FloatTensor(sinusoid_table).unsqueeze(0)\r\n\r\n def forward(self, winsize):\r\n return self.pos_table[:, :winsize].clone().detach()\r" }, { "identifier": "TransformerDecoderLayer", "path": "core/networks/transformer.py", "snippet": "class TransformerDecoderLayer(nn.Module):\r\n\r\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\r\n activation=\"relu\", normalize_before=False):\r\n super().__init__()\r\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\r\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\r\n # Implementation of Feedforward model\r\n self.linear1 = nn.Linear(d_model, dim_feedforward)\r\n self.dropout = nn.Dropout(dropout)\r\n self.linear2 = nn.Linear(dim_feedforward, d_model)\r\n\r\n self.norm1 = nn.LayerNorm(d_model)\r\n self.norm2 = nn.LayerNorm(d_model)\r\n self.norm3 = nn.LayerNorm(d_model)\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.dropout2 = nn.Dropout(dropout)\r\n self.dropout3 = nn.Dropout(dropout)\r\n\r\n self.activation = _get_activation_fn(activation)\r\n self.normalize_before = normalize_before\r\n\r\n def with_pos_embed(self, tensor, pos):\r\n return tensor if pos is None else tensor + pos\r\n\r\n def forward_post(self, tgt, memory,\r\n tgt_mask = None,\r\n memory_mask = None,\r\n tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n # q = k = self.with_pos_embed(tgt, query_pos)\r\n tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask,\r\n key_padding_mask=tgt_key_padding_mask)[0]\r\n tgt = tgt + self.dropout1(tgt2)\r\n tgt = self.norm1(tgt)\r\n tgt2 = self.multihead_attn(query=tgt,\r\n key=memory,\r\n value=memory, attn_mask=memory_mask,\r\n key_padding_mask=memory_key_padding_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt = self.norm2(tgt)\r\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\r\n tgt = tgt + self.dropout3(tgt2)\r\n tgt = self.norm3(tgt)\r\n return tgt\r\n\r\n def forward_pre(self, tgt, memory,\r\n tgt_mask = None,\r\n memory_mask = None,\r\n tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n tgt2 = self.norm1(tgt)\r\n # q = k = self.with_pos_embed(tgt2, query_pos)\r\n tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask,\r\n key_padding_mask=tgt_key_padding_mask)[0]\r\n tgt = tgt + self.dropout1(tgt2)\r\n tgt2 = self.norm2(tgt)\r\n tgt2 = self.multihead_attn(query=tgt2,\r\n key=memory,\r\n value=memory, attn_mask=memory_mask,\r\n key_padding_mask=memory_key_padding_mask)[0]\r\n tgt = tgt + self.dropout2(tgt2)\r\n tgt2 = self.norm3(tgt)\r\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\r\n tgt = tgt + self.dropout3(tgt2)\r\n return tgt\r\n\r\n def forward(self, tgt, memory,\r\n tgt_mask = None,\r\n memory_mask = None,\r\n tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n if self.normalize_before:\r\n return self.forward_pre(tgt, memory, tgt_mask, memory_mask,\r\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\r\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\r\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\r" }, { "identifier": "TransformerDecoder", "path": "core/networks/transformer.py", "snippet": 
"class TransformerDecoder(nn.Module):\r\n\r\n def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):\r\n super().__init__()\r\n self.layers = _get_clones(decoder_layer, num_layers)\r\n self.num_layers = num_layers\r\n self.norm = norm\r\n self.return_intermediate = return_intermediate\r\n\r\n def forward(self, tgt, memory, tgt_mask = None, memory_mask = None, tgt_key_padding_mask = None,\r\n memory_key_padding_mask = None,\r\n pos = None,\r\n query_pos = None):\r\n output = tgt+pos+query_pos\r\n\r\n intermediate = []\r\n\r\n for layer in self.layers:\r\n output = layer(output, memory, tgt_mask=tgt_mask,\r\n memory_mask=memory_mask,\r\n tgt_key_padding_mask=tgt_key_padding_mask,\r\n memory_key_padding_mask=memory_key_padding_mask,\r\n pos=pos, query_pos=query_pos)\r\n if self.return_intermediate:\r\n intermediate.append(self.norm(output))\r\n\r\n if self.norm is not None:\r\n output = self.norm(output)\r\n if self.return_intermediate:\r\n intermediate.pop()\r\n intermediate.append(output)\r\n\r\n if self.return_intermediate:\r\n return torch.stack(intermediate)\r\n\r\n return output.unsqueeze(0)\r" }, { "identifier": "_reset_parameters", "path": "core/utils.py", "snippet": "def _reset_parameters(model):\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)" }, { "identifier": "SelfAttentionPooling", "path": "core/networks/self_attention_pooling.py", "snippet": "class SelfAttentionPooling(nn.Module):\n \"\"\"\n Implementation of SelfAttentionPooling\n Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition\n https://arxiv.org/pdf/2008.01077v1.pdf\n \"\"\"\n\n def __init__(self, input_dim):\n super(SelfAttentionPooling, self).__init__()\n self.W = nn.Sequential(nn.Linear(input_dim, input_dim), Mish(), nn.Linear(input_dim, 1))\n self.softmax = nn.functional.softmax\n\n def forward(self, batch_rep, att_mask=None):\n \"\"\"\n N: batch size, T: sequence length, H: Hidden dimension\n input:\n batch_rep : size (N, T, H)\n attention_weight:\n att_w : size (N, T, 1)\n att_mask:\n att_mask: size (N, T): if True, mask this item.\n return:\n utter_rep: size (N, H)\n \"\"\"\n\n att_logits = self.W(batch_rep).squeeze(-1)\n # (N, T)\n if att_mask is not None:\n att_mask_logits = att_mask.to(dtype=batch_rep.dtype) * -100000.0\n # (N, T)\n att_logits = att_mask_logits + att_logits\n\n att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)\n utter_rep = torch.sum(batch_rep * att_w, dim=1)\n\n return utter_rep" } ]
import torch
import sys
from torch import nn
from .transformer import (
    TransformerEncoder,
    TransformerEncoderLayer,
    PositionalEncoding,
    TransformerDecoderLayer,
    TransformerDecoder,
)
from core.utils import _reset_parameters
from core.networks.self_attention_pooling import SelfAttentionPooling
from configs.default import get_cfg_defaults
3,945
# self.ph_embedding = nn.Embedding(41, ph_embed_dim) # self.increase_embed_dim = nn.Linear(ph_embed_dim, d_model) # def forward(self, x): # """ # Args: # x (_type_): (B, num_frames, window) # Returns: # content: (B, num_frames, window, C_dmodel) # """ # x_embedding = self.ph_embedding(x) # x_embedding = self.increase_embed_dim(x_embedding) # # (B, N, W, C) # B, N, W, C = x_embedding.shape # x_embedding = x_embedding.reshape(B * N, W, C) # x_embedding = x_embedding.permute(1, 0, 2) # # (W, B*N, C) # pos = self.pos_embed(W) # pos = pos.permute(1, 0, 2) # # (W, 1, C) # content = self.encoder(x_embedding, pos=pos) # # (W, B*N, C) # content = content.permute(1, 0, 2).reshape(B, N, W, C) # # (B, N, W, C) # return content class ContentW2VEncoder(nn.Module): def __init__( self, d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, pos_embed_len=80, ph_embed_dim=128, ): super().__init__() encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) encoder_norm = nn.LayerNorm(d_model) if normalize_before else None self.encoder = TransformerEncoder( encoder_layer, num_encoder_layers, encoder_norm ) _reset_parameters(self.encoder) self.pos_embed = PositionalEncoding(d_model, pos_embed_len) self.increase_embed_dim = nn.Linear(1024, d_model) def forward(self, x): """ Args: x (_type_): (B, num_frames, window, C_wav2vec) Returns: content: (B, num_frames, window, C_dmodel) """ x_embedding = self.increase_embed_dim( x ) # [16, 64, 11, 1024] -> [16, 64, 11, 256] # (B, N, W, C) B, N, W, C = x_embedding.shape x_embedding = x_embedding.reshape(B * N, W, C) x_embedding = x_embedding.permute(1, 0, 2) # [11, 1024, 256] # (W, B*N, C) pos = self.pos_embed(W) pos = pos.permute(1, 0, 2) # [11, 1, 256] # (W, 1, C) content = self.encoder(x_embedding, pos=pos) # [11, 1024, 256] # (W, B*N, C) content = content.permute(1, 0, 2).reshape(B, N, W, C) # (B, N, W, C) return content class StyleEncoder(nn.Module): def __init__( self, d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, pos_embed_len=80, input_dim=128, aggregate_method="average", ): super().__init__() encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) encoder_norm = nn.LayerNorm(d_model) if normalize_before else None self.encoder = TransformerEncoder( encoder_layer, num_encoder_layers, encoder_norm ) _reset_parameters(self.encoder) self.pos_embed = PositionalEncoding(d_model, pos_embed_len) self.increase_embed_dim = nn.Linear(input_dim, d_model) self.aggregate_method = None if aggregate_method == "self_attention_pooling":
# class ContentEncoder(nn.Module): # def __init__( # self, # d_model=512, # nhead=8, # num_encoder_layers=6, # dim_feedforward=2048, # dropout=0.1, # activation="relu", # normalize_before=False, # pos_embed_len=80, # ph_embed_dim=128, # ): # super().__init__() # encoder_layer = TransformerEncoderLayer( # d_model, nhead, dim_feedforward, dropout, activation, normalize_before # ) # encoder_norm = nn.LayerNorm(d_model) if normalize_before else None # self.encoder = TransformerEncoder( # encoder_layer, num_encoder_layers, encoder_norm # ) # _reset_parameters(self.encoder) # self.pos_embed = PositionalEncoding(d_model, pos_embed_len) # self.ph_embedding = nn.Embedding(41, ph_embed_dim) # self.increase_embed_dim = nn.Linear(ph_embed_dim, d_model) # def forward(self, x): # """ # Args: # x (_type_): (B, num_frames, window) # Returns: # content: (B, num_frames, window, C_dmodel) # """ # x_embedding = self.ph_embedding(x) # x_embedding = self.increase_embed_dim(x_embedding) # # (B, N, W, C) # B, N, W, C = x_embedding.shape # x_embedding = x_embedding.reshape(B * N, W, C) # x_embedding = x_embedding.permute(1, 0, 2) # # (W, B*N, C) # pos = self.pos_embed(W) # pos = pos.permute(1, 0, 2) # # (W, 1, C) # content = self.encoder(x_embedding, pos=pos) # # (W, B*N, C) # content = content.permute(1, 0, 2).reshape(B, N, W, C) # # (B, N, W, C) # return content class ContentW2VEncoder(nn.Module): def __init__( self, d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, pos_embed_len=80, ph_embed_dim=128, ): super().__init__() encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) encoder_norm = nn.LayerNorm(d_model) if normalize_before else None self.encoder = TransformerEncoder( encoder_layer, num_encoder_layers, encoder_norm ) _reset_parameters(self.encoder) self.pos_embed = PositionalEncoding(d_model, pos_embed_len) self.increase_embed_dim = nn.Linear(1024, d_model) def forward(self, x): """ Args: x (_type_): (B, num_frames, window, C_wav2vec) Returns: content: (B, num_frames, window, C_dmodel) """ x_embedding = self.increase_embed_dim( x ) # [16, 64, 11, 1024] -> [16, 64, 11, 256] # (B, N, W, C) B, N, W, C = x_embedding.shape x_embedding = x_embedding.reshape(B * N, W, C) x_embedding = x_embedding.permute(1, 0, 2) # [11, 1024, 256] # (W, B*N, C) pos = self.pos_embed(W) pos = pos.permute(1, 0, 2) # [11, 1, 256] # (W, 1, C) content = self.encoder(x_embedding, pos=pos) # [11, 1024, 256] # (W, B*N, C) content = content.permute(1, 0, 2).reshape(B, N, W, C) # (B, N, W, C) return content class StyleEncoder(nn.Module): def __init__( self, d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, pos_embed_len=80, input_dim=128, aggregate_method="average", ): super().__init__() encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) encoder_norm = nn.LayerNorm(d_model) if normalize_before else None self.encoder = TransformerEncoder( encoder_layer, num_encoder_layers, encoder_norm ) _reset_parameters(self.encoder) self.pos_embed = PositionalEncoding(d_model, pos_embed_len) self.increase_embed_dim = nn.Linear(input_dim, d_model) self.aggregate_method = None if aggregate_method == "self_attention_pooling":
self.aggregate_method = SelfAttentionPooling(d_model)
6
2023-12-28 05:39:31+00:00
8k
jiawei-ren/dreamgaussian4d
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n if xyzs is None:\n xyzs = xyz[None, ...]\n rgbs = rgb[None, ...]\n errors = error[None, ...]\n else:\n xyzs = np.append(xyzs, xyz[None, ...], axis=0)\n rgbs = np.append(rgbs, rgb[None, ...], axis=0)\n errors = np.append(errors, error[None, ...], axis=0)\n return xyzs, rgbs, errors" }, { "identifier": "Load_hyper_data", "path": "scene/hyper_loader.py", "snippet": "class Load_hyper_data(Dataset):\n def __init__(self, \n datadir, \n ratio=1.0,\n use_bg_points=False,\n 
split=\"train\"\n ):\n \n from .utils import Camera\n datadir = os.path.expanduser(datadir)\n with open(f'{datadir}/scene.json', 'r') as f:\n scene_json = json.load(f)\n with open(f'{datadir}/metadata.json', 'r') as f:\n meta_json = json.load(f)\n with open(f'{datadir}/dataset.json', 'r') as f:\n dataset_json = json.load(f)\n\n self.near = scene_json['near']\n self.far = scene_json['far']\n self.coord_scale = scene_json['scale']\n self.scene_center = scene_json['center']\n\n self.all_img = dataset_json['ids']\n self.val_id = dataset_json['val_ids']\n self.split = split\n if len(self.val_id) == 0:\n self.i_train = np.array([i for i in np.arange(len(self.all_img)) if\n (i%4 == 0)])\n self.i_test = self.i_train+2\n self.i_test = self.i_test[:-1,]\n else:\n self.train_id = dataset_json['train_ids']\n self.i_test = []\n self.i_train = []\n for i in range(len(self.all_img)):\n id = self.all_img[i]\n if id in self.val_id:\n self.i_test.append(i)\n if id in self.train_id:\n self.i_train.append(i)\n \n\n self.all_cam = [meta_json[i]['camera_id'] for i in self.all_img]\n self.all_time = [meta_json[i]['warp_id'] for i in self.all_img]\n max_time = max(self.all_time)\n self.all_time = [meta_json[i]['warp_id']/max_time for i in self.all_img]\n self.selected_time = set(self.all_time)\n self.ratio = ratio\n self.max_time = max(self.all_time)\n self.min_time = min(self.all_time)\n self.i_video = [i for i in range(len(self.all_img))]\n self.i_video.sort()\n # all poses\n self.all_cam_params = []\n for im in self.all_img:\n camera = Camera.from_json(f'{datadir}/camera/{im}.json')\n camera = camera.scale(ratio)\n camera.position -= self.scene_center\n camera.position *= self.coord_scale\n self.all_cam_params.append(camera)\n\n self.all_img = [f'{datadir}/rgb/{int(1/ratio)}x/{i}.png' for i in self.all_img]\n self.h, self.w = self.all_cam_params[0].image_shape\n self.map = {}\n self.image_one = Image.open(self.all_img[0])\n self.image_one_torch = PILtoTorch(self.image_one,None).to(torch.float32)\n \n def __getitem__(self, index):\n if self.split == \"train\":\n return self.load_raw(self.i_train[index])\n \n elif self.split == \"test\":\n return self.load_raw(self.i_test[index])\n elif self.split == \"video\":\n return self.load_video(self.i_video[index])\n def __len__(self):\n if self.split == \"train\":\n return len(self.i_train)\n elif self.split == \"test\":\n return len(self.i_test)\n elif self.split == \"video\":\n # return len(self.i_video)\n return len(self.video_v2)\n def load_video(self, idx):\n if idx in self.map.keys():\n return self.map[idx]\n camera = self.all_cam_params[idx]\n w = self.image_one.size[0]\n h = self.image_one.size[1]\n # image = PILtoTorch(image,None)\n # image = image.to(torch.float32)\n time = self.all_time[idx]\n R = camera.orientation.T\n T = - camera.position @ R\n FovY = focal2fov(camera.focal_length, self.h)\n FovX = focal2fov(camera.focal_length, self.w)\n image_path = \"/\".join(self.all_img[idx].split(\"/\")[:-1])\n image_name = self.all_img[idx].split(\"/\")[-1]\n caminfo = CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=self.image_one_torch,\n image_path=image_path, image_name=image_name, width=w, height=h, time=time,\n )\n self.map[idx] = caminfo\n return caminfo \n def load_raw(self, idx):\n if idx in self.map.keys():\n return self.map[idx]\n camera = self.all_cam_params[idx]\n image = Image.open(self.all_img[idx])\n w = image.size[0]\n h = image.size[1]\n image = PILtoTorch(image,None)\n image = image.to(torch.float32)\n time = self.all_time[idx]\n R = 
camera.orientation.T\n T = - camera.position @ R\n FovY = focal2fov(camera.focal_length, self.h)\n FovX = focal2fov(camera.focal_length, self.w)\n image_path = \"/\".join(self.all_img[idx].split(\"/\")[:-1])\n image_name = self.all_img[idx].split(\"/\")[-1]\n caminfo = CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,\n image_path=image_path, image_name=image_name, width=w, height=h, time=time,\n )\n self.map[idx] = caminfo\n return caminfo " }, { "identifier": "format_hyper_data", "path": "scene/hyper_loader.py", "snippet": "def format_hyper_data(data_class, split):\n if split == \"train\":\n data_idx = data_class.i_train\n elif split == \"test\":\n data_idx = data_class.i_test\n # dataset = data_class.copy()\n # dataset.mode = split\n cam_infos = []\n for uid, index in tqdm(enumerate(data_idx)):\n camera = data_class.all_cam_params[index]\n # image = Image.open(data_class.all_img[index])\n # image = PILtoTorch(image,None)\n time = data_class.all_time[index]\n R = camera.orientation.T\n T = - camera.position @ R\n FovY = focal2fov(camera.focal_length, data_class.h)\n FovX = focal2fov(camera.focal_length, data_class.w)\n image_path = \"/\".join(data_class.all_img[index].split(\"/\")[:-1])\n image_name = data_class.all_img[index].split(\"/\")[-1]\n cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=None,\n image_path=image_path, image_name=image_name, width=int(data_class.w), height=int(data_class.h), time=time,\n )\n cam_infos.append(cam_info)\n return cam_infos\n # matrix = np.linalg.inv(np.array(poses))\n # R = -np.transpose(matrix[:3,:3])\n # R[:,0] = -R[:,0]\n # T = -matrix[:3, 3]" }, { "identifier": "getWorld2View2", "path": "utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int, args):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float, time_line: int):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def compute_deformation(self,time):\n def load_model(self, path):\n def save_deformation(self, path):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def 
cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation, new_deformation_table):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def prune(self, max_grad, min_opacity, extent, max_screen_size):\n def densify(self, max_grad, min_opacity, extent, max_screen_size):\n def standard_constaint(self):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def update_deformation_table(self,threshold):\n def print_deformation_weight_grad(self):\n def _plane_regulation(self):\n def _time_regulation(self):\n def _l1_regulation(self):\n def compute_regulation(self, time_smoothness_weight, l1_time_planes_weight, plane_tv_weight):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)" }, { "identifier": "PILtoTorch", "path": "utils/general_utils.py", "snippet": "def PILtoTorch(pil_image, resolution):\n if resolution is not None:\n resized_image_PIL = pil_image.resize(resolution)\n else:\n resized_image_PIL = pil_image\n resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0\n if len(resized_image.shape) == 3:\n return resized_image.permute(2, 0, 1)\n else:\n return resized_image.unsqueeze(dim=-1).permute(2, 0, 1)" } ]
import os import sys import torchvision.transforms as transforms import copy import numpy as np import torch import json from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from scene.hyper_loader import Load_hyper_data, format_hyper_data from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from utils.general_utils import PILtoTorch from tqdm import tqdm from scene.neural_3D_dataset_NDC import Neural3D_NDC_Dataset
5,816
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int R: np.array T: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str width: int height: int time : float class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list video_cameras: list nerf_normalization: dict ply_path: str maxtime: int def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model in ["SIMPLE_PINHOLE", "SIMPLE_RADIAL"]: focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) elif intr.model == "OPENCV": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path)
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int R: np.array T: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str width: int height: int time : float class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list video_cameras: list nerf_normalization: dict ply_path: str maxtime: int def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model in ["SIMPLE_PINHOLE", "SIMPLE_RADIAL"]: focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) elif intr.model == "OPENCV": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path)
image = PILtoTorch(image,None)
14
2023-12-28 08:17:40+00:00
8k
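The record above builds its CameraInfo entries by converting focal lengths to fields of view with focal2fov (and back with fov2focal), exactly as defined in its context snippets. A minimal, self-contained sketch of that conversion and its round-trip property follows; the focal length and image width are illustrative values, not taken from the dataset.

import math

def focal2fov(focal, pixels):
    # FoV (radians) subtended by an image dimension of `pixels` at focal length `focal`
    return 2 * math.atan(pixels / (2 * focal))

def fov2focal(fov, pixels):
    # Inverse mapping: recover the focal length from the FoV and the image dimension
    return pixels / (2 * math.tan(fov / 2))

focal_length = 1111.0   # hypothetical focal length in pixels
width = 800             # hypothetical image width in pixels
fov_x = focal2fov(focal_length, width)
assert abs(fov2focal(fov_x, width) - focal_length) < 1e-6  # round trip recovers the focal length

In the record, FovX comes from applying this conversion to the image width and FovY from the image height.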
oppo-us-research/SpacetimeGaussians
script/pre_immersive_distorted.py
[ { "identifier": "posetow2c_matrcs", "path": "thirdparty/gaussian_splatting/utils/my_utils.py", "snippet": "def posetow2c_matrcs(poses):\n tmp = inversestep4(inversestep3(inversestep2(inversestep1(poses))))\n N = tmp.shape[0]\n ret = []\n for i in range(N):\n ret.append(tmp[i])\n return ret" }, { "identifier": "rotmat2qvec", "path": "thirdparty/gaussian_splatting/utils/my_utils.py", "snippet": "def rotmat2qvec(R):\n Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat\n K = np.array([\n [Rxx - Ryy - Rzz, 0, 0, 0],\n [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],\n [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],\n [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0\n eigvals, eigvecs = np.linalg.eigh(K)\n qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]\n if qvec[0] < 0:\n qvec *= -1\n return qvec" }, { "identifier": "qvec2rotmat", "path": "thirdparty/gaussian_splatting/utils/my_utils.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "focal2fov", "path": "thirdparty/gaussian_splatting/utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "thirdparty/gaussian_splatting/utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "getcolmapsingleimdistort", "path": "thirdparty/gaussian_splatting/helper3dg.py", "snippet": "def getcolmapsingleimdistort(folder, offset):\n \n folder = os.path.join(folder, \"colmap_\" + str(offset))\n assert os.path.exists(folder)\n\n dbfile = os.path.join(folder, \"input.db\")\n inputimagefolder = os.path.join(folder, \"input\")\n distortedmodel = os.path.join(folder, \"distorted/sparse\")\n step2model = os.path.join(folder, \"tmp\")\n if not os.path.exists(step2model):\n os.makedirs(step2model)\n\n manualinputfolder = os.path.join(folder, \"manual\")\n if not os.path.exists(distortedmodel):\n os.makedirs(distortedmodel)\n\n featureextract = \"colmap feature_extractor SiftExtraction.max_image_size 6000 --database_path \" + dbfile+ \" --image_path \" + inputimagefolder \n \n exit_code = os.system(featureextract)\n if exit_code != 0:\n exit(exit_code)\n \n\n featurematcher = \"colmap exhaustive_matcher --database_path \" + dbfile\n exit_code = os.system(featurematcher)\n if exit_code != 0:\n exit(exit_code)\n\n\n triandmap = \"colmap point_triangulator --database_path \"+ dbfile + \" --image_path \"+ inputimagefolder + \" --output_path \" + distortedmodel \\\n + \" --input_path \" + manualinputfolder + \" --Mapper.ba_global_function_tolerance=0.000001\"\n \n exit_code = os.system(triandmap)\n if exit_code != 0:\n exit(exit_code)\n print(triandmap)\n\n img_undist_cmd = \"colmap\" + \" image_undistorter --image_path \" + inputimagefolder + \" --input_path \" + distortedmodel + \" --output_path \" + folder \\\n + \" --output_type COLMAP \" # --blank_pixels 1\n exit_code = os.system(img_undist_cmd)\n if exit_code != 0:\n exit(exit_code)\n print(img_undist_cmd)\n\n removeinput = \"rm -r \" + inputimagefolder\n exit_code = os.system(removeinput)\n if exit_code != 0:\n 
exit(exit_code)\n\n files = os.listdir(folder + \"/sparse\")\n os.makedirs(folder + \"/sparse/0\", exist_ok=True)\n for file in files:\n if file == '0':\n continue\n source_file = os.path.join(folder, \"sparse\", file)\n destination_file = os.path.join(folder, \"sparse\", \"0\", file)\n shutil.move(source_file, destination_file)" }, { "identifier": "extractframes", "path": "script/pre_n3d.py", "snippet": "def extractframes(videopath):\n cam = cv2.VideoCapture(videopath)\n ctr = 0\n sucess = True\n for i in range(300):\n if os.path.exists(os.path.join(videopath.replace(\".mp4\", \"\"), str(i) + \".png\")):\n ctr += 1\n if ctr == 300 or ctr == 150: # 150 for 04_truck \n print(\"already extracted all the frames, skip extracting\")\n return\n ctr = 0\n while ctr < 300:\n try:\n _, frame = cam.read()\n\n savepath = os.path.join(videopath.replace(\".mp4\", \"\"), str(ctr) + \".png\")\n if not os.path.exists(videopath.replace(\".mp4\", \"\")) :\n os.makedirs(videopath.replace(\".mp4\", \"\"))\n\n cv2.imwrite(savepath, frame)\n ctr += 1 \n except:\n sucess = False\n print(\"error\")\n cam.release()\n return" } ]
import os import cv2 import glob import tqdm import numpy as np import shutil import pickle import argparse import natsort import struct import pickle import json import cv2 import numpy as np import os import json from scipy.spatial.transform import Rotation from thirdparty.gaussian_splatting.utils.my_utils import posetow2c_matrcs, rotmat2qvec, qvec2rotmat from thirdparty.gaussian_splatting.utils.graphics_utils import focal2fov, fov2focal from thirdparty.colmap.pre_colmap import * from thirdparty.gaussian_splatting.helper3dg import getcolmapsingleimdistort from script.pre_n3d import extractframes
4,815
knew = np.zeros((3, 3), dtype=np.float32) knew[0,0] = focalscale * intrinsics[0,0] knew[1,1] = focalscale * intrinsics[1,1] knew[0,2] = view['principal_point'][0] # cx fixed half of the width knew[1,2] = view['principal_point'][1] # knew[2,2] = 1.0 map1, map2 = cv2.fisheye.initUndistortRectifyMap(intrinsics, dis_cef, R=None, P=knew, size=(w, h), m1type=cv2.CV_32FC1) undistorted_image = cv2.remap(image, map1, map2, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT) undistorted_image = undistorted_image.clip(0,255.0).astype(np.uint8) cv2.imwrite(imagesavepath, undistorted_image) if offset == 0: # distortingflow = getdistortedflow(image, intrinsics, dis_cef, "linear", crop_output=False, scale=1.0, knew=knew) print("saved distortion mappers") np.save(os.path.join(video, folder + ".npy"), distortingflow) def softlinkdataset(originalpath, path, srcscene, scene): videofolderlist = glob.glob(originalpath + "camera_*/") if not os.path.exists(path): os.makedirs(path) for videofolder in videofolderlist: newlink = os.path.join(path, videofolder.split("/")[-2]) if os.path.exists(newlink): print("already exists do not make softlink again") quit() assert not os.path.exists(newlink) cmd = " ln -s " + videofolder + " " + newlink os.system(cmd) print(cmd) originalmodel = originalpath + "models.json" newmodel = path + "models.json" shutil.copy(originalmodel, newmodel) if __name__ == "__main__" : parser = argparse.ArgumentParser() parser.add_argument("--videopath", default="", type=str) parser.add_argument("--startframe", default=0, type=int) parser.add_argument("--endframe", default=50, type=int) args = parser.parse_args() videopath = args.videopath startframe = args.startframe endframe = args.endframe if startframe >= endframe: print("start frame must smaller than end frame") quit() if startframe < 0 or endframe > 300: print("frame must in range 0-300") quit() if not os.path.exists(videopath): print("path not exist") quit() if not videopath.endswith("/"): videopath = videopath + "/" srcscene = videopath.split("/")[-2] if srcscene not in Immersiveseven: print("scene not in Immersiveseven", Immersiveseven) print("Please check if the scene name is correct") quit() if "04_Trucks" in videopath: print('04_Trucks') if endframe > 150: endframe = 150 postfix = "_dist" # distored model scene = srcscene + postfix originalpath = videopath #" originalvideo = originalpath# 43 1 path = videopath[:-1] + postfix video = originalpath # 43 1 scale = immmersivescaledict[scene] videoslist = glob.glob(originalvideo + "*.mp4") for v in tqdm.tqdm(videoslist): extractframes(v) try: softlinkdataset(originalpath, path, srcscene, scene) except: print("softlink failed") quit() try: imageundistort(video, offsetlist=[i for i in range(startframe,endframe)],focalscale=scale, fixfocal=None) except: print("undistort failed") quit() try: for offset in tqdm.tqdm(range(startframe, endframe)): convertmodel2dbfiles(video, offset=offset, scale=scale, removeverythingexceptinput=False) except: convertmodel2dbfiles(video, offset=offset, scale=scale, removeverythingexceptinput=True) print("create colmap input failed, better clean the data and try again") quit() for offset in range(startframe, endframe):
# MIT License # Copyright (c) 2023 OPPO # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. SCALEDICT = {} # SCALEDICT["01_Welder_S11"] = 0.35 # ["04_Truck", "09_Alexa", "10_Alexa", "11_Alexa", "12_Cave"] Immersiveseven = ["01_Welder", "02_Flames", "04_Truck", "09_Alexa", "10_Alexa", "11_Alexa", "12_Cave"] immmersivescaledict = {} immmersivescaledict["01_Welder"] = 0.36 immmersivescaledict["02_Flames"] = 0.35 immmersivescaledict["04_Truck"] = 0.36 immmersivescaledict["09_Alexa"] = 0.36 immmersivescaledict["10_Alexa"] = 0.36 immmersivescaledict["11_Alexa"] = 0.36 immmersivescaledict["12_Cave"] = 0.36 for scene in Immersiveseven: immmersivescaledict[scene + "_dist"] =immmersivescaledict[scene] SCALEDICT[scene + "_dist"] = immmersivescaledict[scene] #immmersivescaledict[scene] # to be checked with large scale def convertmodel2dbfiles(path, offset=0, scale=1.0, removeverythingexceptinput=False): projectfolder = os.path.join(path, "colmap_" + str(offset)) manualfolder = os.path.join(projectfolder, "manual") if os.path.exists(projectfolder) and removeverythingexceptinput: print("already exists colmap folder, better remove it and create a new one") inputfolder = os.path.join(projectfolder, "input") # remove everything except input folder for file in os.listdir(projectfolder): if file == "input": continue file_path = os.path.join(projectfolder, file) if os.path.isfile(file_path): os.remove(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) if not os.path.exists(manualfolder): os.makedirs(manualfolder) savetxt = os.path.join(manualfolder, "images.txt") savecamera = os.path.join(manualfolder, "cameras.txt") savepoints = os.path.join(manualfolder, "points3D.txt") imagetxtlist = [] cameratxtlist = [] if os.path.exists(os.path.join(projectfolder, "input.db")): os.remove(os.path.join(projectfolder, "input.db")) db = COLMAPDatabase.connect(os.path.join(projectfolder, "input.db")) db.create_tables() with open(os.path.join(video + "models.json"), "r") as f: meta = json.load(f) for idx , camera in enumerate(meta): cameraname = camera['name'] # camera_0001 view = camera focolength = camera['focal_length'] width, height = camera['width'], camera['height'] principlepoint =[0,0] principlepoint[0] = view['principal_point'][0] principlepoint[1] = view['principal_point'][1] distort1 = view['radial_distortion'][0] distort2 = view['radial_distortion'][1] distort3 = 0 distort4 = 0 #view['radial_distortion'][3] R = Rotation.from_rotvec(view['orientation']).as_matrix() t = np.array(view['position'])[:, np.newaxis] w2c = 
np.concatenate((R, -np.dot(R, t)), axis=1) colmapR = w2c[:3, :3] T = w2c[:3, 3] K = np.array([[focolength, 0, principlepoint[0]], [0, focolength, principlepoint[1]], [0, 0, 1]]) Knew = K.copy() Knew[0,0] = K[0,0] * float(scale) Knew[1,1] = K[1,1] * float(scale) Knew[0,2] = view['principal_point'][0]#width * 0.5 #/ 2 Knew[1,2] = view['principal_point'][1]#height * 0.5 #/ 2 # transformation = np.array([[2, 0.0, 0.5], # [0.0, 2, 0.5], # [0.0, 0.0, 1.0]]) # Knew = np.dot(transformation, Knew) newfocalx = Knew[0,0] newfocaly = Knew[1,1] newcx = Knew[0,2] newcy = Knew[1,2] colmapQ = rotmat2qvec(colmapR) imageid = str(idx+1) cameraid = imageid pngname = cameraname + ".png" line = imageid + " " for j in range(4): line += str(colmapQ[j]) + " " for j in range(3): line += str(T[j]) + " " line = line + cameraid + " " + pngname + "\n" empltyline = "\n" imagetxtlist.append(line) imagetxtlist.append(empltyline) newwidth = width newheight = height params = np.array((newfocalx , newfocaly, newcx, newcy,)) camera_id = db.add_camera(1, newwidth, newheight, params) # RADIAL_FISHEYE # width and height # #cameraline = str(i+1) + " " + "PINHOLE " + str(width) + " " + str(height) + " " + str(focolength) + " " + str(focolength) + " " + str(W//2) + " " + str(H//2) + "\n" cameraline = str(idx+1) + " " + "PINHOLE " + str(newwidth) + " " + str(newheight) + " " + str(newfocalx) + " " + str(newfocaly) + " " + str(newcx) + " " + str(newcy) + "\n" cameratxtlist.append(cameraline) image_id = db.add_image(pngname, camera_id, prior_q=np.array((colmapQ[0], colmapQ[1], colmapQ[2], colmapQ[3])), prior_t=np.array((T[0], T[1], T[2])), image_id=idx+1) db.commit() print("commited one") db.close() with open(savetxt, "w") as f: for line in imagetxtlist : f.write(line) with open(savecamera, "w") as f: for line in cameratxtlist : f.write(line) with open(savepoints, "w") as f: pass #https://github.com/Synthesis-AI-Dev/fisheye-distortion def getdistortedflow(img: np.ndarray, cam_intr: np.ndarray, dist_coeff: np.ndarray, mode: str, crop_output: bool = True, crop_type: str = "corner", scale: float =2, cxoffset=None, cyoffset=None, knew=None): assert cam_intr.shape == (3, 3) assert dist_coeff.shape == (4,) imshape = img.shape if len(imshape) == 3: h, w, chan = imshape elif len(imshape) == 2: h, w = imshape chan = 1 else: raise RuntimeError(f'Image has unsupported shape: {imshape}. 
Valid shapes: (H, W), (H, W, N)') imdtype = img.dtype dstW = int(w ) dstH = int(h ) # Get array of pixel co-ords xs = np.arange(dstW) ys = np.arange(dstH) xs = xs #- 0.5 # + cxoffset / 2 ys = ys #- 0.5 # + cyoffset / 2 xv, yv = np.meshgrid(xs, ys) img_pts = np.stack((xv, yv), axis=2) # shape (H, W, 2) img_pts = img_pts.reshape((-1, 1, 2)).astype(np.float32) # shape: (N, 1, 2), in undistorted image coordiante undistorted_px = cv2.fisheye.undistortPoints(img_pts, cam_intr, dist_coeff, None, knew) # shape: (N, 1, 2) undistorted_px = undistorted_px.reshape((dstH, dstW, 2)) # Shape: (H, W, 2) undistorted_px = np.flip(undistorted_px, axis=2) # flip x, y coordinates of the points as cv2 is height first undistorted_px[:, :, 0] = undistorted_px[:, :, 0] #+ 0.5*cyoffset #- 0.25*cyoffset #orginalx (0, 1) undistorted_px[:, :, 1] = undistorted_px[:, :, 1] #+ 0.5*cyoffset #- 0.25*cxoffset #orginaly (0, 1) undistorted_px[:, :, 0] = undistorted_px[:, :, 0] / (h-1)#(h-1) #orginalx (0, 1) undistorted_px[:, :, 1] = undistorted_px[:, :, 1] / (w-1)#(w-1) #orginaly (0, 1) undistorted_px = 2 * (undistorted_px - 0.5) #to -1 to 1 for gridsample undistorted_px[:, :, 0] = undistorted_px[:, :, 0] #orginalx (0, 1) undistorted_px[:, :, 1] = undistorted_px[:, :, 1] #orginaly (0, 1) undistorted_px = undistorted_px[:,:,::-1] # yx to xy for grid sample return undistorted_px def imageundistort(video, offsetlist=[0],focalscale=1.0, fixfocal=None): with open(os.path.join(video + "models.json"), "r") as f: meta = json.load(f) for idx , camera in enumerate(meta): folder = camera['name'] # camera_0001 view = camera intrinsics = np.array([[view['focal_length'], 0.0, view['principal_point'][0]], [0.0, view['focal_length'], view['principal_point'][1]], [0.0, 0.0, 1.0]]) dis_cef = np.zeros((4)) dis_cef[:2] = np.array(view['radial_distortion'])[:2] print("done one camera") map1, map2 = None, None for offset in offsetlist: videofolder = os.path.join(video, folder) imagepath = os.path.join(videofolder, str(offset) + ".png") imagesavepath = os.path.join(video, "colmap_" + str(offset), "input", folder + ".png") inputimagefolder = os.path.join(video, "colmap_" + str(offset), "input") if not os.path.exists(inputimagefolder): os.makedirs(inputimagefolder) assert os.path.exists(imagepath) image = cv2.imread(imagepath).astype(np.float32) #/ 255.0 h, w = image.shape[:2] image_size = (w, h) knew = np.zeros((3, 3), dtype=np.float32) knew[0,0] = focalscale * intrinsics[0,0] knew[1,1] = focalscale * intrinsics[1,1] knew[0,2] = view['principal_point'][0] # cx fixed half of the width knew[1,2] = view['principal_point'][1] # knew[2,2] = 1.0 map1, map2 = cv2.fisheye.initUndistortRectifyMap(intrinsics, dis_cef, R=None, P=knew, size=(w, h), m1type=cv2.CV_32FC1) undistorted_image = cv2.remap(image, map1, map2, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT) undistorted_image = undistorted_image.clip(0,255.0).astype(np.uint8) cv2.imwrite(imagesavepath, undistorted_image) if offset == 0: # distortingflow = getdistortedflow(image, intrinsics, dis_cef, "linear", crop_output=False, scale=1.0, knew=knew) print("saved distortion mappers") np.save(os.path.join(video, folder + ".npy"), distortingflow) def softlinkdataset(originalpath, path, srcscene, scene): videofolderlist = glob.glob(originalpath + "camera_*/") if not os.path.exists(path): os.makedirs(path) for videofolder in videofolderlist: newlink = os.path.join(path, videofolder.split("/")[-2]) if os.path.exists(newlink): print("already exists do not make softlink again") quit() assert not 
os.path.exists(newlink) cmd = " ln -s " + videofolder + " " + newlink os.system(cmd) print(cmd) originalmodel = originalpath + "models.json" newmodel = path + "models.json" shutil.copy(originalmodel, newmodel) if __name__ == "__main__" : parser = argparse.ArgumentParser() parser.add_argument("--videopath", default="", type=str) parser.add_argument("--startframe", default=0, type=int) parser.add_argument("--endframe", default=50, type=int) args = parser.parse_args() videopath = args.videopath startframe = args.startframe endframe = args.endframe if startframe >= endframe: print("start frame must smaller than end frame") quit() if startframe < 0 or endframe > 300: print("frame must in range 0-300") quit() if not os.path.exists(videopath): print("path not exist") quit() if not videopath.endswith("/"): videopath = videopath + "/" srcscene = videopath.split("/")[-2] if srcscene not in Immersiveseven: print("scene not in Immersiveseven", Immersiveseven) print("Please check if the scene name is correct") quit() if "04_Trucks" in videopath: print('04_Trucks') if endframe > 150: endframe = 150 postfix = "_dist" # distored model scene = srcscene + postfix originalpath = videopath #" originalvideo = originalpath# 43 1 path = videopath[:-1] + postfix video = originalpath # 43 1 scale = immmersivescaledict[scene] videoslist = glob.glob(originalvideo + "*.mp4") for v in tqdm.tqdm(videoslist): extractframes(v) try: softlinkdataset(originalpath, path, srcscene, scene) except: print("softlink failed") quit() try: imageundistort(video, offsetlist=[i for i in range(startframe,endframe)],focalscale=scale, fixfocal=None) except: print("undistort failed") quit() try: for offset in tqdm.tqdm(range(startframe, endframe)): convertmodel2dbfiles(video, offset=offset, scale=scale, removeverythingexceptinput=False) except: convertmodel2dbfiles(video, offset=offset, scale=scale, removeverythingexceptinput=True) print("create colmap input failed, better clean the data and try again") quit() for offset in range(startframe, endframe):
getcolmapsingleimdistort(video, offset=offset)
5
2023-12-28 04:16:32+00:00
8k
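convertmodel2dbfiles in the record above turns each models.json camera (an axis-angle orientation plus a world-space position) into a COLMAP-style world-to-camera pose. A small sketch of that pose construction; the orientation and position numbers are purely illustrative assumptions.

import numpy as np
from scipy.spatial.transform import Rotation

orientation = [0.10, -0.20, 0.05]   # hypothetical axis-angle rotation vector (radians)
position = [1.0, 0.5, -2.0]         # hypothetical camera center in world coordinates

R = Rotation.from_rotvec(orientation).as_matrix()   # rotation matrix, used directly as the w2c rotation in the record
t = np.array(position)[:, np.newaxis]               # camera center as a column vector
w2c = np.concatenate((R, -np.dot(R, t)), axis=1)    # [R | -R @ t], shape (3, 4)
colmapR, T = w2c[:3, :3], w2c[:3, 3]                # rotation block and translation written to images.txt / the COLMAP database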
kinggongzilla/ai-clone-whatsapp
train.py
[ { "identifier": "fsdp_config", "path": "configs/fsdp.py", "snippet": "class fsdp_config:\n mixed_precision: bool=True\n use_fp16: bool=False\n sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD\n checkpoint_type: StateDictType = StateDictType.SHARDED_STATE_DICT # alternatively can use SHARDED_STATE_DICT save one file per rank, and can resize the world-size.\n fsdp_activation_checkpointing: bool=True\n fsdp_cpu_offload: bool=False\n pure_bf16: bool = False\n optimizer: str= \"AdamW\"" }, { "identifier": "train_config", "path": "configs/training.py", "snippet": "class train_config:\n whatsapp_username: str=\"\" # your own whatsapp user name as it is in the chat .txt files\n model_name: str=\"mistralai/Mistral-7B-Instruct-v0.2\"\n enable_fsdp: bool=False\n low_cpu_fsdp: bool=False\n run_validation: bool=False\n batch_size_training: int=1\n batching_strategy: str=\"packing\" #alternative: padding\n context_length: int=4096\n gradient_accumulation_steps: int=1\n gradient_clipping: bool = False\n gradient_clipping_threshold: float = 1.0\n num_epochs: int=1\n num_workers_dataloader: int=1\n lr: float=1e-4\n weight_decay: float=0.0\n gamma: float= 0.85\n seed: int=42\n use_fp16: bool=True\n mixed_precision: bool=True\n val_batch_size: int=1\n dataset = \"custom_dataset\"\n data_dir: str = \"data/preprocessing/processed_chats\"\n peft_method: str = \"lora\" # None , llama_adapter, prefix\n use_peft: bool=True\n output_dir: str = \"checkpoints\"\n freeze_layers: bool = False\n num_freeze_layers: int = 1\n quantization: bool = True\n one_gpu: bool = False\n save_model: bool = True\n dist_checkpoint_root_folder: str=\"PATH/to/save/FSDP/model\" # will be used if using FSDP\n dist_checkpoint_folder: str=\"fine-tuned\" # will be used if using FSDP\n save_optimizer: bool=False # will be used if using FSDP\n use_fast_kernels: bool = False # Enable using SDPA from PyTroch Accelerated Transformers, make use Flash Attention and Xformer memory-efficient kernels" }, { "identifier": "ConcatDataset", "path": "data/concatenator.py", "snippet": "class ConcatDataset(Dataset):\n def __init__(self, dataset, chunk_size=4096):\n self.dataset = dataset\n self.chunk_size = chunk_size\n\n self.samples = []\n\n buffer = {\n \"input_ids\": [],\n \"attention_mask\": [],\n \"labels\": [],\n }\n\n for sample in tqdm(self.dataset, desc=\"Preprocessing dataset\", dynamic_ncols=True):\n buffer = {k: v + sample[k] for k,v in buffer.items()}\n\n while len(next(iter(buffer.values()))) > self.chunk_size:\n self.samples.append({k: v[:self.chunk_size] for k,v in buffer.items()})\n buffer = {k: v[self.chunk_size:] for k,v in buffer.items()}\n\n def __getitem__(self, idx):\n return self.samples[idx]\n\n def __len__(self):\n return len(self.samples)" }, { "identifier": "update_config", "path": "utils/config_utils.py", "snippet": "def update_config(config, **kwargs):\n if isinstance(config, (tuple, list)):\n for c in config:\n update_config(c, **kwargs)\n else:\n for k, v in kwargs.items():\n if hasattr(config, k):\n setattr(config, k, v)\n elif \".\" in k:\n # allow --some_config.some_param=True\n config_name, param_name = k.split(\".\")\n if type(config).__name__ == config_name:\n if hasattr(config, param_name):\n setattr(config, param_name, v)\n else:\n # In case of specialized config we can warm user\n print(f\"Warning: {config_name} does not accept parameter: {k}\")\n elif isinstance(config, train_config):\n print(f\"Warning: unknown parameter {k}\")" }, { "identifier": "generate_peft_config", "path": 
"utils/config_utils.py", "snippet": "def generate_peft_config(train_config, kwargs):\n configs = (lora_config, llama_adapter_config, prefix_config)\n peft_configs = (LoraConfig, AdaptionPromptConfig, PrefixTuningConfig)\n names = tuple(c.__name__.rstrip(\"_config\") for c in configs)\n\n assert train_config.peft_method in names, f\"Peft config not found: {train_config.peft_method}\"\n\n config = configs[names.index(train_config.peft_method)]()\n\n update_config(config, **kwargs)\n params = asdict(config)\n peft_config = peft_configs[names.index(train_config.peft_method)](**params)\n\n return peft_config" }, { "identifier": "generate_dataset_config", "path": "utils/config_utils.py", "snippet": "def generate_dataset_config(train_config, kwargs):\n names = tuple(DATASET_PREPROC.keys())\n\n assert train_config.dataset in names, f\"Unknown dataset: {train_config.dataset}\"\n\n dataset_config = {k:v for k, v in inspect.getmembers(datasets)}[train_config.dataset]()\n\n update_config(dataset_config, **kwargs)\n\n return dataset_config" }, { "identifier": "get_dataloader_kwargs", "path": "utils/config_utils.py", "snippet": "def get_dataloader_kwargs(train_config, dataset, tokenizer, mode):\n kwargs = {}\n batch_size = train_config.batch_size_training if mode==\"train\" else train_config.val_batch_size\n if train_config.batching_strategy == \"padding\":\n if train_config.enable_fsdp:\n kwargs[\"batch_sampler\"] = DistributedLengthBasedBatchSampler(\n dataset,\n batch_size=batch_size,\n rank=dist.get_rank(),\n num_replicas=dist.get_world_size(),\n shuffle=mode==\"train\",\n )\n else:\n kwargs[\"batch_sampler\"] = LengthBasedBatchSampler(dataset, batch_size, drop_last=True, shuffle=mode==\"train\")\n kwargs[\"collate_fn\"] = DataCollatorForSeq2Seq(tokenizer)\n elif train_config.batching_strategy == \"packing\":\n if train_config.enable_fsdp:\n kwargs[\"sampler\"] = DistributedSampler(\n dataset,\n rank=dist.get_rank(),\n num_replicas=dist.get_world_size(),\n shuffle=mode==\"train\",\n )\n kwargs[\"batch_size\"] = batch_size\n kwargs[\"drop_last\"] = True\n kwargs[\"collate_fn\"] = default_data_collator\n else:\n raise ValueError(f\"Unknown batching strategy: {train_config.batching_strategy}\")\n\n return kwargs" }, { "identifier": "get_preprocessed_dataset", "path": "utils/dataset_utils.py", "snippet": "def get_preprocessed_dataset(\n tokenizer, dataset_config, split: str = \"train\"\n) -> torch.utils.data.Dataset:\n if not dataset_config.dataset in DATASET_PREPROC:\n raise NotImplementedError(f\"{dataset_config.dataset} is not (yet) implemented\")\n\n def get_split():\n return (\n dataset_config.train_split\n if split == \"train\"\n else dataset_config.test_split\n )\n\n return DATASET_PREPROC[dataset_config.dataset](\n dataset_config,\n tokenizer,\n get_split(),\n )" }, { "identifier": "train", "path": "utils/train_utils.py", "snippet": "def train(model, train_dataloader,eval_dataloader, tokenizer, optimizer, lr_scheduler, gradient_accumulation_steps, train_config, fsdp_config=None, local_rank=None, rank=None):\n \"\"\"\n Trains the model on the given dataloader\n\n Args:\n model: The model to be trained\n train_dataloader: The dataloader containing the training data\n optimizer: The optimizer used for training\n lr_scheduler: The learning rate scheduler\n gradient_accumulation_steps: The number of steps to accumulate gradients before performing a backward/update operation\n num_epochs: The number of epochs to train for\n local_rank: The rank of the current node in a distributed setting\n 
train_config: The training configuration\n eval_dataloader: The dataloader containing the eval data\n tokenizer: tokenizer used in the eval for decoding the predicitons\n\n Returns: results dictionary containing average training and validation perplexity and loss\n \"\"\"\n # Create a gradient scaler for fp16\n if train_config.use_fp16 and train_config.enable_fsdp:\n scaler = ShardedGradScaler()\n elif train_config.use_fp16 and not train_config.enable_fsdp:\n scaler = torch.cuda.amp.GradScaler()\n if train_config.enable_fsdp:\n world_size = int(os.environ[\"WORLD_SIZE\"])\n autocast = torch.cuda.amp.autocast if train_config.use_fp16 else nullcontext\n\n train_prep = []\n train_loss = []\n val_prep = []\n val_loss =[]\n epoch_times = []\n checkpoint_times = []\n results = {}\n best_val_loss = float(\"inf\")\n for epoch in range(train_config.num_epochs):\n epoch_start_time = time.perf_counter()\n with MemoryTrace() as memtrace: # track the memory usage\n model.train()\n total_loss = 0.0\n total_length = len(train_dataloader)//gradient_accumulation_steps\n pbar = tqdm(colour=\"blue\", desc=f\"Training Epoch: {epoch+1}\", total=total_length, dynamic_ncols=True)\n for step, batch in enumerate(train_dataloader):\n for key in batch.keys():\n if train_config.enable_fsdp:\n batch[key] = batch[key].to(local_rank)\n else:\n batch[key] = batch[key].to('cuda:0')\n with autocast():\n loss = model(**batch).loss\n loss = loss / gradient_accumulation_steps\n total_loss += loss.detach().float()\n if train_config.use_fp16:\n # if fp16 is enabled, use gradient scaler to handle gradient update\n scaler.scale(loss).backward()\n if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n if train_config.gradient_clipping and train_config.gradient_clipping_threshold > 0.0:\n scaler.unscale_(optimizer)\n if train_config.enable_fsdp:\n model.clip_grad_norm_(train_config.gradient_clipping_threshold)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), train_config.gradient_clipping_threshold)\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n pbar.update(1)\n else:\n # regular backpropagation when fp16 is not used\n loss.backward()\n if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n if train_config.gradient_clipping and train_config.gradient_clipping_threshold > 0.0:\n if train_config.enable_fsdp:\n model.clip_grad_norm_(train_config.gradient_clipping_threshold)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), train_config.gradient_clipping_threshold)\n optimizer.step()\n optimizer.zero_grad()\n pbar.update(1)\n\n pbar.set_description(f\"Training Epoch: {epoch+1}/{train_config.num_epochs}, step {step}/{len(train_dataloader)} completed (loss: {loss.detach().float()})\")\n pbar.close()\n\n epoch_end_time = time.perf_counter()-epoch_start_time\n epoch_times.append(epoch_end_time)\n # Reducing total_loss across all devices if there's more than one CUDA device\n if torch.cuda.device_count() > 1 and train_config.enable_fsdp:\n dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)\n train_epoch_loss = total_loss / len(train_dataloader)\n if train_config.enable_fsdp:\n train_epoch_loss = train_epoch_loss/world_size\n train_perplexity = torch.exp(train_epoch_loss)\n\n train_prep.append(train_perplexity)\n train_loss.append(train_epoch_loss)\n\n if train_config.enable_fsdp:\n if rank==0:\n print(f\"Max CUDA memory allocated was {memtrace.peak} GB\")\n print(f\"Max CUDA memory reserved was {memtrace.max_reserved} GB\")\n 
print(f\"Peak active CUDA memory was {memtrace.peak_active_gb} GB\")\n print(f\"Cuda Malloc retires : {memtrace.cuda_malloc_retires}\")\n print(f\"CPU Total Peak Memory consumed during the train (max): {memtrace.cpu_peaked + memtrace.cpu_begin} GB\")\n else:\n print(f\"Max CUDA memory allocated was {memtrace.peak} GB\")\n print(f\"Max CUDA memory reserved was {memtrace.max_reserved} GB\")\n print(f\"Peak active CUDA memory was {memtrace.peak_active_gb} GB\")\n print(f\"Cuda Malloc retires : {memtrace.cuda_malloc_retires}\")\n print(f\"CPU Total Peak Memory consumed during the train (max): {memtrace.cpu_peaked + memtrace.cpu_begin} GB\")\n\n # Update the learning rate as needed\n lr_scheduler.step()\n\n if train_config.run_validation:\n eval_ppl, eval_epoch_loss = evaluation(model, train_config, eval_dataloader, local_rank, tokenizer)\n checkpoint_start_time = time.perf_counter()\n if (train_config.save_model and not train_config.run_validation) or (train_config.save_model and train_config.run_validation and eval_epoch_loss < best_val_loss):\n if train_config.enable_fsdp:\n dist.barrier()\n if train_config.use_peft:\n if train_config.enable_fsdp:\n if rank==0:\n print(f\"we are about to save the PEFT modules\")\n else:\n print(f\"we are about to save the PEFT modules\")\n model.save_pretrained(train_config.output_dir)\n if train_config.enable_fsdp:\n if rank==0:\n print(f\"PEFT modules are saved in {train_config.output_dir} directory\")\n else:\n print(f\"PEFT modules are saved in {train_config.output_dir} directory\")\n\n else:\n if not train_config.use_peft and fsdp_config.checkpoint_type == StateDictType.FULL_STATE_DICT:\n\n save_model_checkpoint(\n model, optimizer, rank, train_config, epoch=epoch\n )\n elif not train_config.use_peft and fsdp_config.checkpoint_type == StateDictType.SHARDED_STATE_DICT:\n print(\" Saving the FSDP model checkpoints using SHARDED_STATE_DICT\")\n print(\"=====================================================\")\n\n save_model_and_optimizer_sharded(model, rank, train_config)\n if train_config.save_optimizer:\n save_model_and_optimizer_sharded(model, rank, train_config, optim=optimizer)\n print(\" Saving the FSDP model checkpoints and optimizer using SHARDED_STATE_DICT\")\n print(\"=====================================================\")\n\n if not train_config.use_peft and train_config.save_optimizer:\n save_optimizer_checkpoint(\n model, optimizer, rank, train_config, epoch=epoch\n )\n print(\" Saving the FSDP model checkpoints and optimizer using FULL_STATE_DICT\")\n print(\"=====================================================\")\n if train_config.enable_fsdp:\n dist.barrier()\n checkpoint_end_time = time.perf_counter() - checkpoint_start_time\n checkpoint_times.append(checkpoint_end_time)\n if eval_epoch_loss < best_val_loss:\n best_val_loss = eval_epoch_loss\n if train_config.enable_fsdp:\n if rank==0:\n print(f\"best eval loss on epoch {epoch+1} is {best_val_loss}\")\n else:\n print(f\"best eval loss on epoch {epoch+1} is {best_val_loss}\")\n val_loss.append(best_val_loss)\n val_prep.append(eval_ppl)\n else:\n print('Saving last checkpoint..')\n model.save_pretrained(train_config.output_dir)\n\n if train_config.enable_fsdp:\n if rank==0:\n print(f\"Epoch {epoch+1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s\")\n else:\n print(f\"Epoch {epoch+1}: train_perplexity={train_perplexity:.4f}, train_epoch_loss={train_epoch_loss:.4f}, epoch time {epoch_end_time}s\")\n avg_epoch_time = 
sum(epoch_times)/ len(epoch_times)\n avg_checkpoint_time = sum(checkpoint_times)/ len(checkpoint_times) if len(checkpoint_times) > 0 else 0\n avg_train_prep = sum(train_prep)/len(train_prep)\n avg_train_loss = sum(train_loss)/len(train_loss)\n\n if train_config.run_validation:\n avg_eval_prep = sum(val_prep)/len(val_prep)\n avg_eval_loss = sum(val_loss)/len(val_loss)\n\n results['avg_train_prep'] = avg_train_prep\n results['avg_train_loss'] = avg_train_loss\n if train_config.run_validation:\n results['avg_eval_prep'] = avg_eval_prep\n results['avg_eval_loss'] = avg_eval_loss\n results[\"avg_epoch_time\"] = avg_epoch_time\n results[\"avg_checkpoint_time\"] = avg_checkpoint_time\n\n #saving the training params including fsdp setting for reference.\n if train_config.enable_fsdp and not train_config.use_peft:\n save_train_params(train_config, fsdp_config, rank)\n\n return results" }, { "identifier": "print_model_size", "path": "utils/train_utils.py", "snippet": "def print_model_size(model, config, rank: int = 0) -> None:\n \"\"\"\n Print model name, the number of trainable parameters and initialization time.\n\n Args:\n model: The PyTorch model.\n model_name (str): Name of the model.\n init_time_start (float): Initialization start time.\n init_time_end (float): Initialization end time.\n rank (int, optional): Current process's rank. Defaults to 0.\n \"\"\"\n if rank == 0:\n print(f\"--> Model {config.model_name}\")\n total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f\"\\n--> {config.model_name} has {total_params / 1e6} Million params\\n\")" } ]
import os import fire import random import torch import torch.optim as optim from pkg_resources import packaging from peft import get_peft_model, prepare_model_for_int8_training from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, ) from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload from torch.optim.lr_scheduler import StepLR from transformers import ( AutoModelForCausalLM, AutoTokenizer ) from transformers.models.llama.modeling_llama import LlamaDecoderLayer from configs.fsdp import fsdp_config as FSDP_CONFIG from configs.training import train_config as TRAIN_CONFIG from data.concatenator import ConcatDataset from utils.config_utils import ( update_config, generate_peft_config, generate_dataset_config, get_dataloader_kwargs, ) from utils.dataset_utils import get_preprocessed_dataset from utils.train_utils import ( train, print_model_size, )
4,963
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. def main(**kwargs): # Update the configuration for the training and sharding process train_config, fsdp_config = TRAIN_CONFIG(), FSDP_CONFIG() update_config((train_config, fsdp_config), **kwargs) if train_config.whatsapp_username is None or train_config.whatsapp_username == "": raise ValueError("Please provide your whatsapp_user_name in config/training.py or as commandline argument '--whatsapp_username [insert your username]'. Has to be same as in your exported WhatsApp chat .txt files") # Set the seeds for reproducibility torch.cuda.manual_seed(train_config.seed) torch.manual_seed(train_config.seed) random.seed(train_config.seed) #clear gpu cache torch.cuda.empty_cache() # Load the pre-trained model and setup its configuration model = AutoModelForCausalLM.from_pretrained( train_config.model_name, load_in_4bit=True if train_config.quantization else None, device_map="auto" if train_config.quantization else None, ) # Load the tokenizer and add special tokens tokenizer = AutoTokenizer.from_pretrained(train_config.model_name) tokenizer.pad_token_id = tokenizer.eos_token_id print_model_size(model, train_config, 0) # Prepare the model for int8 training if quantization is enabled if train_config.quantization: model = prepare_model_for_int8_training(model) if train_config.use_peft: peft_config = generate_peft_config(train_config, kwargs) model = get_peft_model(model, peft_config) model.print_trainable_parameters() #setting up FSDP if enable_fsdp is enabled if not train_config.quantization and not train_config.enable_fsdp: model.to("cuda") dataset_config = generate_dataset_config(train_config, kwargs) # Load and preprocess the dataset for training and validation dataset_train = get_preprocessed_dataset( tokenizer, dataset_config, split="train", ) print(f"--> Training Set Length = {len(dataset_train)}") if train_config.run_validation: dataset_val = get_preprocessed_dataset( tokenizer, dataset_config, split="test", ) print(f"--> Validation Set Length = {len(dataset_val)}") if train_config.batching_strategy == "packing":
# Copyright (c) Meta Platforms, Inc. and affiliates. # This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. def main(**kwargs): # Update the configuration for the training and sharding process train_config, fsdp_config = TRAIN_CONFIG(), FSDP_CONFIG() update_config((train_config, fsdp_config), **kwargs) if train_config.whatsapp_username is None or train_config.whatsapp_username == "": raise ValueError("Please provide your whatsapp_user_name in config/training.py or as commandline argument '--whatsapp_username [insert your username]'. Has to be same as in your exported WhatsApp chat .txt files") # Set the seeds for reproducibility torch.cuda.manual_seed(train_config.seed) torch.manual_seed(train_config.seed) random.seed(train_config.seed) #clear gpu cache torch.cuda.empty_cache() # Load the pre-trained model and setup its configuration model = AutoModelForCausalLM.from_pretrained( train_config.model_name, load_in_4bit=True if train_config.quantization else None, device_map="auto" if train_config.quantization else None, ) # Load the tokenizer and add special tokens tokenizer = AutoTokenizer.from_pretrained(train_config.model_name) tokenizer.pad_token_id = tokenizer.eos_token_id print_model_size(model, train_config, 0) # Prepare the model for int8 training if quantization is enabled if train_config.quantization: model = prepare_model_for_int8_training(model) if train_config.use_peft: peft_config = generate_peft_config(train_config, kwargs) model = get_peft_model(model, peft_config) model.print_trainable_parameters() #setting up FSDP if enable_fsdp is enabled if not train_config.quantization and not train_config.enable_fsdp: model.to("cuda") dataset_config = generate_dataset_config(train_config, kwargs) # Load and preprocess the dataset for training and validation dataset_train = get_preprocessed_dataset( tokenizer, dataset_config, split="train", ) print(f"--> Training Set Length = {len(dataset_train)}") if train_config.run_validation: dataset_val = get_preprocessed_dataset( tokenizer, dataset_config, split="test", ) print(f"--> Validation Set Length = {len(dataset_val)}") if train_config.batching_strategy == "packing":
dataset_train = ConcatDataset(dataset_train, chunk_size=train_config.context_length)
2
2023-12-28 00:02:08+00:00
8k
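With batching_strategy set to "packing", ConcatDataset in the record above concatenates the tokenized chat samples into one stream and cuts it into fixed-length chunks. A toy, dependency-free sketch of that packing idea follows; it works on plain token lists rather than the input_ids/attention_mask/labels dicts of the real class, and the chunk size and sample values are arbitrary.

def pack(samples, chunk_size=4):
    # Concatenate token lists and emit chunks of exactly `chunk_size` tokens;
    # like ConcatDataset in the record, a leftover buffer that never exceeds chunk_size is not emitted.
    buffer, packed = [], []
    for sample in samples:
        buffer.extend(sample)
        while len(buffer) > chunk_size:
            packed.append(buffer[:chunk_size])
            buffer = buffer[chunk_size:]
    return packed

print(pack([[1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11, 12]]))
# [[1, 2, 3, 4], [5, 6, 7, 8]]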
FoundationVision/UniRef
projects/UniRef/uniref/models/deformable_detr/criterion.py
[ { "identifier": "box_ops", "path": "projects/UniRef/uniref/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef multi_box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef generalized_multi_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "dice_loss", "path": "projects/UniRef/uniref/models/deformable_detr/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "sigmoid_focal_loss", "path": "projects/UniRef/uniref/models/deformable_detr/segmentation.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "mask_iou_loss", "path": "projects/UniRef/uniref/models/deformable_detr/segmentation.py", "snippet": "def mask_iou_loss(inputs, targets, num_boxes, eps=1e-7, size_average=True):\n \"\"\"\n Compute the mask iou loss, similar to IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n # [all_num_insts, H, W]\n assert len(inputs.shape) == 3 and inputs.shape == targets.shape\n\n inter = torch.min(inputs, targets).sum(2).sum(1)\n union = torch.max(inputs, targets).sum(2).sum(1)\n\n if size_average:\n iou = torch.sum((inter+eps) / (union+eps)) / num_boxes\n else:\n iou = (inter+eps) / (union+eps)\n \n loss = 1.0 - iou\n return loss" }, { "identifier": "bootstrap_ce_loss", "path": "projects/UniRef/uniref/models/deformable_detr/segmentation.py", "snippet": "def bootstrap_ce_loss(inputs, targets, num_boxes, bootstrap=0.4):\n \"\"\"\n Compute the mask iou loss, similar to IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. 
Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n N, _, H, W = inputs.shape\n\n inputs = -1.0 * torch.log(inputs)\n\n if bootstrap > 0:\n num = int(H * W * bootstrap)\n loss = torch.sum(inputs * targets, dim=1).view(N, -1)\n mloss, _ = torch.sort(loss, dim=-1, descending=True)\n loss = torch.mean(mloss[:, :num])\n else:\n loss = torch.sum(inputs * targets)\n loss = loss.mean()\n return loss" }, { "identifier": "soft_aggregate", "path": "projects/UniRef/uniref/models/deformable_detr/segmentation.py", "snippet": "def soft_aggregate(prob, keep_bg=False):\n # prob: [K, H, W], the mask prob scores of K objects\n k = prob.shape\n new_prob = torch.cat([\n torch.prod(1-prob, dim=0, keepdim=True),\n prob\n ], 0).clamp(1e-7, 1-1e-7)\n logits = torch.log((new_prob /(1-new_prob)))\n\n if keep_bg:\n return F.softmax(logits, dim=0)\n else:\n return F.softmax(logits, dim=0)[1:]" }, { "identifier": "NestedTensor", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, device, non_blocking=False):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device, non_blocking=non_blocking)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device, non_blocking=non_blocking)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def record_stream(self, *args, **kwargs):\n self.tensors.record_stream(*args, **kwargs)\n if self.mask is not None:\n self.mask.record_stream(*args, **kwargs)\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)" }, { "identifier": "nested_tensor_from_tensor_list", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor], size_divisibility=1, split=True):\n if split:\n tensor_list = [tensor.split(3,dim=0) for tensor in tensor_list]\n tensor_list = [item for sublist in tensor_list for item in sublist]\n\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n\n if size_divisibility > 1:\n stride = size_divisibility\n # the last two dims are H,W, both subject to divisibility requirement\n max_size[-2] = (max_size[-2] + (stride - 1)) // stride * stride\n max_size[-1] = (max_size[-1] + (stride - 1)) // stride * stride\n\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n 
_, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n if float(torchvision.__version__[:3]) < 0.5:\n return _NewEmptyTensorOp.apply(input, output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "projects/UniRef/uniref/util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-5):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" } ]
import torch import torch.nn as nn import torch.nn.functional as F import copy import random from ...util import box_ops from .segmentation import (dice_loss, sigmoid_focal_loss, mask_iou_loss, bootstrap_ce_loss, soft_aggregate) from ...util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid) from fvcore.nn import giou_loss, smooth_l1_loss
5,606
if len(target_boxes) == 0: losses = {} losses['loss_bbox'] = src_boxes.sum() * 0.0 losses['loss_giou'] = src_boxes.sum() * 0.0 if use_iou_branch: losses['loss_boxiou'] = src_boxes.sum() * 0.0 return losses # box iou if use_iou_branch: with torch.no_grad(): ious = compute_box_iou(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes)) tgt_iou_scores = ious src_iou_scores = outputs['pred_boxious'] # [B, N, 1] src_iou_scores = src_iou_scores[idx] src_iou_scores = src_iou_scores.flatten(0) tgt_iou_scores = tgt_iou_scores.flatten(0) loss_boxiou = F.binary_cross_entropy_with_logits(src_iou_scores, tgt_iou_scores, reduction='mean') num_boxes = src_boxes.shape[0] if self.ota else num_boxes loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = giou_loss(box_ops.box_cxcywh_to_xyxy(src_boxes),box_ops.box_cxcywh_to_xyxy(target_boxes)) # loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( # box_ops.box_cxcywh_to_xyxy(src_boxes), # box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes if use_iou_branch: losses['loss_boxiou'] = loss_boxiou return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_masks = outputs["pred_masks"] src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] # list[tensor]: bs x [1, num_inst, num_frames, H/4, W/4] bs = len(targets) # src_masks: bs x [1, num_inst, num_frames, H/4, W/4] or [bs, num_inst, num_frames, H/4, W/4] if type(src_masks) == list: src_masks = torch.cat(src_masks, dim=1)[0] # [num_all_inst, num_frames, H/4, W/4] if src_masks.ndim == 0: # no mask label (only box label) losses = {} losses['loss_mask'] = src_masks * 0.0 losses['loss_dice'] = src_masks * 0.0 if self.mask_aux_loss: losses["loss_mask_cls"] = src_masks * 0.0 losses["loss_mask_iou"] = src_masks * 0.0 return losses # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets], size_divisibility=32, split=False).decompose() # during training, the size_divisibility is 32 target_masks = target_masks.to(src_masks) # [bs, max_num_gt, H, W] # for VOS, supervised in the original resolution if target_masks.shape[-2:] == src_masks.shape[-2:]: pass else: # downsample ground truth masks with ratio mask_out_stride start = int(self.mask_out_stride // 2) im_h, im_w = target_masks.shape[-2:] target_masks = target_masks[:, :, start::self.mask_out_stride, start::self.mask_out_stride] assert target_masks.size(2) * self.mask_out_stride == im_h assert target_masks.size(3) * self.mask_out_stride == im_w num_frames = src_masks.shape[1] # # upsample predictions to the target size # src_masks = interpolate(src_masks, size=target_masks.shape[-2:], # mode="bilinear", align_corners=False) target_masks = target_masks.reshape(bs, -1, num_frames, target_masks.shape[-2], target_masks.shape[-1]) target_masks = target_masks[tgt_idx] # [num_all_inst, num_frames, H/4, W/4] num_boxes = src_masks.shape[0] if self.ota else num_boxes if len(target_masks) == 0: # no gt losses = {} losses['loss_mask'] = src_masks.sum() * 0.0 losses['loss_dice'] = src_masks.sum() * 0.0 if self.mask_aux_loss: losses["loss_mask_cls"] = 
src_masks.sum() * 0.0 losses["loss_mask_iou"] = src_masks.sum() * 0.0 return losses if self.mask_aux_loss: # convert instance mask to semantice mask src_masks_aux = src_masks.flatten(0,1).sigmoid() # scores src_masks_aux_list = [] for src_mask_aux in src_masks_aux: src_masks_aux_list.append(soft_aggregate(src_mask_aux.unsqueeze(0), keep_bg=True)) src_masks_aux = torch.stack(src_masks_aux_list, dim=0) # [all_num_insts, 2, H, W] # convert targets, including bg target_masks_aux = torch.zeros_like(src_masks_aux) # [all_num_insts, 2, H, W] target_masks_aux[:, 1, :, :] = target_masks.flatten(0,1).float() target_masks_aux[:, 0, :, :] = 1.0 - target_masks.flatten(0,1).float() src_masks = src_masks.flatten(1) target_masks = target_masks.flatten(1) # src_masks/target_masks: [n_targets, num_frames * H * W] losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } if self.mask_aux_loss: losses.update({"loss_mask_cls": bootstrap_ce_loss(src_masks_aux, target_masks_aux, num_boxes)})
class SmoothLabelCrossEntropyLoss(nn.Module): def __init__(self, eps=0.1, ignore_index=None): super().__init__() self.eps = eps self.log_soft = nn.LogSoftmax(dim=1) self.kl = nn.KLDivLoss(reduction='none') self.ignore_index = ignore_index def forward(self, feature, target): feature = feature.float() if self.ignore_index is not None: valid_mask = target != self.ignore_index target = target[valid_mask] feature = feature[valid_mask] assert target.numel() > 0 eps = self.eps n_class = feature.size(1) one_hot = torch.zeros_like(feature).scatter(1, target.view(-1, 1), 1) one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1) log_prb = self.log_soft(feature) loss = self.kl(log_prb, one_hot) return loss.sum(dim=1).mean() class SetCriterion(nn.Module): """ This class computes the loss for DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25, mask_out_stride=4, num_frames=1, ota=False, mask_aux_loss=False, cfg=None): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha self.mask_out_stride = mask_out_stride self.num_frames = num_frames self.ota = ota self.mask_aux_loss = mask_aux_loss # boxinst configs if cfg is not None: self.boxinst_enabled = cfg.MODEL.BOXINST.ENABLED self.bottom_pixels_removed = cfg.MODEL.BOXINST.BOTTOM_PIXELS_REMOVED self.pairwise_size = cfg.MODEL.BOXINST.PAIRWISE.SIZE self.pairwise_dilation = cfg.MODEL.BOXINST.PAIRWISE.DILATION self.pairwise_color_thresh = cfg.MODEL.BOXINST.PAIRWISE.COLOR_THRESH self._warmup_iters = cfg.MODEL.BOXINST.PAIRWISE.WARMUP_ITERS self.boxinst_topk = cfg.MODEL.BOXINST.TOPK self.register_buffer("_iter", torch.zeros([1])) def loss_labels(self, outputs, targets, indices, num_boxes, log=False): """Classification loss (NLL) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] # [B, N, K] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o if len(target_classes_o) == 0: # no gt in the batch loss_ce = src_logits.sum() * 0.0 losses = {'loss_ce': loss_ce} return losses target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] num_boxes = len(idx[0]) if self.ota else num_boxes loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': 
loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) use_iou_branch = "pred_boxious" in outputs if len(target_boxes) == 0: losses = {} losses['loss_bbox'] = src_boxes.sum() * 0.0 losses['loss_giou'] = src_boxes.sum() * 0.0 if use_iou_branch: losses['loss_boxiou'] = src_boxes.sum() * 0.0 return losses # box iou if use_iou_branch: with torch.no_grad(): ious = compute_box_iou(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes)) tgt_iou_scores = ious src_iou_scores = outputs['pred_boxious'] # [B, N, 1] src_iou_scores = src_iou_scores[idx] src_iou_scores = src_iou_scores.flatten(0) tgt_iou_scores = tgt_iou_scores.flatten(0) loss_boxiou = F.binary_cross_entropy_with_logits(src_iou_scores, tgt_iou_scores, reduction='mean') num_boxes = src_boxes.shape[0] if self.ota else num_boxes loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = giou_loss(box_ops.box_cxcywh_to_xyxy(src_boxes),box_ops.box_cxcywh_to_xyxy(target_boxes)) # loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( # box_ops.box_cxcywh_to_xyxy(src_boxes), # box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes if use_iou_branch: losses['loss_boxiou'] = loss_boxiou return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_masks = outputs["pred_masks"] src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] # list[tensor]: bs x [1, num_inst, num_frames, H/4, W/4] bs = len(targets) # src_masks: bs x [1, num_inst, num_frames, H/4, W/4] or [bs, num_inst, num_frames, H/4, W/4] if type(src_masks) == list: src_masks = torch.cat(src_masks, dim=1)[0] # [num_all_inst, num_frames, H/4, W/4] if src_masks.ndim == 0: # no mask label (only box label) losses = {} losses['loss_mask'] = src_masks * 0.0 losses['loss_dice'] = src_masks * 0.0 if self.mask_aux_loss: losses["loss_mask_cls"] = src_masks * 0.0 losses["loss_mask_iou"] = src_masks * 0.0 return losses # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets], size_divisibility=32, split=False).decompose() # during training, the size_divisibility is 32 target_masks = target_masks.to(src_masks) # [bs, max_num_gt, H, W] # for VOS, supervised in the original resolution if target_masks.shape[-2:] == src_masks.shape[-2:]: pass else: # downsample ground truth masks with ratio mask_out_stride start = int(self.mask_out_stride // 2) im_h, im_w = target_masks.shape[-2:] target_masks = target_masks[:, :, start::self.mask_out_stride, start::self.mask_out_stride] assert target_masks.size(2) * self.mask_out_stride == im_h assert target_masks.size(3) * self.mask_out_stride == im_w num_frames = src_masks.shape[1] # # upsample predictions to the target size # src_masks = interpolate(src_masks, size=target_masks.shape[-2:], # mode="bilinear", align_corners=False) target_masks = target_masks.reshape(bs, -1, num_frames, target_masks.shape[-2], target_masks.shape[-1]) target_masks = target_masks[tgt_idx] # [num_all_inst, num_frames, H/4, W/4] num_boxes = src_masks.shape[0] if self.ota else num_boxes if len(target_masks) == 0: # no gt losses = {} losses['loss_mask'] = src_masks.sum() * 0.0 losses['loss_dice'] = src_masks.sum() * 0.0 if self.mask_aux_loss: losses["loss_mask_cls"] = src_masks.sum() * 0.0 losses["loss_mask_iou"] = src_masks.sum() * 0.0 return losses if self.mask_aux_loss: # convert instance mask to semantice mask src_masks_aux = src_masks.flatten(0,1).sigmoid() # scores src_masks_aux_list = [] for src_mask_aux in src_masks_aux: src_masks_aux_list.append(soft_aggregate(src_mask_aux.unsqueeze(0), keep_bg=True)) src_masks_aux = torch.stack(src_masks_aux_list, dim=0) # [all_num_insts, 2, H, W] # convert targets, including bg target_masks_aux = torch.zeros_like(src_masks_aux) # [all_num_insts, 2, H, W] target_masks_aux[:, 1, :, :] = target_masks.flatten(0,1).float() target_masks_aux[:, 0, :, :] = 1.0 - target_masks.flatten(0,1).float() src_masks = src_masks.flatten(1) target_masks = target_masks.flatten(1) # src_masks/target_masks: [n_targets, num_frames * H * W] losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } if self.mask_aux_loss: losses.update({"loss_mask_cls": bootstrap_ce_loss(src_masks_aux, target_masks_aux, num_boxes)})
losses.update({"loss_mask_iou": mask_iou_loss(src_masks_aux[:, 1, :, :], target_masks_aux[:, 1, :, :], num_boxes)})
3
2023-12-22 13:31:33+00:00
8k
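The row above centers on DETR-style mask supervision: a sigmoid focal loss and a dice loss computed over flattened per-instance masks and normalized by the number of matched instances. Below is a minimal, self-contained sketch of that combination under those assumptions; the function names (`dice_loss_sketch`, `sigmoid_focal_loss_sketch`) are illustrative placeholders, not the repository's exact API.

import torch
import torch.nn.functional as F

def dice_loss_sketch(inputs, targets, num_boxes):
    # inputs: logits of shape [num_instances, H*W]; targets: binary masks of the same shape
    probs = inputs.sigmoid()
    numerator = 2 * (probs * targets).sum(1)
    denominator = probs.sum(1) + targets.sum(1)
    loss = 1 - (numerator + 1) / (denominator + 1)  # +1 smoothing avoids division by zero
    return loss.sum() / num_boxes

def sigmoid_focal_loss_sketch(inputs, targets, num_boxes, alpha=0.25, gamma=2.0):
    # Standard focal loss on per-pixel logits, averaged per instance then normalized
    probs = inputs.sigmoid()
    ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = probs * targets + (1 - probs) * (1 - targets)
    loss = ce * ((1 - p_t) ** gamma)
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.mean(1).sum() / num_boxes

In the row's code these two terms are reported as "loss_mask" and "loss_dice" and summed with their respective weights from the criterion's weight_dict.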
mkshing/scedit-pytorch
scedit_pytorch/diffusers_modules/unet_2d_condition.py
[ { "identifier": "get_up_block", "path": "scedit_pytorch/diffusers_modules/unet_2d_blocks.py", "snippet": "def get_up_block(\n up_block_type: str,\n num_layers: int,\n in_channels: int,\n out_channels: int,\n prev_output_channel: int,\n temb_channels: int,\n add_upsample: bool,\n resnet_eps: float,\n resnet_act_fn: str,\n resolution_idx: Optional[int] = None,\n transformer_layers_per_block: int = 1,\n num_attention_heads: Optional[int] = None,\n resnet_groups: Optional[int] = None,\n cross_attention_dim: Optional[int] = None,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n attention_type: str = \"default\",\n resnet_skip_time_act: bool = False,\n resnet_out_scale_factor: float = 1.0,\n cross_attention_norm: Optional[str] = None,\n attention_head_dim: Optional[int] = None,\n upsample_type: Optional[str] = None,\n dropout: float = 0.0,\n) -> nn.Module:\n # If attn head dim is not defined, we default it to the number of heads\n if attention_head_dim is None:\n logger.warn(\n f\"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.\"\n )\n attention_head_dim = num_attention_heads\n\n up_block_type = (\n up_block_type[7:] if up_block_type.startswith(\"UNetRes\") else up_block_type\n )\n if up_block_type == \"UpBlock2D\":\n return UpBlock2D(\n num_layers=num_layers,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n resolution_idx=resolution_idx,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n elif up_block_type == \"CrossAttnUpBlock2D\":\n if cross_attention_dim is None:\n raise ValueError(\n \"cross_attention_dim must be specified for CrossAttnUpBlock2D\"\n )\n return CrossAttnUpBlock2D(\n num_layers=num_layers,\n transformer_layers_per_block=transformer_layers_per_block,\n in_channels=in_channels,\n out_channels=out_channels,\n prev_output_channel=prev_output_channel,\n temb_channels=temb_channels,\n resolution_idx=resolution_idx,\n dropout=dropout,\n add_upsample=add_upsample,\n resnet_eps=resnet_eps,\n resnet_act_fn=resnet_act_fn,\n resnet_groups=resnet_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=num_attention_heads,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n )\n\n # raise ValueError(f\"{up_block_type} does not exist.\")\n raise NotImplementedError" }, { "identifier": "SCTunerLinearLayer", "path": "scedit_pytorch/scedit.py", "snippet": "class SCTunerLinearLayer(AbstractSCTunerLayer):\n r\"\"\"\n A linear layer that is used with SCEdit.\n\n Parameters:\n dim (`int`):\n Number of dim.\n out_features (`int`):\n Number of output features.\n rank (`int`, `optional`, defaults to 4):\n The rank of the LoRA layer.\n device (`torch.device`, `optional`, defaults to `None`):\n The device to use for the layer's weights.\n dtype (`torch.dtype`, `optional`, defaults to `None`):\n The dtype to use for the layer's weights.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n rank: Optional[int] = None,\n scale: float = 
1.0,\n device: Optional[Union[torch.device, str]] = None,\n dtype: Optional[torch.dtype] = None,\n ):\n super().__init__(dim=dim)\n if rank is None:\n rank = dim\n self.down = nn.Linear(dim, rank, device=device, dtype=dtype)\n self.up = nn.Linear(rank, dim, device=device, dtype=dtype)\n self.act = nn.GELU()\n self.rank = rank\n self.scale = scale\n\n nn.init.normal_(self.down.weight, std=1 / rank)\n nn.init.zeros_(self.up.weight)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n orig_dtype = hidden_states.dtype\n dtype = self.down.weight.dtype\n\n hidden_states_input = hidden_states.permute(0, 2, 3, 1)\n down_hidden_states = self.down(hidden_states_input.to(dtype))\n up_hidden_states = self.up(self.act(down_hidden_states))\n up_hidden_states = up_hidden_states.to(orig_dtype).permute(0, 3, 1, 2)\n return self.scale * up_hidden_states + hidden_states" } ]
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import ( USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.models.activations import get_activation from diffusers.models.attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, ) from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, PositionNet, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin from diffusers.models.unet_2d_blocks import ( UNetMidBlock2D, UNetMidBlock2DCrossAttn, UNetMidBlock2DSimpleCrossAttn, get_down_block, ) from .unet_2d_blocks import get_up_block from ..scedit import SCTunerLinearLayer import torch import torch.nn as nn import torch.utils.checkpoint
4,305
for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": self.mid_block = UNetMidBlock2DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type == "UNetMidBlock2D": self.mid_block = UNetMidBlock2D( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = ( list(reversed(transformer_layers_per_block)) if reverse_transformer_layers_per_block is None else reverse_transformer_layers_per_block ) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, 
up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[ min(i + 1, len(block_out_channels) - 1) ] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet2DConditionOutput(BaseOutput): """ The output of [`UNet2DConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. 
If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. 
conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", up_block_types: Tuple[str] = ( "UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, dropout: float = 0.0, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1, reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, attention_type: str = "default", class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." 
) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len( only_cross_attention ) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len( down_block_types ): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len( down_block_types ): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len( down_block_types ): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len( down_block_types ): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) if ( isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None ): for layer_number_per_block in transformer_layers_per_block: if isinstance(layer_number_per_block, list): raise ValueError( "Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding, ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError( f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}." 
) self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos, ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps( block_out_channels[0], flip_sin_to_cos, freq_shift ) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info( "encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined." ) if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn ) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
self.class_embedding = TimestepEmbedding( projection_class_embeddings_input_dim, time_embed_dim ) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear( projection_class_embeddings_input_dim, time_embed_dim ) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads, ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim, ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps( addition_time_embed_dim, flip_sin_to_cos, freq_shift ) self.add_embedding = TimestepEmbedding( projection_class_embeddings_input_dim, time_embed_dim ) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding( image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding( image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type is not None: raise ValueError( f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'." ) if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len( down_block_types ) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, dropout=dropout, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, attention_type=attention_type, ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": self.mid_block = UNetMidBlock2DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type == "UNetMidBlock2D": self.mid_block = UNetMidBlock2D( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, dropout=dropout, num_layers=0, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, add_attention=False, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) 
reversed_transformer_layers_per_block = ( list(reversed(transformer_layers_per_block)) if reverse_transformer_layers_per_block is None else reverse_transformer_layers_per_block ) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[ min(i + 1, len(block_out_channels) - 1) ] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False
up_block = get_up_block(
0
2023-12-22 05:37:58+00:00
8k
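The context snippet in the row above defines SCTunerLinearLayer: a channel-wise down-projection, GELU, up-projection, and scaled residual applied to NCHW feature maps. A minimal sketch of that pattern follows; the class name `SCTunerSketch` and its argument defaults are placeholders chosen for illustration, not the package's exact interface.

import torch
import torch.nn as nn

class SCTunerSketch(nn.Module):
    def __init__(self, dim, rank=None, scale=1.0):
        super().__init__()
        rank = rank or dim
        self.down = nn.Linear(dim, rank)
        self.up = nn.Linear(rank, dim)
        self.act = nn.GELU()
        self.scale = scale
        nn.init.normal_(self.down.weight, std=1 / rank)
        nn.init.zeros_(self.up.weight)  # zero-init so the tuner starts as an identity mapping

    def forward(self, x):
        # x: [B, C, H, W]; move channels last so the linear layers act per spatial location
        h = x.permute(0, 2, 3, 1)
        h = self.up(self.act(self.down(h)))
        return x + self.scale * h.permute(0, 3, 1, 2)

Zero-initializing the up-projection means the frozen backbone's output is unchanged at the start of fine-tuning, and the tuner only gradually learns a residual edit of the skip-connection features.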
xhuangcv/humannorm
threestudio/models/guidance/controlnet_guidance.py
[ { "identifier": "PromptProcessorOutput", "path": "threestudio/models/prompt_processors/base.py", "snippet": "class PromptProcessorOutput:\n text_embeddings: Float[Tensor, \"N Nf\"]\n uncond_text_embeddings: Float[Tensor, \"N Nf\"]\n text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n uncond_text_embeddings_vd: Float[Tensor, \"Nv N Nf\"]\n directions: List[DirectionConfig]\n direction2idx: Dict[str, int]\n use_perp_neg: bool\n perp_neg_f_sb: Tuple[float, float, float]\n perp_neg_f_fsb: Tuple[float, float, float]\n perp_neg_f_fs: Tuple[float, float, float]\n perp_neg_f_sf: Tuple[float, float, float]\n\n def get_text_embeddings(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n human_part: Int[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Float[Tensor, \"BB N Nf\"]:\n batch_size = elevation.shape[0]\n\n if view_dependent_prompting:\n # Get direction\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances, human_part)\n ] = self.direction2idx[d.name]\n\n # Get text embeddings\n text_embeddings = self.text_embeddings_vd[direction_idx] # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings_vd[direction_idx] # type: ignore\n else:\n text_embeddings = self.text_embeddings.expand(batch_size, -1, -1) # type: ignore\n uncond_text_embeddings = self.uncond_text_embeddings.expand( # type: ignore\n batch_size, -1, -1\n )\n\n # IMPORTANT: we return (cond, uncond), which is in different order than other implementations!\n return torch.cat([text_embeddings, uncond_text_embeddings], dim=0)\n\n def get_text_embeddings_perp_neg(\n self,\n elevation: Float[Tensor, \"B\"],\n azimuth: Float[Tensor, \"B\"],\n camera_distances: Float[Tensor, \"B\"],\n view_dependent_prompting: bool = True,\n ) -> Tuple[Float[Tensor, \"BBBB N Nf\"], Float[Tensor, \"B 2\"]]:\n assert (\n view_dependent_prompting\n ), \"Perp-Neg only works with view-dependent prompting\"\n\n batch_size = elevation.shape[0]\n\n direction_idx = torch.zeros_like(elevation, dtype=torch.long)\n for d in self.directions:\n direction_idx[\n d.condition(elevation, azimuth, camera_distances)\n ] = self.direction2idx[d.name]\n # 0 - side view\n # 1 - front view\n # 2 - back view\n # 3 - overhead view\n\n pos_text_embeddings = []\n neg_text_embeddings = []\n neg_guidance_weights = []\n uncond_text_embeddings = []\n\n side_emb = self.text_embeddings_vd[0]\n front_emb = self.text_embeddings_vd[1]\n back_emb = self.text_embeddings_vd[2]\n overhead_emb = self.text_embeddings_vd[3]\n\n for idx, ele, azi, dis in zip(\n direction_idx, elevation, azimuth, camera_distances\n ):\n azi = shift_azimuth_deg(azi) # to (-180, 180)\n uncond_text_embeddings.append(\n self.uncond_text_embeddings_vd[idx]\n ) # should be \"\"\n if idx.item() == 3: # overhead view\n pos_text_embeddings.append(overhead_emb) # side view\n # dummy\n neg_text_embeddings += [\n self.uncond_text_embeddings_vd[idx],\n self.uncond_text_embeddings_vd[idx],\n ]\n neg_guidance_weights += [0.0, 0.0]\n else: # interpolating views\n if torch.abs(azi) < 90:\n # front-side interpolation\n # 0 - complete side, 1 - complete front\n r_inter = 1 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * front_emb + (1 - r_inter) * side_emb\n )\n neg_text_embeddings += [front_emb, side_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_fs, r_inter),\n 
-shifted_expotional_decay(*self.perp_neg_f_sf, 1 - r_inter),\n ]\n else:\n # side-back interpolation\n # 0 - complete back, 1 - complete side\n r_inter = 2.0 - torch.abs(azi) / 90\n pos_text_embeddings.append(\n r_inter * side_emb + (1 - r_inter) * back_emb\n )\n neg_text_embeddings += [side_emb, front_emb]\n neg_guidance_weights += [\n -shifted_expotional_decay(*self.perp_neg_f_sb, r_inter),\n -shifted_expotional_decay(*self.perp_neg_f_fsb, r_inter),\n ]\n\n text_embeddings = torch.cat(\n [\n torch.stack(pos_text_embeddings, dim=0),\n torch.stack(uncond_text_embeddings, dim=0),\n torch.stack(neg_text_embeddings, dim=0),\n ],\n dim=0,\n )\n\n return text_embeddings, torch.as_tensor(\n neg_guidance_weights, device=elevation.device\n ).reshape(batch_size, 2)" }, { "identifier": "BaseObject", "path": "threestudio/utils/base.py", "snippet": "class BaseObject(Updateable):\n @dataclass\n class Config:\n pass\n\n cfg: Config # add this to every subclass of BaseObject to enable static type checking\n\n def __init__(\n self, cfg: Optional[Union[dict, DictConfig]] = None, *args, **kwargs\n ) -> None:\n super().__init__()\n self.cfg = parse_structured(self.Config, cfg)\n self.device = get_device()\n self.configure(*args, **kwargs)\n\n def configure(self, *args, **kwargs) -> None:\n pass" }, { "identifier": "C", "path": "threestudio/utils/misc.py", "snippet": "def C(value: Any, epoch: int, global_step: int) -> float:\n if isinstance(value, int) or isinstance(value, float):\n pass\n else:\n value = config_to_primitive(value)\n if not isinstance(value, list):\n raise TypeError(\"Scalar specification only supports list, got\", type(value))\n if len(value) == 3:\n value = [0] + value\n assert len(value) == 4\n start_step, start_value, end_value, end_step = value\n if isinstance(end_step, int):\n current_step = global_step\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n elif isinstance(end_step, float):\n current_step = epoch\n value = start_value + (end_value - start_value) * max(\n min(1.0, (current_step - start_step) / (end_step - start_step)), 0.0\n )\n return value" }, { "identifier": "parse_version", "path": "threestudio/utils/misc.py", "snippet": "def parse_version(ver: str):\n return version.parse(ver)" } ]
import os import cv2 import numpy as np import torch import torch.nn.functional as F import threestudio from dataclasses import dataclass from controlnet_aux import CannyDetector, NormalBaeDetector from diffusers import ControlNetModel, DDIMScheduler, StableDiffusionControlNetPipeline from diffusers.utils.import_utils import is_xformers_available from tqdm import tqdm from threestudio.models.prompt_processors.base import PromptProcessorOutput from threestudio.utils.base import BaseObject from threestudio.utils.misc import C, parse_version from threestudio.utils.typing import * from threestudio.utils.config import ExperimentConfig, load_config from threestudio.utils.typing import Optional
4,775
) -> Float[Tensor, "B 4 64 64"]: self.scheduler.config.num_train_timesteps = t.item() self.scheduler.set_timesteps(self.cfg.diffusion_steps) with torch.no_grad(): # add noise noise = torch.randn_like(latents) latents = self.scheduler.add_noise(latents, noise, t) # type: ignore # sections of code used from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py threestudio.debug("Start editing...") for i, t in enumerate(self.scheduler.timesteps): # predict the noise residual with unet, NO grad! with torch.no_grad(): # pred noise latent_model_input = torch.cat([latents] * 2) ( down_block_res_samples, mid_block_res_sample, ) = self.forward_controlnet( latent_model_input, t, encoder_hidden_states=text_embeddings, image_cond=image_cond, condition_scale=self.cfg.condition_scale, ) noise_pred = self.forward_control_unet( latent_model_input, t, encoder_hidden_states=text_embeddings, cross_attention_kwargs=None, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ) # perform classifier-free guidance noise_pred_text, noise_pred_uncond = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.cfg.guidance_scale * ( noise_pred_text - noise_pred_uncond ) # get previous sample, continue loop latents = self.scheduler.step(noise_pred, t, latents).prev_sample threestudio.debug("Editing finished.") return latents def prepare_image_cond(self, cond_rgb: Float[Tensor, "B H W C"]): if self.cfg.control_type == "normal": cond_rgb = ( (cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy() ) detected_map = self.preprocessor(cond_rgb) control = ( torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0 ) control = control.unsqueeze(0) control = control.permute(0, 3, 1, 2) elif self.cfg.control_type == "canny": cond_rgb = ( (cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy() ) blurred_img = cv2.blur(cond_rgb, ksize=(5, 5)) detected_map = self.preprocessor( blurred_img, self.cfg.canny_lower_bound, self.cfg.canny_upper_bound ) control = ( torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0 ) control = control.unsqueeze(-1).repeat(1, 1, 3) control = control.unsqueeze(0) control = control.permute(0, 3, 1, 2) elif self.cfg.control_type == "input_normal": control = cond_rgb.permute(0, 3, 1, 2) else: raise ValueError(f"Unknown control type: {self.cfg.control_type}") return F.interpolate(control, (512, 512), mode="bilinear", align_corners=False) def compute_grad_sds( self, text_embeddings: Float[Tensor, "BB 77 768"], latents: Float[Tensor, "B 4 64 64"], image_cond: Float[Tensor, "B 3 512 512"], t: Int[Tensor, "B"], ): with torch.no_grad(): # add noise noise = torch.randn_like(latents) # TODO: use torch generator latents_noisy = self.scheduler.add_noise(latents, noise, t) # pred noise latent_model_input = torch.cat([latents_noisy] * 2) down_block_res_samples, mid_block_res_sample = self.forward_controlnet( latent_model_input, t, encoder_hidden_states=text_embeddings, image_cond=image_cond, condition_scale=self.cfg.condition_scale, ) noise_pred = self.forward_control_unet( latent_model_input, t, encoder_hidden_states=text_embeddings, cross_attention_kwargs=None, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ) # perform classifier-free guidance noise_pred_text, noise_pred_uncond = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.cfg.guidance_scale * ( 
noise_pred_text - noise_pred_uncond ) w = (1 - self.alphas[t]).view(-1, 1, 1, 1) grad = w * (noise_pred - noise) return grad def __call__( self, rgb: Float[Tensor, "B H W C"], cond_rgb: Float[Tensor, "B H W C"],
@threestudio.register("stable-diffusion-controlnet-guidance") class ControlNetGuidance(BaseObject): @dataclass class Config(BaseObject.Config): cache_dir: Optional[str] = None pretrained_model_name_or_path: str = "SG161222/Realistic_Vision_V2.0" ddim_scheduler_name_or_path: str = "/home/xinhuang/.cache/huggingface/hub/models--CompVis--stable-diffusion-v1-4/snapshots/59ec6bdf37d6279d3c0faf36e89ff1aa34f7ebf4" control_type: str = "normal" # normal/canny enable_memory_efficient_attention: bool = False enable_sequential_cpu_offload: bool = False enable_attention_slicing: bool = False enable_channels_last_format: bool = False guidance_scale: float = 7.5 condition_scale: float = 1.5 grad_clip: Optional[ Any ] = None # field(default_factory=lambda: [0, 2.0, 8.0, 1000]) half_precision_weights: bool = True min_step_percent: float = 0.02 max_step_percent: float = 0.98 diffusion_steps: int = 20 use_sds: bool = False # Canny threshold canny_lower_bound: int = 50 canny_upper_bound: int = 100 cfg: Config def configure(self) -> None: threestudio.info(f"Loading ControlNet ...") controlnet_name_or_path: str if self.cfg.control_type in ("normal", "input_normal"): controlnet_name_or_path = "/home/xinhuang/.cache/huggingface/hub/models--lllyasviel--control_v11p_sd15_normalbae/snapshots/cb7296e6587a219068e9d65864e38729cd862aa8" elif self.cfg.control_type == "canny": controlnet_name_or_path = "/home/xinhuang/.cache/huggingface/hub/models--lllyasviel--control_v11p_sd15_canny/snapshots/115a470d547982438f70198e353a921996e2e819" self.weights_dtype = ( torch.float16 if self.cfg.half_precision_weights else torch.float32 ) pipe_kwargs = { "safety_checker": None, "feature_extractor": None, "requires_safety_checker": False, "torch_dtype": self.weights_dtype, "cache_dir": self.cfg.cache_dir, } controlnet = ControlNetModel.from_pretrained( controlnet_name_or_path, torch_dtype=self.weights_dtype, cache_dir=self.cfg.cache_dir, ) self.pipe = StableDiffusionControlNetPipeline.from_pretrained( self.cfg.pretrained_model_name_or_path, controlnet=controlnet, **pipe_kwargs ).to(self.device) self.scheduler = DDIMScheduler.from_pretrained( self.cfg.ddim_scheduler_name_or_path, subfolder="scheduler", torch_dtype=self.weights_dtype, cache_dir=self.cfg.cache_dir, ) self.scheduler.set_timesteps(self.cfg.diffusion_steps) if self.cfg.enable_memory_efficient_attention: if parse_version(torch.__version__) >= parse_version("2"): threestudio.info( "PyTorch2.0 uses memory efficient attention by default." ) elif not is_xformers_available(): threestudio.warn( "xformers is not available, memory efficient attention is not enabled." 
) else: self.pipe.enable_xformers_memory_efficient_attention() if self.cfg.enable_sequential_cpu_offload: self.pipe.enable_sequential_cpu_offload() if self.cfg.enable_attention_slicing: self.pipe.enable_attention_slicing(1) if self.cfg.enable_channels_last_format: self.pipe.unet.to(memory_format=torch.channels_last) # Create model self.vae = self.pipe.vae.eval() self.unet = self.pipe.unet.eval() self.controlnet = self.pipe.controlnet.eval() if self.cfg.control_type == "normal": self.preprocessor = NormalBaeDetector.from_pretrained( "/home/xinhuang/.cache/huggingface/hub/models--lllyasviel--Annotators/snapshots/9a7d84251d487d11c4834466779de6b0d2c44486" ) self.preprocessor.model.to(self.device) elif self.cfg.control_type == "canny": self.preprocessor = CannyDetector() for p in self.vae.parameters(): p.requires_grad_(False) for p in self.unet.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps() # set to default value self.alphas: Float[Tensor, "..."] = self.scheduler.alphas_cumprod.to( self.device ) self.grad_clip_val: Optional[float] = None threestudio.info(f"Loaded ControlNet!") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps(self, min_step_percent=0.02, max_step_percent=0.98): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) def forward_controlnet( self, latents: Float[Tensor, "..."], t: Float[Tensor, "..."], image_cond: Float[Tensor, "..."], condition_scale: float, encoder_hidden_states: Float[Tensor, "..."], ) -> Float[Tensor, "..."]: return self.controlnet( latents.to(self.weights_dtype), t.to(self.weights_dtype), encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype), controlnet_cond=image_cond.to(self.weights_dtype), conditioning_scale=condition_scale, return_dict=False, ) @torch.cuda.amp.autocast(enabled=False) def forward_control_unet( self, latents: Float[Tensor, "..."], t: Float[Tensor, "..."], encoder_hidden_states: Float[Tensor, "..."], cross_attention_kwargs, down_block_additional_residuals, mid_block_additional_residual, ) -> Float[Tensor, "..."]: input_dtype = latents.dtype return self.unet( latents.to(self.weights_dtype), t.to(self.weights_dtype), encoder_hidden_states=encoder_hidden_states.to(self.weights_dtype), cross_attention_kwargs=cross_attention_kwargs, down_block_additional_residuals=down_block_additional_residuals, mid_block_additional_residual=mid_block_additional_residual, ).sample.to(input_dtype) @torch.cuda.amp.autocast(enabled=False) def encode_images( self, imgs: Float[Tensor, "B 3 512 512"] ) -> Float[Tensor, "B 4 64 64"]: input_dtype = imgs.dtype imgs = imgs * 2.0 - 1.0 posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist latents = posterior.sample() * self.vae.config.scaling_factor return latents.to(input_dtype) @torch.cuda.amp.autocast(enabled=False) def encode_cond_images( self, imgs: Float[Tensor, "B 3 512 512"] ) -> Float[Tensor, "B 4 64 64"]: input_dtype = imgs.dtype imgs = imgs * 2.0 - 1.0 posterior = self.vae.encode(imgs.to(self.weights_dtype)).latent_dist latents = posterior.mode() uncond_image_latents = torch.zeros_like(latents) latents = torch.cat([latents, latents, uncond_image_latents], dim=0) return latents.to(input_dtype) @torch.cuda.amp.autocast(enabled=False) def decode_latents( self, latents: Float[Tensor, "B 4 H W"], latent_height: int = 64, latent_width: int = 64, ) -> Float[Tensor, "B 3 512 512"]: 
input_dtype = latents.dtype latents = F.interpolate( latents, (latent_height, latent_width), mode="bilinear", align_corners=False ) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents.to(self.weights_dtype)).sample image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) def edit_latents( self, text_embeddings: Float[Tensor, "BB 77 768"], latents: Float[Tensor, "B 4 64 64"], image_cond: Float[Tensor, "B 3 512 512"], t: Int[Tensor, "B"], ) -> Float[Tensor, "B 4 64 64"]: self.scheduler.config.num_train_timesteps = t.item() self.scheduler.set_timesteps(self.cfg.diffusion_steps) with torch.no_grad(): # add noise noise = torch.randn_like(latents) latents = self.scheduler.add_noise(latents, noise, t) # type: ignore # sections of code used from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_instruct_pix2pix.py threestudio.debug("Start editing...") for i, t in enumerate(self.scheduler.timesteps): # predict the noise residual with unet, NO grad! with torch.no_grad(): # pred noise latent_model_input = torch.cat([latents] * 2) ( down_block_res_samples, mid_block_res_sample, ) = self.forward_controlnet( latent_model_input, t, encoder_hidden_states=text_embeddings, image_cond=image_cond, condition_scale=self.cfg.condition_scale, ) noise_pred = self.forward_control_unet( latent_model_input, t, encoder_hidden_states=text_embeddings, cross_attention_kwargs=None, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ) # perform classifier-free guidance noise_pred_text, noise_pred_uncond = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.cfg.guidance_scale * ( noise_pred_text - noise_pred_uncond ) # get previous sample, continue loop latents = self.scheduler.step(noise_pred, t, latents).prev_sample threestudio.debug("Editing finished.") return latents def prepare_image_cond(self, cond_rgb: Float[Tensor, "B H W C"]): if self.cfg.control_type == "normal": cond_rgb = ( (cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy() ) detected_map = self.preprocessor(cond_rgb) control = ( torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0 ) control = control.unsqueeze(0) control = control.permute(0, 3, 1, 2) elif self.cfg.control_type == "canny": cond_rgb = ( (cond_rgb[0].detach().cpu().numpy() * 255).astype(np.uint8).copy() ) blurred_img = cv2.blur(cond_rgb, ksize=(5, 5)) detected_map = self.preprocessor( blurred_img, self.cfg.canny_lower_bound, self.cfg.canny_upper_bound ) control = ( torch.from_numpy(np.array(detected_map)).float().to(self.device) / 255.0 ) control = control.unsqueeze(-1).repeat(1, 1, 3) control = control.unsqueeze(0) control = control.permute(0, 3, 1, 2) elif self.cfg.control_type == "input_normal": control = cond_rgb.permute(0, 3, 1, 2) else: raise ValueError(f"Unknown control type: {self.cfg.control_type}") return F.interpolate(control, (512, 512), mode="bilinear", align_corners=False) def compute_grad_sds( self, text_embeddings: Float[Tensor, "BB 77 768"], latents: Float[Tensor, "B 4 64 64"], image_cond: Float[Tensor, "B 3 512 512"], t: Int[Tensor, "B"], ): with torch.no_grad(): # add noise noise = torch.randn_like(latents) # TODO: use torch generator latents_noisy = self.scheduler.add_noise(latents, noise, t) # pred noise latent_model_input = torch.cat([latents_noisy] * 2) down_block_res_samples, mid_block_res_sample = self.forward_controlnet( latent_model_input, t, 
encoder_hidden_states=text_embeddings, image_cond=image_cond, condition_scale=self.cfg.condition_scale, ) noise_pred = self.forward_control_unet( latent_model_input, t, encoder_hidden_states=text_embeddings, cross_attention_kwargs=None, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, ) # perform classifier-free guidance noise_pred_text, noise_pred_uncond = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.cfg.guidance_scale * ( noise_pred_text - noise_pred_uncond ) w = (1 - self.alphas[t]).view(-1, 1, 1, 1) grad = w * (noise_pred - noise) return grad def __call__( self, rgb: Float[Tensor, "B H W C"], cond_rgb: Float[Tensor, "B H W C"],
prompt_utils: PromptProcessorOutput,
0
2023-12-23 12:37:48+00:00
8k
jesenzhang/ComfyUI_StreamDiffusion
streamdiffusion/acceleration/tensorrt/utilities.py
[ { "identifier": "CLIP", "path": "streamdiffusion/acceleration/tensorrt/models.py", "snippet": "class CLIP(BaseModel):\n def __init__(self, device, max_batch_size, embedding_dim, min_batch_size=1):\n super(CLIP, self).__init__(\n device=device,\n max_batch_size=max_batch_size,\n min_batch_size=min_batch_size,\n embedding_dim=embedding_dim,\n )\n self.name = \"CLIP\"\n\n def get_input_names(self):\n return [\"input_ids\"]\n\n def get_output_names(self):\n return [\"text_embeddings\", \"pooler_output\"]\n\n def get_dynamic_axes(self):\n return {\"input_ids\": {0: \"B\"}, \"text_embeddings\": {0: \"B\"}}\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n self.check_dims(batch_size, image_height, image_width)\n min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(\n batch_size, image_height, image_width, static_batch, static_shape\n )\n return {\n \"input_ids\": [\n (min_batch, self.text_maxlen),\n (batch_size, self.text_maxlen),\n (max_batch, self.text_maxlen),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return {\n \"input_ids\": (batch_size, self.text_maxlen),\n \"text_embeddings\": (batch_size, self.text_maxlen, self.embedding_dim),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph)\n opt.info(self.name + \": original\")\n opt.select_outputs([0]) # delete graph output#1\n opt.cleanup()\n opt.info(self.name + \": remove output[1]\")\n opt.fold_constants()\n opt.info(self.name + \": fold constants\")\n opt.infer_shapes()\n opt.info(self.name + \": shape inference\")\n opt.select_outputs([0], names=[\"text_embeddings\"]) # rename network output\n opt.info(self.name + \": remove output[0]\")\n opt_onnx_graph = opt.cleanup(return_onnx=True)\n opt.info(self.name + \": finished\")\n return opt_onnx_graph" }, { "identifier": "VAE", "path": "streamdiffusion/acceleration/tensorrt/models.py", "snippet": "class VAE(BaseModel):\n def __init__(self, device, max_batch_size, min_batch_size=1):\n super(VAE, self).__init__(\n device=device,\n max_batch_size=max_batch_size,\n min_batch_size=min_batch_size,\n embedding_dim=None,\n )\n self.name = \"VAE decoder\"\n\n def get_input_names(self):\n return [\"latent\"]\n\n def get_output_names(self):\n return [\"images\"]\n\n def get_dynamic_axes(self):\n return {\n \"latent\": {0: \"B\", 2: \"H\", 3: \"W\"},\n \"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"},\n }\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"latent\": [\n (min_batch, 4, min_latent_height, min_latent_width),\n (batch_size, 4, latent_height, latent_width),\n (max_batch, 4, max_latent_height, max_latent_width),\n ]\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"latent\": (batch_size, 4, latent_height, 
latent_width),\n \"images\": (batch_size, 3, image_height, image_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return torch.randn(\n batch_size,\n 4,\n latent_height,\n latent_width,\n dtype=torch.float32,\n device=self.device,\n )" }, { "identifier": "BaseModel", "path": "streamdiffusion/acceleration/tensorrt/models.py", "snippet": "class BaseModel:\n def __init__(\n self,\n fp16=False,\n device=\"cuda\",\n verbose=True,\n max_batch_size=16,\n min_batch_size=1,\n embedding_dim=768,\n text_maxlen=77,\n ):\n self.name = \"SD Model\"\n self.fp16 = fp16\n self.device = device\n self.verbose = verbose\n\n self.min_batch = min_batch_size\n self.max_batch = max_batch_size\n self.min_image_shape = 256 # min image resolution: 256x256\n self.max_image_shape = 1024 # max image resolution: 1024x1024\n self.min_latent_shape = self.min_image_shape // 8\n self.max_latent_shape = self.max_image_shape // 8\n\n self.embedding_dim = embedding_dim\n self.text_maxlen = text_maxlen\n\n def get_model(self):\n pass\n\n def get_input_names(self):\n pass\n\n def get_output_names(self):\n pass\n\n def get_dynamic_axes(self):\n return None\n\n def get_sample_input(self, batch_size, image_height, image_width):\n pass\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n return None\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n return None\n\n def optimize(self, onnx_graph):\n opt = Optimizer(onnx_graph, verbose=self.verbose)\n opt.info(self.name + \": original\")\n opt.cleanup()\n opt.info(self.name + \": cleanup\")\n opt.fold_constants()\n opt.info(self.name + \": fold constants\")\n opt.infer_shapes()\n opt.info(self.name + \": shape inference\")\n onnx_opt_graph = opt.cleanup(return_onnx=True)\n opt.info(self.name + \": finished\")\n return onnx_opt_graph\n\n def check_dims(self, batch_size, image_height, image_width):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n assert image_height % 8 == 0 or image_width % 8 == 0\n latent_height = image_height // 8\n latent_width = image_width // 8\n assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape\n assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape\n return (latent_height, latent_width)\n\n def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):\n min_batch = batch_size if static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n latent_height = image_height // 8\n latent_width = image_width // 8\n min_image_height = image_height if static_shape else self.min_image_shape\n max_image_height = image_height if static_shape else self.max_image_shape\n min_image_width = image_width if static_shape else self.min_image_shape\n max_image_width = image_width if static_shape else self.max_image_shape\n min_latent_height = latent_height if static_shape else self.min_latent_shape\n max_latent_height = latent_height if static_shape else self.max_latent_shape\n min_latent_width = latent_width if static_shape else self.min_latent_shape\n max_latent_width = latent_width if static_shape else self.max_latent_shape\n return (\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n )" }, { 
"identifier": "UNet", "path": "streamdiffusion/acceleration/tensorrt/models.py", "snippet": "class UNet(BaseModel):\n def __init__(\n self,\n fp16=False,\n device=\"cuda\",\n max_batch_size=16,\n min_batch_size=1,\n embedding_dim=768,\n text_maxlen=77,\n unet_dim=4,\n ):\n super(UNet, self).__init__(\n fp16=fp16,\n device=device,\n max_batch_size=max_batch_size,\n min_batch_size=min_batch_size,\n embedding_dim=embedding_dim,\n text_maxlen=text_maxlen,\n )\n self.unet_dim = unet_dim\n self.name = \"UNet\"\n\n def get_input_names(self):\n return [\"sample\", \"timestep\", \"encoder_hidden_states\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\n \"sample\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n \"timestep\": {0: \"2B\"},\n \"encoder_hidden_states\": {0: \"2B\"},\n \"latent\": {0: \"2B\", 2: \"H\", 3: \"W\"},\n }\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n _,\n _,\n _,\n _,\n min_latent_height,\n max_latent_height,\n min_latent_width,\n max_latent_width,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n return {\n \"sample\": [\n (min_batch, self.unet_dim, min_latent_height, min_latent_width),\n (batch_size, self.unet_dim, latent_height, latent_width),\n (max_batch, self.unet_dim, max_latent_height, max_latent_width),\n ],\n \"timestep\": [(min_batch,), (batch_size,), (max_batch,)],\n \"encoder_hidden_states\": [\n (min_batch, self.text_maxlen, self.embedding_dim),\n (batch_size, self.text_maxlen, self.embedding_dim),\n (max_batch, self.text_maxlen, self.embedding_dim),\n ],\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"sample\": (2 * batch_size, self.unet_dim, latent_height, latent_width),\n \"timestep\": (2 * batch_size,),\n \"encoder_hidden_states\": (2 * batch_size, self.text_maxlen, self.embedding_dim),\n \"latent\": (2 * batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n dtype = torch.float16 if self.fp16 else torch.float32\n return (\n torch.randn(\n 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device\n ),\n torch.ones((2 * batch_size,), dtype=torch.float32, device=self.device),\n torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),\n )" }, { "identifier": "VAEEncoder", "path": "streamdiffusion/acceleration/tensorrt/models.py", "snippet": "class VAEEncoder(BaseModel):\n def __init__(self, device, max_batch_size, min_batch_size=1):\n super(VAEEncoder, self).__init__(\n device=device,\n max_batch_size=max_batch_size,\n min_batch_size=min_batch_size,\n embedding_dim=None,\n )\n self.name = \"VAE encoder\"\n\n def get_input_names(self):\n return [\"images\"]\n\n def get_output_names(self):\n return [\"latent\"]\n\n def get_dynamic_axes(self):\n return {\n \"images\": {0: \"B\", 2: \"8H\", 3: \"8W\"},\n \"latent\": {0: \"B\", 2: \"H\", 3: \"W\"},\n }\n\n def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):\n assert batch_size >= self.min_batch and batch_size <= self.max_batch\n min_batch = batch_size if 
static_batch else self.min_batch\n max_batch = batch_size if static_batch else self.max_batch\n self.check_dims(batch_size, image_height, image_width)\n (\n min_batch,\n max_batch,\n min_image_height,\n max_image_height,\n min_image_width,\n max_image_width,\n _,\n _,\n _,\n _,\n ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)\n\n return {\n \"images\": [\n (min_batch, 3, min_image_height, min_image_width),\n (batch_size, 3, image_height, image_width),\n (max_batch, 3, max_image_height, max_image_width),\n ],\n }\n\n def get_shape_dict(self, batch_size, image_height, image_width):\n latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)\n return {\n \"images\": (batch_size, 3, image_height, image_width),\n \"latent\": (batch_size, 4, latent_height, latent_width),\n }\n\n def get_sample_input(self, batch_size, image_height, image_width):\n self.check_dims(batch_size, image_height, image_width)\n return torch.randn(\n batch_size,\n 3,\n image_height,\n image_width,\n dtype=torch.float32,\n device=self.device,\n )" } ]
import gc
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from collections import OrderedDict
from typing import *
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
    CreateConfig,
    Profile,
    engine_from_bytes,
    engine_from_network,
    network_from_onnx_path,
    save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models import CLIP, VAE, BaseModel, UNet, VAEEncoder
6,281
if workspace_size > 0: config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size} if not enable_all_tactics: config_kwargs["tactic_sources"] = [] engine = engine_from_network( network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]), config=CreateConfig( fp16=fp16, refittable=enable_refit, profiles=[p], load_timing_cache=timing_cache, **config_kwargs ), save_timing_cache=timing_cache, ) save_engine(engine, path=self.engine_path) def load(self): print(f"Loading TensorRT engine: {self.engine_path}") self.engine = engine_from_bytes(bytes_from_path(self.engine_path)) def activate(self, reuse_device_memory=None): if reuse_device_memory: self.context = self.engine.create_execution_context_without_device_memory() self.context.device_memory = reuse_device_memory else: self.context = self.engine.create_execution_context() def allocate_buffers(self, shape_dict=None, device="cuda"): for idx in range(trt_util.get_bindings_per_profile(self.engine)): binding = self.engine[idx] if shape_dict and binding in shape_dict: shape = shape_dict[binding] else: shape = self.engine.get_binding_shape(binding) dtype = trt.nptype(self.engine.get_binding_dtype(binding)) if self.engine.binding_is_input(binding): self.context.set_binding_shape(idx, shape) tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device) self.tensors[binding] = tensor def infer(self, feed_dict, stream, use_cuda_graph=False): for name, buf in feed_dict.items(): self.tensors[name].copy_(buf) for name, tensor in self.tensors.items(): self.context.set_tensor_address(name, tensor.data_ptr()) if use_cuda_graph: if self.cuda_graph_instance is not None: CUASSERT(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream.ptr)) CUASSERT(cudart.cudaStreamSynchronize(stream.ptr)) else: # do inference before CUDA graph capture noerror = self.context.execute_async_v3(stream.ptr) if not noerror: raise ValueError("ERROR: inference failed.") # capture cuda graph CUASSERT( cudart.cudaStreamBeginCapture(stream.ptr, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal) ) self.context.execute_async_v3(stream.ptr) self.graph = CUASSERT(cudart.cudaStreamEndCapture(stream.ptr)) self.cuda_graph_instance = CUASSERT(cudart.cudaGraphInstantiate(self.graph, 0)) else: noerror = self.context.execute_async_v3(stream.ptr) if not noerror: raise ValueError("ERROR: inference failed.") return self.tensors def decode_images(images: torch.Tensor): images = ( ((images + 1) * 255 / 2).clamp(0, 255).detach().permute(0, 2, 3, 1).round().type(torch.uint8).cpu().numpy() ) return [Image.fromarray(x) for x in images] def preprocess_image(image: Image.Image): w, h = image.size w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 image = image.resize((w, h)) init_image = np.array(image).astype(np.float32) / 255.0 init_image = init_image[None].transpose(0, 3, 1, 2) init_image = torch.from_numpy(init_image).contiguous() return 2.0 * init_image - 1.0 def prepare_mask_and_masked_image(image: Image.Image, mask: Image.Image): if isinstance(image, Image.Image): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32).contiguous() / 127.5 - 1.0 if isinstance(mask, Image.Image): mask = np.array(mask.convert("L")) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask).to(dtype=torch.float32).contiguous() masked_image = image * (mask < 
0.5) return mask, masked_image def create_models( model_id: str, use_auth_token: Optional[str], device: Union[str, torch.device], max_batch_size: int, unet_in_channels: int = 4, embedding_dim: int = 768, ): models = { "clip": CLIP( hf_token=use_auth_token, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim, ),
#! fork: https://github.com/NVIDIA/TensorRT/blob/main/demo/Diffusion/utilities.py # # Copyright 2022 The HuggingFace Inc. team. # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # TRT_LOGGER = trt.Logger(trt.Logger.ERROR) # Map of numpy dtype -> torch dtype numpy_to_torch_dtype_dict = { np.uint8: torch.uint8, np.int8: torch.int8, np.int16: torch.int16, np.int32: torch.int32, np.int64: torch.int64, np.float16: torch.float16, np.float32: torch.float32, np.float64: torch.float64, np.complex64: torch.complex64, np.complex128: torch.complex128, } if np.version.full_version >= "1.24.0": numpy_to_torch_dtype_dict[np.bool_] = torch.bool else: numpy_to_torch_dtype_dict[np.bool] = torch.bool # Map of torch dtype -> numpy dtype torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()} def CUASSERT(cuda_ret): err = cuda_ret[0] if err != cudart.cudaError_t.cudaSuccess: raise RuntimeError( f"CUDA ERROR: {err}, error code reference: https://nvidia.github.io/cuda-python/module/cudart.html#cuda.cudart.cudaError_t" ) if len(cuda_ret) > 1: return cuda_ret[1] return None class Engine: def __init__( self, engine_path, ): self.engine_path = engine_path self.engine = None self.context = None self.buffers = OrderedDict() self.tensors = OrderedDict() self.cuda_graph_instance = None # cuda graph def __del__(self): [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)] del self.engine del self.context del self.buffers del self.tensors def refit(self, onnx_path, onnx_refit_path): def convert_int64(arr): # TODO: smarter conversion if len(arr.shape) == 0: return np.int32(arr) return arr def add_to_map(refit_dict, name, values): if name in refit_dict: assert refit_dict[name] is None if values.dtype == np.int64: values = convert_int64(values) refit_dict[name] = values print(f"Refitting TensorRT engine with {onnx_refit_path} weights") refit_nodes = gs.import_onnx(onnx.load(onnx_refit_path)).toposort().nodes # Construct mapping from weight names in refit model -> original model name_map = {} for n, node in enumerate(gs.import_onnx(onnx.load(onnx_path)).toposort().nodes): refit_node = refit_nodes[n] assert node.op == refit_node.op # Constant nodes in ONNX do not have inputs but have a constant output if node.op == "Constant": name_map[refit_node.outputs[0].name] = node.outputs[0].name # Handle scale and bias weights elif node.op == "Conv": if node.inputs[1].__class__ == gs.Constant: name_map[refit_node.name + "_TRTKERNEL"] = node.name + "_TRTKERNEL" if node.inputs[2].__class__ == gs.Constant: name_map[refit_node.name + "_TRTBIAS"] = node.name + "_TRTBIAS" # For all other nodes: find node inputs that are initializers (gs.Constant) else: for i, inp in enumerate(node.inputs): if inp.__class__ == gs.Constant: name_map[refit_node.inputs[i].name] = inp.name def map_name(name): if name in name_map: return name_map[name] return name # Construct 
refit dictionary refit_dict = {} refitter = trt.Refitter(self.engine, TRT_LOGGER) all_weights = refitter.get_all() for layer_name, role in zip(all_weights[0], all_weights[1]): # for speciailized roles, use a unique name in the map: if role == trt.WeightsRole.KERNEL: name = layer_name + "_TRTKERNEL" elif role == trt.WeightsRole.BIAS: name = layer_name + "_TRTBIAS" else: name = layer_name assert name not in refit_dict, "Found duplicate layer: " + name refit_dict[name] = None for n in refit_nodes: # Constant nodes in ONNX do not have inputs but have a constant output if n.op == "Constant": name = map_name(n.outputs[0].name) print(f"Add Constant {name}\n") add_to_map(refit_dict, name, n.outputs[0].values) # Handle scale and bias weights elif n.op == "Conv": if n.inputs[1].__class__ == gs.Constant: name = map_name(n.name + "_TRTKERNEL") add_to_map(refit_dict, name, n.inputs[1].values) if n.inputs[2].__class__ == gs.Constant: name = map_name(n.name + "_TRTBIAS") add_to_map(refit_dict, name, n.inputs[2].values) # For all other nodes: find node inputs that are initializers (AKA gs.Constant) else: for inp in n.inputs: name = map_name(inp.name) if inp.__class__ == gs.Constant: add_to_map(refit_dict, name, inp.values) for layer_name, weights_role in zip(all_weights[0], all_weights[1]): if weights_role == trt.WeightsRole.KERNEL: custom_name = layer_name + "_TRTKERNEL" elif weights_role == trt.WeightsRole.BIAS: custom_name = layer_name + "_TRTBIAS" else: custom_name = layer_name # Skip refitting Trilu for now; scalar weights of type int64 value 1 - for clip model if layer_name.startswith("onnx::Trilu"): continue if refit_dict[custom_name] is not None: refitter.set_weights(layer_name, weights_role, refit_dict[custom_name]) else: print(f"[W] No refit weights for layer: {layer_name}") if not refitter.refit_cuda_engine(): print("Failed to refit!") exit(0) def build( self, onnx_path, fp16, input_profile=None, enable_refit=False, enable_all_tactics=False, timing_cache=None, workspace_size=0, ): print(f"Building TensorRT engine for {onnx_path}: {self.engine_path}") p = Profile() if input_profile: for name, dims in input_profile.items(): assert len(dims) == 3 p.add(name, min=dims[0], opt=dims[1], max=dims[2]) config_kwargs = {} if workspace_size > 0: config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size} if not enable_all_tactics: config_kwargs["tactic_sources"] = [] engine = engine_from_network( network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]), config=CreateConfig( fp16=fp16, refittable=enable_refit, profiles=[p], load_timing_cache=timing_cache, **config_kwargs ), save_timing_cache=timing_cache, ) save_engine(engine, path=self.engine_path) def load(self): print(f"Loading TensorRT engine: {self.engine_path}") self.engine = engine_from_bytes(bytes_from_path(self.engine_path)) def activate(self, reuse_device_memory=None): if reuse_device_memory: self.context = self.engine.create_execution_context_without_device_memory() self.context.device_memory = reuse_device_memory else: self.context = self.engine.create_execution_context() def allocate_buffers(self, shape_dict=None, device="cuda"): for idx in range(trt_util.get_bindings_per_profile(self.engine)): binding = self.engine[idx] if shape_dict and binding in shape_dict: shape = shape_dict[binding] else: shape = self.engine.get_binding_shape(binding) dtype = trt.nptype(self.engine.get_binding_dtype(binding)) if self.engine.binding_is_input(binding): self.context.set_binding_shape(idx, shape) tensor = 
torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device) self.tensors[binding] = tensor def infer(self, feed_dict, stream, use_cuda_graph=False): for name, buf in feed_dict.items(): self.tensors[name].copy_(buf) for name, tensor in self.tensors.items(): self.context.set_tensor_address(name, tensor.data_ptr()) if use_cuda_graph: if self.cuda_graph_instance is not None: CUASSERT(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream.ptr)) CUASSERT(cudart.cudaStreamSynchronize(stream.ptr)) else: # do inference before CUDA graph capture noerror = self.context.execute_async_v3(stream.ptr) if not noerror: raise ValueError("ERROR: inference failed.") # capture cuda graph CUASSERT( cudart.cudaStreamBeginCapture(stream.ptr, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal) ) self.context.execute_async_v3(stream.ptr) self.graph = CUASSERT(cudart.cudaStreamEndCapture(stream.ptr)) self.cuda_graph_instance = CUASSERT(cudart.cudaGraphInstantiate(self.graph, 0)) else: noerror = self.context.execute_async_v3(stream.ptr) if not noerror: raise ValueError("ERROR: inference failed.") return self.tensors def decode_images(images: torch.Tensor): images = ( ((images + 1) * 255 / 2).clamp(0, 255).detach().permute(0, 2, 3, 1).round().type(torch.uint8).cpu().numpy() ) return [Image.fromarray(x) for x in images] def preprocess_image(image: Image.Image): w, h = image.size w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 image = image.resize((w, h)) init_image = np.array(image).astype(np.float32) / 255.0 init_image = init_image[None].transpose(0, 3, 1, 2) init_image = torch.from_numpy(init_image).contiguous() return 2.0 * init_image - 1.0 def prepare_mask_and_masked_image(image: Image.Image, mask: Image.Image): if isinstance(image, Image.Image): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32).contiguous() / 127.5 - 1.0 if isinstance(mask, Image.Image): mask = np.array(mask.convert("L")) mask = mask.astype(np.float32) / 255.0 mask = mask[None, None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask).to(dtype=torch.float32).contiguous() masked_image = image * (mask < 0.5) return mask, masked_image def create_models( model_id: str, use_auth_token: Optional[str], device: Union[str, torch.device], max_batch_size: int, unet_in_channels: int = 4, embedding_dim: int = 768, ): models = { "clip": CLIP( hf_token=use_auth_token, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim, ),
"unet": UNet(
3
2023-12-29 09:00:03+00:00
8k
pjaos/ct6_meter_os
software/ct6_app_server/lib/db_handler.py
[ { "identifier": "ConfigBase", "path": "software/ct6_app_server/lib/config.py", "snippet": "class ConfigBase(ConfigManager):\n \"\"\"@brief Responsible for managing configuration used by all apps.\"\"\"\n ICONS_ADDRESS = \"ICONS_ADDRESS\"\n ICONS_PORT = \"ICONS_PORT\"\n ICONS_USERNAME = \"ICONS_USERNAME\"\n ICONS_SSH_KEY_FILE = \"ICONS_SSH_KEY_FILE\"\n MQTT_TOPIC = \"MQTT_TOPIC\"\n TIMESTAMP = \"TIMESTAMP\"\n DB_HOST = \"DB_HOST\"\n DB_PORT = \"DB_PORT\"\n DB_USERNAME = \"DB_USERNAME\"\n DB_PASSWORD = \"DB_PASSWORD\"\n LOCAL_GUI_SERVER_ADDRESS = \"LOCAL_GUI_SERVER_ADDRESS\" \n LOCAL_GUI_SERVER_PORT = \"LOCAL_GUI_SERVER_PORT\"\n SERVER_LOGIN = \"SERVER_LOGIN\"\n SERVER_ACCESS_LOG_FILE = \"SERVER_ACCESS_LOG_FILE\"\n \n @staticmethod\n def GetTableSchema(tableSchemaString):\n \"\"\"@brief Get the table schema\n @param tableSchemaString The string defining the database table schema.\n @return A dictionary containing a database table schema.\"\"\"\n timestampFound=False\n tableSchemaDict = {}\n elems = tableSchemaString.split(\" \")\n if len(elems) > 0:\n for elem in elems:\n subElems = elem.split(\":\")\n if len(subElems) == 2:\n colName = subElems[0]\n if colName == ConfigBase.TIMESTAMP:\n timestampFound=True\n colType = subElems[1]\n tableSchemaDict[colName] = colType\n else:\n raise Exception(\"{} is an invalid table schema column.\".format(elem))\n return tableSchemaDict\n else:\n raise Exception(\"Invalid Table schema. No elements found.\")\n\n if not timestampFound:\n raise Exception(\"No {} table column defined.\".format(ConfigBase.TIMESTAMP))\n\n def __init__(self, uio, configFile, defaultConfig):\n \"\"\"@brief Constructor.\n @param uio UIO instance.\n @param configFile Config file instance.\n @param defaultConfig The default configuration.\"\"\"\n super().__init__(uio, configFile, defaultConfig, addDotToFilename=False, encrypt=True)\n self._uio = uio\n self.load()\n self.store()\n \n def _showLocalIPAddressList(self):\n \"\"\"@brief Show the user a list of local IP addresses that they may want to use to present the GUI/Bokeh server on.\n @return A List of local IP addresses.\"\"\"\n localIPList = []\n adapters = ifaddr.get_adapters()\n self._uio.info(\"Local Interface List\")\n self._uio.info(\"-\"*62)\n self._uio.info(\"| Interface Name | IP Address |\")\n self._uio.info(\"-\"*62)\n for adapter in adapters:\n for ip in adapter.ips:\n if isinstance(ip.ip, str):\n self._uio.info(\"| {: <25s} | {: <25s} |\".format(adapter.nice_name, ip.ip))\n localIPList.append(ip.ip)\n self._uio.info(\"-\"*62)\n return localIPList\n \n def _enterServerAccessLogFile(self):\n \"\"\"@brief Allow the user to enter the server access log file.\"\"\"\n # Ensure the user enters the path and name of the server access log file.\n while True:\n self.inputStr(ConfigBase.SERVER_ACCESS_LOG_FILE, \"Enter the file (full path) to record server access.\", False)\n logFile = self.getAttr(ConfigBase.SERVER_ACCESS_LOG_FILE)\n logFile = os.path.abspath(logFile)\n logPath = os.path.dirname(logFile) \n if os.path.isdir(logPath):\n # Try creating the file to check write access\n try:\n # Check if file is already present\n if os.path.isfile(logFile):\n delete = self._uio.getBoolInput(f\"OK to overwrite {logFile} ? 
y/n\")\n if not delete:\n continue\n # Create empty file.\n with open(logFile, 'w'):\n pass\n break\n except IOError as ex:\n self._uio.error(f\"{str(ex)} folder not found.\")\n else:\n self._uio.error(f\"{logPath} folder not found.\")\n \n def edit(self, key):\n \"\"\"@brief Provide the functionality to allow the user to enter any ct4 config parameter\n regardless of the config type.\n @param key The dict key to be edited.\n @return True if the config parameter was handled/updated\"\"\"\n handled = False\n\n if key == ConfigBase.ICONS_ADDRESS:\n self.inputStr(ConfigBase.ICONS_ADDRESS, \"Enter the ICON server address\", False)\n handled = True\n \n elif key == ConfigBase.ICONS_PORT:\n self.inputDecInt(ConfigBase.ICONS_PORT, \"Enter the ICON server port (default = 22)\", minValue=1024, maxValue=65535)\n handled = True\n\n elif key == ConfigBase.ICONS_USERNAME:\n self.inputStr(ConfigBase.ICONS_USERNAME, \"Enter ICON server username\", False)\n handled = True\n\n elif key == ConfigBase.ICONS_SSH_KEY_FILE:\n self.inputStr(ConfigBase.ICONS_SSH_KEY_FILE, \"Enter the ICON server ssh key file\", False)\n handled = True\n\n elif key == ConfigBase.MQTT_TOPIC:\n self._uio.info(\"The MQTT topic can be # to receive data on all YView devices.\")\n self._uio.info(\"To limit the data received to all devices at a location (E.G HOME/#).\")\n self._uio.info(\"To limit the data received to a single device at a location enter HOME/QUAD_CT_SENSOR_A\")\n self.inputStr(ConfigBase.MQTT_TOPIC, \"Enter the location of the device\", False)\n handled = True\n\n elif key == ConfigBase.DB_HOST:\n self.inputStr(ConfigBase.DB_HOST, \"Enter the address of the MYSQL database server\", False)\n handled = True\n\n elif key == ConfigBase.DB_PORT:\n self.inputDecInt(ConfigBase.DB_PORT, \"Enter TCP port to connect to the MYSQL database server\", minValue=1024, maxValue=65535)\n handled = True\n\n elif key == ConfigBase.DB_USERNAME:\n self.inputStr(ConfigBase.DB_USERNAME, \"Enter the database username\", False)\n handled = True\n\n elif key == ConfigBase.DB_PASSWORD:\n self.inputStr(ConfigBase.DB_PASSWORD, \"Enter the database password\", False)\n handled = True\n \n elif key == ConfigBase.LOCAL_GUI_SERVER_ADDRESS:\n localIPList = self._showLocalIPAddressList()\n # Ensure the user enters an IP address of an interface on this machine.\n while True:\n self.inputStr(ConfigBase.LOCAL_GUI_SERVER_ADDRESS, \"Enter the local IP address to serve the GUI/Bokeh web interface from\", False)\n ipAddr = self.getAttr(ConfigBase.LOCAL_GUI_SERVER_ADDRESS)\n if ipAddr in localIPList:\n break\n else:\n self._uio.error(\"{} is not a IP address of an interface on this machine.\".format(ipAddr))\n handled = True\n \n elif key == ConfigBase.LOCAL_GUI_SERVER_PORT:\n self.inputBool(ConfigBase.LOCAL_GUI_SERVER_PORT, \"Enter the TCP port to serve the GUI/Bokeh web interface from\", minValue=1024, maxValue=65535)\n handled = True\n \n elif key == ConfigBase.SERVER_LOGIN:\n self.inputBool(ConfigBase.SERVER_LOGIN, \"Enable server login\")\n handled = True\n \n elif key == ConfigBase.SERVER_ACCESS_LOG_FILE:\n self._enterServerAccessLogFile()\n \n if handled:\n self.store()\n\n return handled" }, { "identifier": "BaseConstants", "path": "software/ct6_app_server/lib/base_constants.py", "snippet": "class BaseConstants(object):\n \"\"\"@brief Responsible for defining contants\"\"\"\n LOCATION = \"LOCATION\"\n MQTT_TOPIC = \"MQTT_TOPIC\"\n UNIT_NAME = \"UNIT_NAME\"\n PRODUCT_ID = \"PRODUCT_ID\"\n IP_ADDRESS = \"IP_ADDRESS\"\n ASSY = \"ASSY\"\n SERVER_SERVICE_LIST = 
\"SERVER_SERVICE_LIST\"\n LOCALHOST_SERVICE_LIST = \"LOCALHOST_SERVICE_LIST\"\n WEB_SERVICE_NAME = \"WEB\"\n HTTP_SERVICE_NAME = \"HTTP\"\n WEB_SERVICE_NAME_LIST = (WEB_SERVICE_NAME, HTTP_SERVICE_NAME)\n VALID_PRODUCT_ID_LIST = (\"CT6\",)\n\n MQTT_LOOP_BLOCK_SECONDS = 1\n\n LOCALHOST = \"127.0.0.1\"\n MQTT_PORT = 1883\n\n RECONNECT_DELAY_SECS = 10\n\n DATABASE_KEY = 'Database'\n\n SHOW_DATABASES_SQL_CMD = 'SHOW DATABASES;'\n\n TIMESTAMP = \"TIMESTAMP\"\n\n @staticmethod\n def GetTableSchema(tableSchemaString):\n \"\"\"@brief Get the table schema\n @param tableSchemaString The string defining the database table schema.\n @return A dictionary containing a database table schema.\"\"\"\n timestampFound=False\n tableSchemaDict = {}\n elems = tableSchemaString.split(\" \")\n if len(elems) > 0:\n for elem in elems:\n subElems = elem.split(\":\")\n if len(subElems) == 2:\n colName = subElems[0]\n if colName == BaseConstants.TIMESTAMP:\n timestampFound=True\n colType = subElems[1]\n tableSchemaDict[colName] = colType\n else:\n raise Exception(\"{} is an invalid table schema column.\".format(elem))\n return tableSchemaDict\n else:\n raise Exception(\"Invalid Table schema. No elements found.\")\n\n if not timestampFound:\n raise Exception(\"No {} table column defined.\".format(BaseConstants.TIMESTAMP))\n \n CT6_META_TABLE_NAME = \"CT6_META\"\n CT6_TABLE_NAME = \"CT6_SENSOR\"\n\n # Dev dict params\n ASSY = \"ASSY\"\n CT1 = \"CT1\"\n CT2 = \"CT2\"\n CT3 = \"CT3\"\n CT4 = \"CT4\"\n CT5 = \"CT5\"\n CT6 = \"CT6\"\n CT_DEV_LIST = (CT1, CT2, CT3, CT4, CT5, CT6)\n NAME = \"NAME\"\n WATTS = 'WATTS'\n PRMS = \"PRMS\"\n PREACT = \"PREACT\"\n PAPPARENT = \"PAPPARENT\"\n VRMS = \"VRMS\"\n FREQ = \"FREQ\"\n PREACT = \"PREACT\"\n PF = \"PF\"\n TEMPERATURE = 'BOARD_TEMPERATURE' # The same name is used in the database for this param\n TEMP = 'TEMP'\n RSSI_DBM = 'RSSI_DBM' # The name in the database\n RSSI = 'RSSI' # The name in the dict received from the device\n\n # Database table params\n HW_ASSY = \"HW_ASSY\"\n CT1_NAME = \"CT1_NAME\"\n CT2_NAME = \"CT2_NAME\"\n CT3_NAME = \"CT3_NAME\"\n CT4_NAME = \"CT4_NAME\"\n CT5_NAME = \"CT5_NAME\"\n CT6_NAME = \"CT6_NAME\"\n\n CT1_ACT_WATTS = \"CT1_ACT_WATTS\"\n CT2_ACT_WATTS = \"CT2_ACT_WATTS\"\n CT3_ACT_WATTS = \"CT3_ACT_WATTS\"\n CT4_ACT_WATTS = \"CT4_ACT_WATTS\"\n CT5_ACT_WATTS = \"CT5_ACT_WATTS\"\n CT6_ACT_WATTS = \"CT6_ACT_WATTS\"\n \n CT1_REACT_WATTS = \"CT1_REACT_WATTS\"\n CT2_REACT_WATTS = \"CT2_REACT_WATTS\"\n CT3_REACT_WATTS = \"CT3_REACT_WATTS\"\n CT4_REACT_WATTS = \"CT4_REACT_WATTS\"\n CT5_REACT_WATTS = \"CT5_REACT_WATTS\"\n CT6_REACT_WATTS = \"CT6_REACT_WATTS\"\n\n CT1_APP_WATTS = \"CT1_APP_WATTS\"\n CT2_APP_WATTS = \"CT2_APP_WATTS\"\n CT3_APP_WATTS = \"CT3_APP_WATTS\"\n CT4_APP_WATTS = \"CT4_APP_WATTS\"\n CT5_APP_WATTS = \"CT5_APP_WATTS\"\n CT6_APP_WATTS = \"CT6_APP_WATTS\"\n \n CT1_PF = \"CT1_PF\"\n CT2_PF = \"CT2_PF\"\n CT3_PF = \"CT3_PF\"\n CT4_PF = \"CT4_PF\"\n CT5_PF = \"CT5_PF\"\n CT6_PF = \"CT6_PF\"\n VOLTAGE = \"VOLTAGE\"\n FREQUENCY = \"FREQUENCY\"\n ACTIVE = 'ACTIVE'\n FIELD_LIST_A = [CT1_ACT_WATTS, \n CT2_ACT_WATTS, \n CT3_ACT_WATTS, \n CT4_ACT_WATTS, \n CT5_ACT_WATTS, \n CT6_ACT_WATTS,\n CT1_REACT_WATTS,\n CT2_REACT_WATTS,\n CT3_REACT_WATTS,\n CT4_REACT_WATTS,\n CT5_REACT_WATTS,\n CT6_REACT_WATTS,\n CT1_APP_WATTS,\n CT2_APP_WATTS,\n CT3_APP_WATTS,\n CT4_APP_WATTS,\n CT5_APP_WATTS,\n CT6_APP_WATTS, \n CT1_PF, \n CT2_PF, \n CT3_PF, \n CT4_PF, \n CT5_PF, \n CT6_PF, \n VOLTAGE, \n FREQUENCY, \n TEMPERATURE, \n RSSI_DBM]\n # \n CT6_DB_META_TABLE_SCHEMA 
= \"{}:VARCHAR(64) \" \\\n \"{}:VARCHAR(64) \" \\\n \"{}:VARCHAR(64) \" \\\n \"{}:VARCHAR(64) \" \\\n \"{}:VARCHAR(64) \" \\\n \"{}:VARCHAR(64) \" \\\n \"{}:VARCHAR(64)\".format(HW_ASSY, \n CT1_NAME, \n CT2_NAME, \n CT3_NAME, \n CT4_NAME, \n CT5_NAME, \n CT6_NAME)\n CT6_DB_TABLE_SCHEMA = \"TIMESTAMP:TIMESTAMP \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(6,1) \" \\\n \"{}:FLOAT(4,3) \" \\\n \"{}:FLOAT(4,3) \" \\\n \"{}:FLOAT(4,3) \" \\\n \"{}:FLOAT(4,3) \" \\\n \"{}:FLOAT(4,3) \" \\\n \"{}:FLOAT(4,3) \" \\\n \"{}:FLOAT(4,1) \" \\\n \"{}:FLOAT(3,1) \" \\\n \"{}:FLOAT(4,1) \" \\\n \"{}:FLOAT(3,1)\".format(CT1_ACT_WATTS, \n CT2_ACT_WATTS, \n CT3_ACT_WATTS, \n CT4_ACT_WATTS, \n CT5_ACT_WATTS, \n CT6_ACT_WATTS, \n CT1_REACT_WATTS,\n CT2_REACT_WATTS,\n CT3_REACT_WATTS,\n CT4_REACT_WATTS,\n CT5_REACT_WATTS,\n CT6_REACT_WATTS,\n CT1_APP_WATTS,\n CT2_APP_WATTS,\n CT3_APP_WATTS,\n CT4_APP_WATTS,\n CT5_APP_WATTS,\n CT6_APP_WATTS,\n CT1_PF, \n CT2_PF, \n CT3_PF, \n CT4_PF, \n CT5_PF, \n CT6_PF,\n VOLTAGE, \n FREQUENCY, \n TEMPERATURE, \n RSSI_DBM)\n \n MAX_RES_DB_DATA_TABLE_NAME = CT6_TABLE_NAME\n MINUTE_RES_DB_DATA_TABLE_NAME = 'CT6_SENSOR_MINUTE'\n HOUR_RES_DB_DATA_TABLE_NAME = 'CT6_SENSOR_HOUR'\n DAY_RES_DB_DATA_TABLE_NAME = 'CT6_SENSOR_DAY'\n LOW_RES_DATA_TABLE_LIST = [MINUTE_RES_DB_DATA_TABLE_NAME,\n HOUR_RES_DB_DATA_TABLE_NAME,\n DAY_RES_DB_DATA_TABLE_NAME]" } ]
from p3lib.database_if import DBConfig, DatabaseIF
from .config import ConfigBase
from .base_constants import BaseConstants
4,419
#!/usr/bin/env python3

class DBHandler(BaseConstants):
    """@brief Responsible for interacting with the database."""

    def __init__(self, uio, config):
        """@brief Constructor
           @param uio A UIO instance.
           @param config A ConfigBase instance."""
        self._uio = uio
        self._config = config
        self._dataBaseIF = None

    def connect(self):
        """@brief connect to the database server."""
        self.disconnect()
        self._setupDBConfig()
        self._dataBaseIF.connectNoDB()
        self._uio.info("Connected to MySQL server.")

    def disconnect(self):
        """@brief Shutdown the connection to the DBS"""
        if self._dataBaseIF:
            self._dataBaseIF.disconnect()
            self._dataBaseIF = None

    def _setupDBConfig(self):
        """@brief Setup the internal DB config"""
        self._dataBaseIF = None
        self._dbConfig = DBConfig()
#!/usr/bin/env python3

class DBHandler(BaseConstants):
    """@brief Responsible for interacting with the database."""

    def __init__(self, uio, config):
        """@brief Constructor
           @param uio A UIO instance.
           @param config A ConfigBase instance."""
        self._uio = uio
        self._config = config
        self._dataBaseIF = None

    def connect(self):
        """@brief connect to the database server."""
        self.disconnect()
        self._setupDBConfig()
        self._dataBaseIF.connectNoDB()
        self._uio.info("Connected to MySQL server.")

    def disconnect(self):
        """@brief Shutdown the connection to the DBS"""
        if self._dataBaseIF:
            self._dataBaseIF.disconnect()
            self._dataBaseIF = None

    def _setupDBConfig(self):
        """@brief Setup the internal DB config"""
        self._dataBaseIF = None
        self._dbConfig = DBConfig()
self._dbConfig.serverAddress = self._config.getAttr(ConfigBase.DB_HOST)
0
2023-12-24 06:32:07+00:00
8k
neobundy/MLX-Stable-Diffusion-WebUI
stable_diffusion/model_io.py
[ { "identifier": "CLIPTextModel", "path": "stable_diffusion/clip.py", "snippet": "class CLIPTextModel(nn.Module):\n \"\"\"Implements the text encoder transformer from CLIP.\"\"\"\n\n def __init__(self, config: CLIPTextModelConfig):\n super().__init__()\n\n self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)\n self.position_embedding = nn.Embedding(config.max_length, config.model_dims)\n self.layers = [\n CLIPEncoderLayer(config.model_dims, config.num_heads)\n for i in range(config.num_layers)\n ]\n self.final_layer_norm = nn.LayerNorm(config.model_dims)\n\n def __call__(self, x):\n # Extract some shapes\n B, N = x.shape\n\n # Compute the embeddings\n x = self.token_embedding(x)\n x = x + self.position_embedding.weight[:N]\n\n # Compute the features from the transformer\n mask = nn.MultiHeadAttention.create_additive_causal_mask(N, x.dtype)\n for l in self.layers:\n x = l(x, mask)\n\n # Apply the final layernorm and return\n return self.final_layer_norm(x)" }, { "identifier": "AutoencoderConfig", "path": "stable_diffusion/config.py", "snippet": "class AutoencoderConfig(BaseConfig):\n in_channels: int = 3\n out_channels: int = 3\n latent_channels_out: int = 8\n latent_channels_in: int = 4\n block_out_channels: Tuple[int] = (128, 256, 512, 512)\n layers_per_block: int = 2\n norm_num_groups: int = 32\n scaling_factor: float = 0.18215" }, { "identifier": "CLIPTextModelConfig", "path": "stable_diffusion/config.py", "snippet": "class CLIPTextModelConfig(BaseConfig):\n num_layers: int = 23\n model_dims: int = 1024\n num_heads: int = 16\n max_length: int = 77\n vocab_size: int = 49408" }, { "identifier": "DiffusionConfig", "path": "stable_diffusion/config.py", "snippet": "class DiffusionConfig(BaseConfig):\n beta_schedule: str = \"scaled_linear\"\n beta_start: float = 0.00085\n beta_end: float = 0.012\n num_train_steps: int = 1000" }, { "identifier": "UNetConfig", "path": "stable_diffusion/config.py", "snippet": "class UNetConfig(BaseConfig):\n in_channels: int = 4\n out_channels: int = 4\n conv_in_kernel: int = 3\n conv_out_kernel: int = 3\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280)\n layers_per_block: Tuple[int] = (2, 2, 2, 2)\n mid_block_layers: int = 2\n transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)\n num_attention_heads: Tuple[int] = (5, 10, 20, 20)\n cross_attention_dim: Tuple[int] = (1024,) * 4\n norm_num_groups: int = 32" }, { "identifier": "Tokenizer", "path": "stable_diffusion/tokenizer.py", "snippet": "class Tokenizer:\n \"\"\"A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ .\"\"\"\n\n def __init__(self, bpe_ranks, vocab):\n self.bpe_ranks = bpe_ranks\n self.vocab = vocab\n self.pat = regex.compile(\n r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\",\n regex.IGNORECASE,\n )\n\n self._cache = {self.bos: self.bos, self.eos: self.eos}\n\n @property\n def bos(self):\n return \"<|startoftext|>\"\n\n @property\n def bos_token(self):\n return self.vocab[self.bos]\n\n @property\n def eos(self):\n return \"<|endoftext|>\"\n\n @property\n def eos_token(self):\n return self.vocab[self.eos]\n\n def bpe(self, text):\n if text in self._cache:\n return self._cache[text]\n\n unigrams = list(text[:-1]) + [text[-1] + \"</w>\"]\n unique_bigrams = set(zip(unigrams, unigrams[1:]))\n\n if not unique_bigrams:\n return unigrams\n\n # In every iteration try to merge the two most likely bigrams. 
If none\n # was merged we are done.\n #\n # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py\n while unique_bigrams:\n bigram = min(\n unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float(\"inf\"))\n )\n if bigram not in self.bpe_ranks:\n break\n\n new_unigrams = []\n skip = False\n for a, b in zip(unigrams, unigrams[1:]):\n if skip:\n skip = False\n continue\n\n if (a, b) == bigram:\n new_unigrams.append(a + b)\n skip = True\n\n else:\n new_unigrams.append(a)\n\n if not skip:\n new_unigrams.append(b)\n\n unigrams = new_unigrams\n unique_bigrams = set(zip(unigrams, unigrams[1:]))\n\n self._cache[text] = unigrams\n\n return unigrams\n\n def tokenize(self, text, prepend_bos=True, append_eos=True):\n if isinstance(text, list):\n return [self.tokenize(t, prepend_bos, append_eos) for t in text]\n\n # Lower case cleanup and split according to self.pat. Hugging Face does\n # a much more thorough job here but this should suffice for 95% of\n # cases.\n clean_text = regex.sub(r\"\\s+\", \" \", text.lower())\n tokens = regex.findall(self.pat, clean_text)\n\n # Split the tokens according to the byte-pair merge file\n bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]\n\n # Map to token ids and return\n tokens = [self.vocab[t] for t in bpe_tokens]\n if prepend_bos:\n tokens = [self.bos_token] + tokens\n if append_eos:\n tokens.append(self.eos_token)\n\n return tokens" }, { "identifier": "UNetModel", "path": "stable_diffusion/unet.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"The conditional 2D UNet model that actually performs the denoising.\"\"\"\n\n def __init__(self, config: UNetConfig):\n super().__init__()\n\n self.conv_in = nn.Conv2d(\n config.in_channels,\n config.block_out_channels[0],\n config.conv_in_kernel,\n padding=(config.conv_in_kernel - 1) // 2,\n )\n\n # Generate sinusoidal positional encodings.\n # These encodings are used in transformer models to provide information about the position of the elements in the sequence.\n self.timesteps = nn.SinusoidalPositionalEncoding(\n config.block_out_channels[0],\n max_freq=1,\n min_freq=math.exp(\n -math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0]\n ),\n scale=1.0,\n cos_first=True,\n full_turns=False,\n )\n self.time_embedding = TimestepEmbedding(\n config.block_out_channels[0],\n config.block_out_channels[0] * 4,\n )\n\n # Make the downsampling blocks\n block_channels = [config.block_out_channels[0]] + list(\n config.block_out_channels\n )\n self.down_blocks = [\n UNetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=config.block_out_channels[0] * 4,\n num_layers=config.layers_per_block[i],\n transformer_layers_per_block=config.transformer_layers_per_block[i],\n num_attention_heads=config.num_attention_heads[i],\n cross_attention_dim=config.cross_attention_dim[i],\n resnet_groups=config.norm_num_groups,\n add_downsample=(i < len(config.block_out_channels) - 1),\n add_upsample=False,\n add_cross_attention=(i < len(config.block_out_channels) - 1),\n )\n for i, (in_channels, out_channels) in enumerate(\n zip(block_channels, block_channels[1:])\n )\n ]\n\n # Make the middle block\n self.mid_blocks = [\n ResnetBlock2D(\n in_channels=config.block_out_channels[-1],\n out_channels=config.block_out_channels[-1],\n temb_channels=config.block_out_channels[0] * 4,\n groups=config.norm_num_groups,\n ),\n Transformer2D(\n in_channels=config.block_out_channels[-1],\n model_dims=config.block_out_channels[-1],\n 
num_heads=config.num_attention_heads[-1],\n num_layers=config.transformer_layers_per_block[-1],\n encoder_dims=config.cross_attention_dim[-1],\n ),\n ResnetBlock2D(\n in_channels=config.block_out_channels[-1],\n out_channels=config.block_out_channels[-1],\n temb_channels=config.block_out_channels[0] * 4,\n groups=config.norm_num_groups,\n ),\n ]\n\n # Make the upsampling blocks\n block_channels = (\n [config.block_out_channels[0]]\n + list(config.block_out_channels)\n + [config.block_out_channels[-1]]\n )\n self.up_blocks = [\n UNetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=config.block_out_channels[0] * 4,\n prev_out_channels=prev_out_channels,\n num_layers=config.layers_per_block[i] + 1,\n transformer_layers_per_block=config.transformer_layers_per_block[i],\n num_attention_heads=config.num_attention_heads[i],\n cross_attention_dim=config.cross_attention_dim[i],\n resnet_groups=config.norm_num_groups,\n add_downsample=False,\n add_upsample=(i > 0),\n add_cross_attention=(i < len(config.block_out_channels) - 1),\n )\n for i, (in_channels, out_channels, prev_out_channels) in reversed(\n list(\n enumerate(\n zip(block_channels, block_channels[1:], block_channels[2:])\n )\n )\n )\n ]\n\n self.conv_norm_out = nn.GroupNorm(\n config.norm_num_groups,\n config.block_out_channels[0],\n pytorch_compatible=True,\n )\n self.conv_out = nn.Conv2d(\n config.block_out_channels[0],\n config.out_channels,\n config.conv_out_kernel,\n padding=(config.conv_out_kernel - 1) // 2,\n )\n\n def __call__(self, x, timestep, encoder_x, attn_mask=None, encoder_attn_mask=None):\n\n # Get the sinusoidal positional encoding for the given timestep.\n # The self.timesteps object is an instance of the nn.SinusoidalPositionalEncoding class, which generates sinusoidal positional encodings.\n # These encodings are used in transformer models to provide information about the position of the elements in the sequence.\n # The astype(x.dtype) part is ensuring that the positional encoding has the same data type as the input tensor x.\n\n temb = self.timesteps(timestep).astype(x.dtype)\n temb = self.time_embedding(temb)\n\n # Preprocess the input\n x = self.conv_in(x)\n\n # Run the downsampling part of the unet\n residuals = [x]\n for block in self.down_blocks:\n x, res = block(\n x,\n encoder_x=encoder_x,\n temb=temb,\n attn_mask=attn_mask,\n encoder_attn_mask=encoder_attn_mask,\n )\n residuals.extend(res)\n\n # Run the middle part of the unet\n x = self.mid_blocks[0](x, temb)\n x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask)\n x = self.mid_blocks[2](x, temb)\n\n # Run the upsampling part of the unet\n for block in self.up_blocks:\n x, _ = block(\n x,\n encoder_x=encoder_x,\n temb=temb,\n attn_mask=attn_mask,\n encoder_attn_mask=encoder_attn_mask,\n residual_hidden_states=residuals,\n )\n\n # Postprocess the output\n x = self.conv_norm_out(x)\n x = nn.silu(x)\n x = self.conv_out(x)\n\n return x" }, { "identifier": "Autoencoder", "path": "stable_diffusion/vae.py", "snippet": "class Autoencoder(nn.Module):\n \"\"\"The autoencoder that allows us to perform diffusion in the latent space.\"\"\"\n\n def __init__(self, config: AutoencoderConfig):\n super().__init__()\n\n self.latent_channels = config.latent_channels_in\n self.scaling_factor = config.scaling_factor\n self.encoder = Encoder(\n config.in_channels,\n config.latent_channels_out,\n config.block_out_channels,\n config.layers_per_block,\n resnet_groups=config.norm_num_groups,\n )\n self.decoder = Decoder(\n 
config.latent_channels_in,\n config.out_channels,\n config.block_out_channels,\n config.layers_per_block + 1,\n resnet_groups=config.norm_num_groups,\n )\n\n self.quant_proj = nn.Linear(\n config.latent_channels_out, config.latent_channels_out\n )\n self.post_quant_proj = nn.Linear(\n config.latent_channels_in, config.latent_channels_in\n )\n\n def encode(self, x):\n x = self.encoder(x)\n\n # This line applies the linear transformation to the tensor x.\n # The purpose of this operation is to transform the features extracted by the encoder into a form suitable for quantization.\n # In this case, the transformation doesn't change the dimensionality of the data (as both input and output dimensions are config.latent_channels_out),\n # but it can still learn to make the data more suitable for the subsequent operations (like splitting into mean and logvar).\n # The term \"projection\" in quant_proj refers to the operation of applying a linear transformation to the data,\n # which can be thought of as \"projecting\" the data onto a different subspace. This is a common operation in machine learning models,\n # and it is used here to transform the data into a form that is suitable for the subsequent operations in the VAE.\n x = self.quant_proj(x)\n\n # two tensors of size (B, C, H, W) where C = latent_channels_in\n mean, logvar = x.split(2, axis=-1)\n mean = mean * self.scaling_factor\n logvar = logvar + 2 * math.log(self.scaling_factor)\n\n return mean, logvar\n\n def decode(self, z):\n z = z / self.scaling_factor\n return self.decoder(self.post_quant_proj(z))\n\n def __call__(self, x, key=None):\n mean, logvar = self.encode(x)\n z = mx.random.normal(mean.shape, key=key) * mx.exp(0.5 * logvar) + mean\n x_hat = self.decode(z)\n\n return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)" }, { "identifier": "_DEFAULT_MODEL", "path": "stable_diffusion/models.py", "snippet": "_DEFAULT_MODEL = _AVAILABLE_MODELS[0]" }, { "identifier": "_MODELS", "path": "stable_diffusion/models.py", "snippet": "_MODELS = {model: generate_model_dict() for model in _AVAILABLE_MODELS}" }, { "identifier": "DiffuserModelPathConfig", "path": "stable_diffusion/config.py", "snippet": "class DiffuserModelPathConfig:\n def __init__(self, model_path: str = \"./diffuser_models\"):\n self.model_path = model_path\n\n @property\n def unet_config(self):\n return self.model_path + \"/unet/config.json\"\n\n @property\n def unet(self):\n return self.model_path + \"/unet/diffusion_pytorch_model.safetensors\"\n\n @property\n def scheduler(self):\n return self.model_path + \"/scheduler/scheduler_config.json\"\n\n @property\n def text_encoder_config(self):\n return self.model_path + \"/text_encoder/config.json\"\n\n @property\n def text_encoder(self):\n return self.model_path + \"/text_encoder/model.safetensors\"\n\n @property\n def vae_config(self):\n return self.model_path + \"/vae/config.json\"\n\n @property\n def vae(self):\n return self.model_path + \"/vae/diffusion_pytorch_model.safetensors\"\n\n @property\n def diffusion_config(self):\n return self.model_path + \"/scheduler/scheduler_config.json\"\n\n @property\n def tokenizer_vocab(self):\n return self.model_path + \"/tokenizer/vocab.json\"\n\n @property\n def tokenizer_merges(self):\n return self.model_path + \"/tokenizer/merges.txt\"" } ]
from typing import Optional
from functools import partial
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from safetensors import safe_open as safetensor_open
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
from .models import _DEFAULT_MODEL, _MODELS
from .config import DiffuserModelPathConfig
from tqdm import tqdm
import json
import mlx.core as mx
import numpy as np
6,805
value = value.transpose(0, 2, 3, 1) _debug_print(f"Transposed dimensions in {key}") return [(key, _from_numpy(value))] def map_clip_text_encoder_weights(key, value): # Remove prefixes if key.startswith("text_model."): key = key[11:] _debug_print(f"Removed 'text_model.' prefix from {key}") if key.startswith("embeddings."): key = key[11:] _debug_print(f"Removed 'embeddings.' prefix from {key}") if key.startswith("encoder."): key = key[8:] _debug_print(f"Removed 'encoder.' prefix from {key}") # Map attention layers if "self_attn." in key: key = key.replace("self_attn.", "attention.") _debug_print(f"Replaced 'self_attn.' with 'attention.' in {key}") if "q_proj." in key: key = key.replace("q_proj.", "query_proj.") _debug_print(f"Replaced 'q_proj.' with 'query_proj.' in {key}") if "k_proj." in key: key = key.replace("k_proj.", "key_proj.") _debug_print(f"Replaced 'k_proj.' with 'key_proj.' in {key}") if "v_proj." in key: key = key.replace("v_proj.", "value_proj.") _debug_print(f"Replaced 'v_proj.' with 'value_proj.' in {key}") # Map ffn layers if "mlp.fc1" in key: key = key.replace("mlp.fc1", "linear1") _debug_print(f"Replaced 'mlp.fc1' with 'linear1' in {key}") if "mlp.fc2" in key: key = key.replace("mlp.fc2", "linear2") _debug_print(f"Replaced 'mlp.fc2' with 'linear2' in {key}") return [(key, _from_numpy(value))] def map_vae_weights(key, value): # Map up/downsampling if "downsamplers" in key: key = key.replace("downsamplers.0.conv", "downsample") _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}") if "upsamplers" in key: key = key.replace("upsamplers.0.conv", "upsample") _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}") # Map attention layers if "to_k" in key: key = key.replace("to_k", "key_proj") _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}") if "to_out.0" in key: key = key.replace("to_out.0", "out_proj") _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}") if "to_q" in key: key = key.replace("to_q", "query_proj") _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}") if "to_v" in key: key = key.replace("to_v", "value_proj") _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}") # Map the mid block if "mid_block.resnets.0" in key: key = key.replace("mid_block.resnets.0", "mid_blocks.0") _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}") if "mid_block.attentions.0" in key: key = key.replace("mid_block.attentions.0", "mid_blocks.1") _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}") if "mid_block.resnets.1" in key: key = key.replace("mid_block.resnets.1", "mid_blocks.2") _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}") # Map the quant/post_quant layers if "quant_conv" in key: key = key.replace("quant_conv", "quant_proj") value = value.squeeze() _debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}") # Map the conv_shortcut to linear if "conv_shortcut.weight" in key: value = value.squeeze() _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}") # Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape if len(value.shape) == 4: value = value.transpose(0, 2, 3, 1) _debug_print(f"Transposed dimensions in {key}") return [(key, _from_numpy(value))] def _flatten(params): return [(k, v) for p in params for (k, v) in p] # The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point. 
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision. def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False): dtype = np.float16 if float16 else np.float32 _debug_print(f"Loading weights from {weight_file}") with safetensor_open(weight_file, framework="numpy") as f: keys = list(f.keys()) weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")]) model.update(tree_unflatten(weights)) def _check_key(key: str, part: str): if key not in _MODELS: raise ValueError( f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}" )
# Copyright © 2023 Apple Inc. logfile = 'log.txt' _DEBUG = False def _debug_print(*args, **kwargs): if _DEBUG: # Convert the arguments to a string message = ' '.join(map(str, args)) # Print the message to the console print(message, **kwargs) # Open the log file in append mode and write the message with open(logfile, 'a') as f: f.write(message + '\n') def _from_numpy(x): return mx.array(np.ascontiguousarray(x)) # The `map_*_weights` functions are used to adjust the weights of a model when loading it from a file. # The weights of the model in the file might be in a different format than the weights of the model in the current codebase. # When you load a pre-trained model, the weights are stored in a dictionary where the keys are the names of the parameters in the model. # If the architecture of your model is different from the architecture of the model that the weights were trained on, you might need to adjust the keys and/or the weights to match your model's architecture. # This is what the `map_*_weights` functions are doing. They are adjusting the keys and the weights to match the architecture of the models in the current codebase. def map_unet_weights(key, value): # Map up/downsampling if "downsamplers" in key: key = key.replace("downsamplers.0.conv", "downsample") _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}") if "upsamplers" in key: key = key.replace("upsamplers.0.conv", "upsample") _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}") # Map the mid block if "mid_block.resnets.0" in key: key = key.replace("mid_block.resnets.0", "mid_blocks.0") _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}") if "mid_block.attentions.0" in key: key = key.replace("mid_block.attentions.0", "mid_blocks.1") _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}") if "mid_block.resnets.1" in key: key = key.replace("mid_block.resnets.1", "mid_blocks.2") _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}") # Map attention layers if "to_k" in key: key = key.replace("to_k", "key_proj") _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}") if "to_out.0" in key: key = key.replace("to_out.0", "out_proj") _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}") if "to_q" in key: key = key.replace("to_q", "query_proj") _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}") if "to_v" in key: key = key.replace("to_v", "value_proj") _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}") # Map transformer ffn if "ff.net.2" in key: key = key.replace("ff.net.2", "linear3") _debug_print(f"Replaced 'ff.net.2' with 'linear3' in {key}") if "ff.net.0" in key: k1 = key.replace("ff.net.0.proj", "linear1") k2 = key.replace("ff.net.0.proj", "linear2") v1, v2 = np.split(value, 2) _debug_print(f"Replaced 'ff.net.0.proj' with 'linear1' and 'linear2' in {key}") return [(k1, _from_numpy(v1)), (k2, _from_numpy(v2))] # The weights of this 1x1 convolutional layer would be a 4-dimensional tensor # with shape [out_channels, in_channels, 1, 1]. # The squeeze() function is used to remove the dimensions of size 1 from this tensor, # converting it to a 2-dimensional tensor with shape [out_channels, in_channels]. # This is because the corresponding layer in the current model might be a linear layer # rather than a convolutional layer, and the weights for a linear layer are expected to be a 2-dimensional tensor. 
if "conv_shortcut.weight" in key: value = value.squeeze() _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}") # Transform the weights from 1x1 convs to linear if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key): value = value.squeeze() _debug_print(f"Squeezed 'proj_in' or 'proj_out' in {key}") if len(value.shape) == 4: value = value.transpose(0, 2, 3, 1) _debug_print(f"Transposed dimensions in {key}") return [(key, _from_numpy(value))] def map_clip_text_encoder_weights(key, value): # Remove prefixes if key.startswith("text_model."): key = key[11:] _debug_print(f"Removed 'text_model.' prefix from {key}") if key.startswith("embeddings."): key = key[11:] _debug_print(f"Removed 'embeddings.' prefix from {key}") if key.startswith("encoder."): key = key[8:] _debug_print(f"Removed 'encoder.' prefix from {key}") # Map attention layers if "self_attn." in key: key = key.replace("self_attn.", "attention.") _debug_print(f"Replaced 'self_attn.' with 'attention.' in {key}") if "q_proj." in key: key = key.replace("q_proj.", "query_proj.") _debug_print(f"Replaced 'q_proj.' with 'query_proj.' in {key}") if "k_proj." in key: key = key.replace("k_proj.", "key_proj.") _debug_print(f"Replaced 'k_proj.' with 'key_proj.' in {key}") if "v_proj." in key: key = key.replace("v_proj.", "value_proj.") _debug_print(f"Replaced 'v_proj.' with 'value_proj.' in {key}") # Map ffn layers if "mlp.fc1" in key: key = key.replace("mlp.fc1", "linear1") _debug_print(f"Replaced 'mlp.fc1' with 'linear1' in {key}") if "mlp.fc2" in key: key = key.replace("mlp.fc2", "linear2") _debug_print(f"Replaced 'mlp.fc2' with 'linear2' in {key}") return [(key, _from_numpy(value))] def map_vae_weights(key, value): # Map up/downsampling if "downsamplers" in key: key = key.replace("downsamplers.0.conv", "downsample") _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}") if "upsamplers" in key: key = key.replace("upsamplers.0.conv", "upsample") _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}") # Map attention layers if "to_k" in key: key = key.replace("to_k", "key_proj") _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}") if "to_out.0" in key: key = key.replace("to_out.0", "out_proj") _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}") if "to_q" in key: key = key.replace("to_q", "query_proj") _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}") if "to_v" in key: key = key.replace("to_v", "value_proj") _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}") # Map the mid block if "mid_block.resnets.0" in key: key = key.replace("mid_block.resnets.0", "mid_blocks.0") _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}") if "mid_block.attentions.0" in key: key = key.replace("mid_block.attentions.0", "mid_blocks.1") _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}") if "mid_block.resnets.1" in key: key = key.replace("mid_block.resnets.1", "mid_blocks.2") _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}") # Map the quant/post_quant layers if "quant_conv" in key: key = key.replace("quant_conv", "quant_proj") value = value.squeeze() _debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}") # Map the conv_shortcut to linear if "conv_shortcut.weight" in key: value = value.squeeze() _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}") # Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape if len(value.shape) == 4: value = 
value.transpose(0, 2, 3, 1) _debug_print(f"Transposed dimensions in {key}") return [(key, _from_numpy(value))] def _flatten(params): return [(k, v) for p in params for (k, v) in p] # The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point. # This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision. def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False): dtype = np.float16 if float16 else np.float32 _debug_print(f"Loading weights from {weight_file}") with safetensor_open(weight_file, framework="numpy") as f: keys = list(f.keys()) weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")]) model.update(tree_unflatten(weights)) def _check_key(key: str, part: str): if key not in _MODELS: raise ValueError( f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}" )
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
8
2023-12-25 05:49:34+00:00
8k
Con6924/SPM
train_spm_xl.py
[ { "identifier": "SPMNetwork", "path": "src/models/spm.py", "snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: {len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0" }, { "identifier": "SPMLayer", "path": "src/models/spm.py", "snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n 
super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )" }, { "identifier": "sample_xl", "path": "src/engine/sampling.py", "snippet": "def sample_xl(prompt_pair: PromptEmbedsPair, tokenizers=None, text_encoders=None):\n res = []\n for unconditional, target in zip(\n [prompt_pair.unconditional.text_embeds, prompt_pair.unconditional.pooled_embeds],\n [prompt_pair.target.text_embeds, prompt_pair.target.pooled_embeds]\n ):\n samples = []\n while len(samples) < prompt_pair.sampling_batch_size:\n while True:\n # sample from gaussian distribution\n noise = torch.randn_like(target)\n # normalize the noise\n noise = noise / noise.view(-1).norm(dim=-1)\n # compute the similarity\n sim = torch.cosine_similarity(target.view(-1), noise.view(-1), dim=-1)\n # the possibility of accepting the sample = 1 - sim\n if random.random() < 1 - sim:\n break\n scale = random.random() * 0.4 + 0.8\n sample = scale * noise * target.view(-1).norm(dim=-1)\n samples.append(sample)\n \n samples = [torch.cat([unconditional, s]) for s in samples]\n samples = torch.cat(samples, dim=0)\n res.append(samples)\n \n return res" }, { "identifier": "model_util", "path": "src/models/model_util.py", "snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, 
DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:" }, { "identifier": "eval_util", "path": "src/evaluation/eval_util.py", "snippet": "def get_clip_preprocess(n_px=224):\n def Convert(image):\n def text_preprocess(text):\ndef clip_score(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n texts: str,\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n cross_matching: bool = False,\n):\ndef clip_accuracy(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n ablated_texts: Union[List[str], str],\n anchor_texts: Union[List[str], str],\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):\ndef clip_eval_by_image(\n images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],\n ablated_texts: Union[List[str], str],\n anchor_texts: Union[List[str], str],\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):\ndef clip_eval(\n pipe: DiffusionPipeline,\n config: RootConfig,\n w: float = 2.5,\n clip_model: str = \"ViT-B/32\",\n n_px: int = 224,\n):" }, { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:" }, { "identifier": "prompt", "path": "src/configs/prompt.py", "snippet": "ACTION_TYPES = Literal[\n \"erase\",\n \"erase_with_la\",\n]\nPROMPT_EMBEDDING = Union[torch.FloatTensor, PromptEmbedsXL]\nclass PromptEmbedsXL:\nclass PromptEmbedsCache:\nclass PromptSettings(BaseModel): # yaml\nclass PromptEmbedsPair:\n def __init__(self, embeds) -> None:\n def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:\n def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:\n def fill_prompts(cls, values):\n def __init__(\n self,\n loss_fn: torch.nn.Module,\n target: PROMPT_EMBEDDING,\n positive: PROMPT_EMBEDDING,\n unconditional: PROMPT_EMBEDDING,\n neutral: PROMPT_EMBEDDING,\n settings: PromptSettings,\n ) -> None:\n def _prepare_embeddings(\n self, \n cache: PromptEmbedsCache,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ):\n def _erase(\n self,\n target_latents: torch.FloatTensor, # \"van 
gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n **kwargs,\n ) -> torch.FloatTensor:\n def _erase_with_la(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n anchor_latents: torch.FloatTensor, \n anchor_latents_ori: torch.FloatTensor, \n **kwargs,\n ):\n def loss(\n self,\n **kwargs,\n ):\ndef load_prompts_from_yaml(path: str | Path) -> list[PromptSettings]:\ndef load_prompts_from_table(path: str | Path) -> list[PromptSettings]:\ndef compute_rotation_matrix(target: torch.FloatTensor):" }, { "identifier": "RootConfig", "path": "src/configs/config.py", "snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None" }, { "identifier": "PromptEmbedsCache", "path": "src/configs/prompt.py", "snippet": "class PromptEmbedsCache:\n prompts: dict[str, PROMPT_EMBEDDING] = {}\n\n def __setitem__(self, __name: str, __value: PROMPT_EMBEDDING) -> None:\n self.prompts[__name] = __value\n\n def __getitem__(self, __name: str) -> Optional[PROMPT_EMBEDDING]:\n if __name in self.prompts:\n return self.prompts[__name]\n else:\n return None" }, { "identifier": "PromptEmbedsPair", "path": "src/configs/prompt.py", "snippet": "class PromptEmbedsPair:\n target: PROMPT_EMBEDDING # the concept that do not want to generate \n positive: PROMPT_EMBEDDING # generate the concept\n unconditional: PROMPT_EMBEDDING # uncondition (default should be empty)\n neutral: PROMPT_EMBEDDING # base condition (default should be empty)\n use_template: bool = False # use clip template or not\n\n guidance_scale: float\n resolution: int\n dynamic_resolution: bool\n batch_size: int\n dynamic_crops: bool\n\n loss_fn: torch.nn.Module\n action: ACTION_TYPES\n\n def __init__(\n self,\n loss_fn: torch.nn.Module,\n target: PROMPT_EMBEDDING,\n positive: PROMPT_EMBEDDING,\n unconditional: PROMPT_EMBEDDING,\n neutral: PROMPT_EMBEDDING,\n settings: PromptSettings,\n ) -> None:\n self.loss_fn = loss_fn\n self.target = target\n self.positive = positive\n self.unconditional = unconditional\n self.neutral = neutral\n \n self.settings = settings\n\n self.use_template = settings.use_template\n self.guidance_scale = settings.guidance_scale\n self.resolution = settings.resolution\n self.dynamic_resolution = settings.dynamic_resolution\n self.batch_size = settings.batch_size\n self.dynamic_crops = settings.dynamic_crops\n self.action = settings.action\n \n self.la_strength = settings.la_strength\n self.sampling_batch_size = settings.sampling_batch_size\n \n \n def _prepare_embeddings(\n self, \n cache: PromptEmbedsCache,\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTextModel,\n ):\n \"\"\"\n Prepare embeddings for training. 
When use_template is True, the embeddings will be\n format using a template, and then be processed by the model.\n \"\"\"\n if not self.use_template:\n return\n template = random.choice(imagenet_templates)\n target_prompt = template.format(self.settings.target)\n if cache[target_prompt]:\n self.target = cache[target_prompt]\n else:\n self.target = encode_prompts(tokenizer, text_encoder, [target_prompt])\n \n \n def _erase(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n **kwargs,\n ) -> torch.FloatTensor:\n \"\"\"Target latents are going not to have the positive concept.\"\"\"\n\n erase_loss = self.loss_fn(\n target_latents,\n neutral_latents\n - self.guidance_scale * (positive_latents - neutral_latents),\n )\n losses = {\n \"loss\": erase_loss,\n \"loss/erase\": erase_loss,\n }\n return losses\n \n def _erase_with_la(\n self,\n target_latents: torch.FloatTensor, # \"van gogh\"\n positive_latents: torch.FloatTensor, # \"van gogh\"\n neutral_latents: torch.FloatTensor, # \"\"\n anchor_latents: torch.FloatTensor, \n anchor_latents_ori: torch.FloatTensor, \n **kwargs,\n ):\n anchoring_loss = self.loss_fn(anchor_latents, anchor_latents_ori)\n erase_loss = self._erase(\n target_latents=target_latents,\n positive_latents=positive_latents,\n neutral_latents=neutral_latents,\n )[\"loss/erase\"]\n losses = {\n \"loss\": erase_loss + self.la_strength * anchoring_loss,\n \"loss/erase\": erase_loss,\n \"loss/anchoring\": anchoring_loss\n }\n return losses\n\n def loss(\n self,\n **kwargs,\n ):\n if self.action == \"erase\":\n return self._erase(**kwargs)\n elif self.action == \"erase_with_la\":\n return self._erase_with_la(**kwargs)\n else:\n raise ValueError(\"action must be erase or erase_with_la\")" }, { "identifier": "PromptSettings", "path": "src/configs/prompt.py", "snippet": "class PromptSettings(BaseModel): # yaml\n target: str\n positive: str = None # if None, target will be used\n unconditional: str = \"\" # default is \"\"\n neutral: str = None # if None, unconditional will be used\n action: ACTION_TYPES = \"erase\" # default is \"erase\"\n guidance_scale: float = 1.0 # default is 1.0\n resolution: int = 512 # default is 512\n dynamic_resolution: bool = False # default is False\n batch_size: int = 1 # default is 1\n dynamic_crops: bool = False # default is False. only used when model is XL\n use_template: bool = False # default is False\n \n la_strength: float = 1000.0\n sampling_batch_size: int = 4\n\n seed: int = None\n case_number: int = 0\n\n @root_validator(pre=True)\n def fill_prompts(cls, values):\n keys = values.keys()\n if \"target\" not in keys:\n raise ValueError(\"target must be specified\")\n if \"positive\" not in keys:\n values[\"positive\"] = values[\"target\"]\n if \"unconditional\" not in keys:\n values[\"unconditional\"] = \"\"\n if \"neutral\" not in keys:\n values[\"neutral\"] = values[\"unconditional\"]\n\n return values" }, { "identifier": "PromptEmbedsXL", "path": "src/configs/prompt.py", "snippet": "class PromptEmbedsXL:\n text_embeds: torch.FloatTensor\n pooled_embeds: torch.FloatTensor\n\n def __init__(self, embeds) -> None:\n self.text_embeds, self.pooled_embeds = embeds" } ]
import argparse
import gc
import torch
import src.engine.train_util as train_util
import wandb
from pathlib import Path
from tqdm import tqdm
from src.models.spm import (
    SPMNetwork,
    SPMLayer,
)
from src.engine.sampling import sample_xl
from src.models import model_util
from src.evaluation import eval_util
from src.configs import config as config_pkg
from src.configs import prompt as prompt_pkg
from src.configs.config import RootConfig
from src.configs.prompt import PromptEmbedsCache, PromptEmbedsPair, PromptSettings, PromptEmbedsXL
5,383
# ref:
# - https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L566
# - https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/blob/main/train.py
# - https://github.com/p1atdev/LECO/blob/main/train_lora_xl.py

DEVICE_CUDA = torch.device("cuda:0")
NUM_IMAGES_PER_PROMPT = 1


def flush():
    torch.cuda.empty_cache()
    gc.collect()


def train(
# ref:
# - https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L566
# - https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/blob/main/train.py
# - https://github.com/p1atdev/LECO/blob/main/train_lora_xl.py

DEVICE_CUDA = torch.device("cuda:0")
NUM_IMAGES_PER_PROMPT = 1


def flush():
    torch.cuda.empty_cache()
    gc.collect()


def train(
config: RootConfig,
7
2023-12-26 03:19:16+00:00
8k
davep/oshit
oshit/app/widgets/comment_card.py
[ { "identifier": "HN", "path": "oshit/hn/client.py", "snippet": "class HN:\n \"\"\"HackerNews API client.\"\"\"\n\n AGENT: Final[str] = \"Orange Site Hit (https://github.com/davep/oshit)\"\n \"\"\"The agent string to use when talking to the API.\"\"\"\n\n _BASE: Final[str] = \"https://hacker-news.firebaseio.com/v0/\"\n \"\"\"The base of the URL for the API.\"\"\"\n\n class Error(Exception):\n \"\"\"Base class for HackerNews errors.\"\"\"\n\n class RequestError(Error):\n \"\"\"Exception raised if there was a problem making an API request.\"\"\"\n\n class NoSuchUser(Error):\n \"\"\"Exception raised if no such user exists.\"\"\"\n\n def __init__(self, max_concurrency: int = 50, timeout: int | None = 5) -> None:\n \"\"\"Initialise the API client object.\n\n Args:\n max_concurrency: The maximum number of concurrent connections to use.\n timeout: The timeout for an attempted connection.\n \"\"\"\n self._client_: AsyncClient | None = None\n \"\"\"The HTTPX client.\"\"\"\n self._max_concurrency = max_concurrency\n \"\"\"The maximum number of concurrent connections to use.\"\"\"\n self._timeout = timeout\n \"\"\"The timeout to use on connections.\"\"\"\n\n @property\n def _client(self) -> AsyncClient:\n \"\"\"The API client.\"\"\"\n if self._client_ is None:\n self._client_ = AsyncClient()\n return self._client_\n\n def _api_url(self, *path: str) -> str:\n \"\"\"Construct a URL for calling on the API.\n\n Args:\n *path: The path to the endpoint.\n\n Returns:\n The URL to use.\n \"\"\"\n return f\"{self._BASE}{'/'.join(path)}\"\n\n async def _call(self, *path: str, **params: str) -> str:\n \"\"\"Call on the Pinboard API.\n\n Args:\n path: The path for the API call.\n params: The parameters for the call.\n\n Returns:\n The text returned from the call.\n \"\"\"\n try:\n response = await self._client.get(\n self._api_url(*path),\n params=params,\n headers={\"user-agent\": self.AGENT},\n timeout=self._timeout,\n )\n except RequestError as error:\n raise self.RequestError(str(error))\n\n try:\n response.raise_for_status()\n except HTTPStatusError as error:\n raise self.RequestError(str(error))\n\n return response.text\n\n async def max_item_id(self) -> int:\n \"\"\"Get the current maximum item ID.\n\n Returns:\n The ID of the maximum item on HackerNews.\n \"\"\"\n return int(loads(await self._call(\"maxitem.json\")))\n\n async def _raw_item(self, item_id: int) -> dict[str, Any]:\n \"\"\"Get the raw data of an item from the API.\n\n Args:\n item_id: The ID of the item to get.\n\n Returns:\n The JSON data of that item as a `dict`.\n \"\"\"\n # TODO: Possibly cache this.\n return cast(dict[str, Any], loads(await self._call(\"item\", f\"{item_id}.json\")))\n\n async def item(self, item_type: type[ItemType], item_id: int) -> ItemType:\n \"\"\"Get an item by its ID.\n\n Args:\n item_type: The type of the item to get from the API.\n item_id: The ID of the item to get.\n\n Returns:\n The item.\n \"\"\"\n if isinstance(item := Loader.load(await self._raw_item(item_id)), item_type):\n return item\n raise ValueError(\n f\"The item of ID '{item_id}' is of type '{item.item_type}', not {item_type.__name__}\"\n )\n\n async def _items_from_ids(\n self, item_type: type[ItemType], item_ids: list[int]\n ) -> list[ItemType]:\n \"\"\"Turn a list of item IDs into a list of items.\n\n Args:\n item_type: The type of the item we'll be getting.\n item_ids: The IDs of the items to get.\n\n Returns:\n The list of items.\n \"\"\"\n concurrency_limit = Semaphore(self._max_concurrency)\n\n async def limited(coroutine: 
Awaitable[ItemType]) -> ItemType:\n async with concurrency_limit:\n return await coroutine\n\n return await gather(\n *[limited(self.item(item_type, item_id)) for item_id in item_ids]\n )\n\n async def _id_list(self, list_type: str) -> list[int]:\n \"\"\"Get a given ID list.\n\n Args:\n list_type: The type of list to get.\n\n Returns:\n The list of item IDs.\n \"\"\"\n return cast(list[int], loads(await self._call(f\"{list_type}.json\")))\n\n async def top_story_ids(self) -> list[int]:\n \"\"\"Get the list of top story IDs.\n\n Returns:\n The list of the top story IDs.\n \"\"\"\n return await self._id_list(\"topstories\")\n\n async def top_stories(self) -> list[Link]:\n \"\"\"Get the top stories.\n\n Returns:\n The list of the top stories.\n \"\"\"\n return await self._items_from_ids(Link, await self.top_story_ids())\n\n async def new_story_ids(self) -> list[int]:\n \"\"\"Get the list of new story IDs.\n\n Returns:\n The list of the new story IDs.\n \"\"\"\n return await self._id_list(\"newstories\")\n\n async def new_stories(self) -> list[Link]:\n \"\"\"Get the new stories.\n\n Returns:\n The list of the new stories.\n \"\"\"\n return await self._items_from_ids(Link, await self.new_story_ids())\n\n async def best_story_ids(self) -> list[int]:\n \"\"\"Get the list of best story IDs.\n\n Returns:\n The list of the best story IDs.\n \"\"\"\n return await self._id_list(\"beststories\")\n\n async def best_stories(self) -> list[Link]:\n \"\"\"Get the best stories.\n\n Returns:\n The list of the best stories.\n \"\"\"\n return await self._items_from_ids(Link, await self.best_story_ids())\n\n async def latest_ask_story_ids(self) -> list[int]:\n \"\"\"Get the list of the latest ask story IDs.\n\n Returns:\n The list of the latest ask story IDs.\n \"\"\"\n return await self._id_list(\"askstories\")\n\n async def latest_ask_stories(self) -> list[Story]:\n \"\"\"Get the latest AskHN stories.\n\n Returns:\n The list of the latest AskHN stories.\n \"\"\"\n return await self._items_from_ids(Story, await self.latest_ask_story_ids())\n\n async def latest_show_story_ids(self) -> list[int]:\n \"\"\"Get the list of the latest show story IDs.\n\n Returns:\n The list of the latest show story IDs.\n \"\"\"\n return await self._id_list(\"showstories\")\n\n async def latest_show_stories(self) -> list[Story]:\n \"\"\"Get the latest ShowHN stories.\n\n Returns:\n The list of the latest ShowHN stories.\n \"\"\"\n return await self._items_from_ids(Story, await self.latest_show_story_ids())\n\n async def latest_job_story_ids(self) -> list[int]:\n \"\"\"Get the list of the latest job story IDs.\n\n Returns:\n The list of the latest job story IDs.\n \"\"\"\n return await self._id_list(\"jobstories\")\n\n async def latest_job_stories(self) -> list[Job]:\n \"\"\"Get the latest job stories.\n\n Returns:\n The list of the latest job stories.\n \"\"\"\n return await self._items_from_ids(Job, await self.latest_job_story_ids())\n\n async def user(self, user_id: str) -> User:\n \"\"\"Get the details of the given user.\n\n Args:\n user_id: The ID of the user.\n\n Returns:\n The details of the user.\n\n Raises:\n HN.NoSuchUser: If the user is not known.\n \"\"\"\n if user := loads(await self._call(\"user\", f\"{user_id}.json\")):\n return User().populate_with(user)\n raise self.NoSuchUser(f\"Unknown user: {user_id}\")\n\n async def comments(self, item: Item) -> list[Comment]:\n \"\"\"Get the comments for the given item.\n\n Args:\n item: The item to get the comments for.\n Returns:\n The list of the top stories.\n \"\"\"\n return 
await self._items_from_ids(Comment, item.kids)" }, { "identifier": "Article", "path": "oshit/hn/item/article.py", "snippet": "class Article(Item):\n \"\"\"Base class for all types of articles on HackerNews.\"\"\"\n\n descendants: int = 0\n \"\"\"The number of descendants of the article.\"\"\"\n\n score: int = 0\n \"\"\"The score of the article.\"\"\"\n\n title: str = \"\"\n \"\"\"The title of the article.\"\"\"\n\n def populate_with(self, data: dict[str, Any]) -> Self:\n \"\"\"Populate the item with the data from the given JSON value.\n\n Args:\n data: The data to populate from.\n\n Returns:\n Self\n \"\"\"\n self.descendants = data.get(\"descendants\", 0)\n self.score = data[\"score\"]\n self.title = data[\"title\"]\n return super().populate_with(data)" }, { "identifier": "Comment", "path": "oshit/hn/item/comment.py", "snippet": "class Comment(Item):\n \"\"\"Class that holds the details of a HackerNews comment.\"\"\"\n\n raw_text: str = \"\"\n \"\"\"The raw text of the comment.\"\"\"\n\n parent: int = 0\n \"\"\"The ID of the parent of the comment.\"\"\"\n\n def populate_with(self, data: dict[str, Any]) -> Self:\n \"\"\"Populate the item with the data from the given JSON value.\n\n Args:\n data: The data to populate from.\n\n Returns:\n Self\n \"\"\"\n self.raw_text = data.get(\"text\", \"\")\n self.parent = data[\"parent\"]\n return super().populate_with(data)\n\n @property\n def text(self) -> str:\n \"\"\"The text for the comment.\"\"\"\n return tidy_text(self.raw_text)\n\n @property\n def flagged(self) -> bool:\n \"\"\"Does the comment appear to be flagged?\"\"\"\n return self.raw_text == \"[flagged]\"\n\n @property\n def dead(self) -> bool:\n \"\"\"Does the comment appear to be dead?\"\"\"\n return self.raw_text == \"[dead]\"" }, { "identifier": "UserDetails", "path": "oshit/app/screens/user.py", "snippet": "class UserDetails(ModalScreen[None]):\n \"\"\"Modal dialog for showing the details of a user.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n UserDetails {\n align: center middle;\n\n Vertical {\n padding: 1 2;\n height: auto;\n width: auto;\n min-width: 40%;\n max-width: 80vw;\n background: $surface;\n border: panel $primary;\n border-title-color: $accent;\n }\n\n VerticalScroll {\n max-height: 20;\n height: auto;\n width: auto;\n }\n\n Data {\n max-width: 70vw;\n }\n\n Horizontal {\n height: auto;\n width: 100%;\n align-horizontal: right;\n border-top: solid $primary;\n padding-top: 1;\n }\n\n Button {\n margin-left: 1;\n }\n\n .hidden {\n display: none;\n }\n }\n \"\"\"\n\n BINDINGS = [(\"space\", \"visit\"), (\"escape\", \"close\")]\n\n AUTO_FOCUS = \"#close\"\n\n def __init__(self, client: HN, user_id: str) -> None:\n \"\"\"Initialise the user details dialog.\n\n Args:\n client: The HackerNews client object.\n user_id: The ID of the user to display.\n \"\"\"\n super().__init__()\n self._hn = client\n self._user = User(user_id)\n self._user_id = user_id\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose the dialog.\"\"\"\n with Vertical() as dialog:\n dialog.border_title = \"User details\"\n yield Title(\"User ID:\")\n yield Data(self._user_id, id=\"user-id\")\n yield Title(\"About:\", classes=\"about hidden\")\n with VerticalScroll(classes=\"about hidden\"):\n yield Data(id=\"about\", markup=False)\n yield Title(\"Karma:\")\n yield Data(id=\"karma\")\n yield Title(\"Account created:\")\n yield Data(id=\"created\")\n yield Title(\"Submission count:\")\n yield Data(id=\"submissions\")\n with Horizontal():\n yield Button(\"Visit [dim]\\\\[Space][/]\", id=\"visit\")\n yield Button(\"Okay 
[dim]\\\\[Esc][/]\", id=\"close\")\n\n def _set(self, field: str, value: str) -> None:\n \"\"\"Set the value of a field on the form.\n\n Args:\n field: The field to set.\n value: The value to set the field to.\n \"\"\"\n self.query_one(f\"#{field}\", Data).update(value)\n\n @work\n async def _load_user(self) -> None:\n \"\"\"Load up the details for the user.\"\"\"\n self.query_one(Vertical).border_subtitle = \"Loading...\"\n try:\n self._user = await self._hn.user(self._user_id)\n except HN.RequestError as error:\n self.app.bell()\n self.notify(\n str(error),\n title=f\"Error loading user data for '{self._user_id}'\",\n timeout=8,\n severity=\"error\",\n )\n self._set(\"user-id\", f\"{self._user_id} [red italic](API error)[/]\")\n except HN.NoSuchUser:\n self.notify(\n \"No such user\",\n title=f\"There is no such user as '{self._user_id}'\",\n severity=\"error\",\n timeout=8,\n )\n self._set(\"user-id\", f\"{self._user_id} [red italic](Unknown User)[/]\")\n else:\n self._set(\"about\", self._user.about)\n self._set(\"karma\", intcomma(self._user.karma))\n self._set(\n \"created\",\n f\"{naturaltime(self._user.created)} [dim]({self._user.created})[/]\",\n )\n self._set(\"submissions\", f\"{intcomma(len(self._user.submitted))}\")\n self.query(\".about\").set_class(not self._user.has_about, \"hidden\")\n finally:\n self.query_one(Vertical).border_subtitle = \"\"\n\n def on_mount(self) -> None:\n \"\"\"Configure the dialog once the DOM is ready.\"\"\"\n self._load_user()\n\n @on(Button.Pressed, \"#close\")\n def action_close(self) -> None:\n \"\"\"Close the dialog screen.\"\"\"\n self.dismiss(None)\n\n @on(Button.Pressed, \"#visit\")\n def action_visit(self) -> None:\n \"\"\"Visit the page for the user.\"\"\"\n open_url(self._user.url)" } ]
from dataclasses import dataclass
from webbrowser import open as open_url
from textual import on
from textual.app import ComposeResult
from textual.containers import Vertical
from textual.binding import Binding
from textual.css.query import NoMatches
from textual.events import Click
from textual.message import Message
from textual.widget import Widget
from textual.widgets import Label
from humanize import naturaltime
from ...hn import HN
from ...hn.item import Article, Comment
from ..screens.user import UserDetails
4,334
"""Provides a card for displaying a HackerNews comment.""" ############################################################################## # Python imports. ############################################################################## # Textual imports. ############################################################################## # Humanize imports. ############################################################################## # Local imports. ############################################################################## class CommentCard(Vertical, can_focus=True): """Widget that displays a comment.""" DEFAULT_CSS = """ $card-border: heavy; CommentCard { border-left: $card-border $primary; border-bottom: $card-border $primary; padding: 1 0 1 1; margin: 0 1 1 1; height: auto; color: $text 70%; CommentCard { padding: 1 0 1 1; margin: 0 0 1 0; } &:focus-within { border-left: $card-border $accent 50%; border-bottom: $card-border $accent 50%; background: $boost 50%; color: $text 80%; } &:focus { border-left: $card-border $accent; border-bottom: $card-border $accent; background: $boost; color: $text; } &.deleted { color: $error 50%; text-style: italic; border: dashed $error 20%; padding: 0; Label { text-align: center; } } Label { width: 1fr; padding-right: 1; } /* These two should be combined. https://github.com/Textualize/textual/issues/3969 */ &.flagged Label { color: $text-disabled; text-style: italic; } &.dead Label { color: $text-disabled; text-style: italic; } .byline { margin-top: 1; text-align: right; color: $text-muted; text-style: italic; } } """ BINDINGS = [ Binding("enter", "gndn"), Binding("s", "next(1)", "Next Sibling"), Binding("S", "next(-1)", "Prev Sibling", key_display="Sh+S"), Binding("p", "goto_parent", "Parent"), Binding("r", "goto_root", "Go Root"), Binding("u", "view_user", "View User"), Binding("v", "view_online", "View on HN"), ] def __init__(
"""Provides a card for displaying a HackerNews comment.""" ############################################################################## # Python imports. ############################################################################## # Textual imports. ############################################################################## # Humanize imports. ############################################################################## # Local imports. ############################################################################## class CommentCard(Vertical, can_focus=True): """Widget that displays a comment.""" DEFAULT_CSS = """ $card-border: heavy; CommentCard { border-left: $card-border $primary; border-bottom: $card-border $primary; padding: 1 0 1 1; margin: 0 1 1 1; height: auto; color: $text 70%; CommentCard { padding: 1 0 1 1; margin: 0 0 1 0; } &:focus-within { border-left: $card-border $accent 50%; border-bottom: $card-border $accent 50%; background: $boost 50%; color: $text 80%; } &:focus { border-left: $card-border $accent; border-bottom: $card-border $accent; background: $boost; color: $text; } &.deleted { color: $error 50%; text-style: italic; border: dashed $error 20%; padding: 0; Label { text-align: center; } } Label { width: 1fr; padding-right: 1; } /* These two should be combined. https://github.com/Textualize/textual/issues/3969 */ &.flagged Label { color: $text-disabled; text-style: italic; } &.dead Label { color: $text-disabled; text-style: italic; } .byline { margin-top: 1; text-align: right; color: $text-muted; text-style: italic; } } """ BINDINGS = [ Binding("enter", "gndn"), Binding("s", "next(1)", "Next Sibling"), Binding("S", "next(-1)", "Prev Sibling", key_display="Sh+S"), Binding("p", "goto_parent", "Parent"), Binding("r", "goto_root", "Go Root"), Binding("u", "view_user", "View User"), Binding("v", "view_online", "View on HN"), ] def __init__(
self, client: HN, parent_item: Article | Comment, comment: Comment
2
2023-12-25 14:06:07+00:00
8k
wwxu21/CUT
finetune_unlikelihood.py
[ { "identifier": "LlamaForCausalLM", "path": "modeling_llama_unlikelihood.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config, threshold):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.threshold = threshold\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n weight_like: Optional[torch.Tensor] = None,\n weight_unlike: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n input_ids_neg=None,\n attention_mask_neg=None,\n labels_neg=None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n probs = torch.softmax(logits,dim=2)\n batch_size2, seq_length, hidden_size = probs.size()\n batch_size = batch_size2 // 2\n \n loss = None\n unlike_mask = weight_unlike.ne(-1).view(-1).to(probs.device)\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_probs_pos = probs[:batch_size][..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = NLLLoss()\n shift_probs_pos = shift_probs_pos.view(-1, self.config.vocab_size)\n shift_logits = torch.log(shift_probs_pos)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n \n loss = loss\n if unlike_mask.any():\n loss_unlike = self.unlikelihood(probs, labels, labels_neg, weight_unlike, unlike_mask)\n loss = (loss_unlike + loss) / 2 \n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n def unlikelihood(self, probs, labels, labels_neg, weight_unlike, unlike_mask):\n labels = labels.to(probs.device)\n labels_neg = labels_neg.to(probs.device)\n weight_unlike = weight_unlike.to(probs.device)\n shift_probs = probs[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n shift_labels_neg = labels_neg[..., 1:].contiguous()\n valid_indices = shift_labels[unlike_mask] != -100\n valid_indices_neg = shift_labels_neg[unlike_mask] != -100\n # assert (valid_indices == valid_indices_neg).all()\n batch_size2, seq_length, hidden_size = shift_probs.size()\n batch_size = batch_size2 // 2\n device = probs.device\n label_clamped = torch.clamp(shift_labels, min=0, max=hidden_size - 1) \n label_clamped_neg = torch.clamp(shift_labels_neg, min=0, max=hidden_size - 1)\n rows, cols = torch.meshgrid(torch.arange(batch_size, device=device), torch.arange(seq_length, device=device))\n probs_out = shift_probs[:batch_size][rows, cols, label_clamped][unlike_mask]\n probs_out_neg = shift_probs[batch_size:][rows, cols, label_clamped_neg][unlike_mask]\n valid_prob = probs_out[valid_indices]\n valid_prob_neg = probs_out_neg[valid_indices_neg]\n scale = (valid_prob / valid_prob_neg).detach()\n 
unlike_indices = scale > self.threshold # give some margins\n valid_prob_neg[unlike_indices] = 1 - valid_prob_neg[unlike_indices]\n valid_prob_neg[valid_prob_neg == 0] += 1e-5 # avoid 0\n valid_lprob_neg = torch.log(valid_prob_neg)\n valid_lprob_neg[unlike_indices] = weight_unlike[unlike_mask][0][0] * valid_lprob_neg[unlike_indices]\n valid_lprob_neg[~unlike_indices] = valid_lprob_neg[~unlike_indices]\n loss_unlike = -torch.sum(valid_lprob_neg)/ valid_lprob_neg.size(0)\n return loss_unlike\n\n \n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "PeftModelForCausalLM", "path": "modeling_llama_unlikelihood.py", "snippet": "class PeftModelForCausalLM(PeftModel):\n \"\"\"\n Peft model for causal language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForCausalLM\n >>> from peft import PeftModelForCausalLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"CAUSAL_LM\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 1280,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 20,\n ... \"num_layers\": 36,\n ... \"encoder_hidden_size\": 1280,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2-large\")\n >>> peft_model = PeftModelForCausalLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n input_ids_neg=None,\n attention_mask_neg=None,\n labels_neg=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n weight_like=None,\n weight_unlike=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n kwargs.update({'weight_like':weight_like, 'weight_unlike':weight_unlike, \"labels_neg\": labels_neg})\n input_ids = torch.cat([input_ids, input_ids_neg], dim=0)\n attention_mask = torch.cat([attention_mask, attention_mask_neg], dim=0)\n if not peft_config.is_prompt_learning:\n if self.base_model.config.model_type == \"mpt\":\n if inputs_embeds is not None:\n raise AssertionError(\"forward in MPTForCausalLM does not support inputs_embeds\")\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n batch_size = _get_batch_size(input_ids, inputs_embeds)\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(\n input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs\n )\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n # concat prompt labels\n if labels is not None:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def generate(self, **kwargs):\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n if hasattr(self.base_model, \"model\"):\n self.base_model.model.generation_config = self.generation_config\n else:\n self.base_model.generation_config = self.generation_config\n try:\n outputs = self.base_model.generate(**kwargs)\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if peft_config.is_prompt_learning:\n if model_kwargs.get(\"attention_mask\", None) is not None:\n prefix_attention_mask = torch.ones(\n model_kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(model_kwargs[\"input_ids\"].device)\n model_kwargs[\"attention_mask\"] = torch.cat(\n (prefix_attention_mask, model_kwargs[\"attention_mask\"]), dim=1\n )\n\n if model_kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n model_kwargs[\"position_ids\"] = None\n\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n model_kwargs[\"past_key_values\"] = past_key_values\n else:\n if model_kwargs[\"past_key_values\"] is None:\n inputs_embeds = self.word_embeddings(model_kwargs[\"input_ids\"])\n prompts = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n prompts = prompts.to(inputs_embeds.dtype)\n model_kwargs[\"inputs_embeds\"] = torch.cat((prompts, inputs_embeds), dim=1)\n model_kwargs[\"input_ids\"] = None\n\n return model_kwargs" }, { "identifier": "Prompter", "path": "prompter.py", "snippet": "class Prompter(object):\n __slots__ = (\"template\", \"_verbose\")\n\n def __init__(self, template_name: str = \"\", verbose: bool = False):\n self._verbose = verbose\n if not template_name:\n # Enforce the default here, so the constructor can be called with '' and will not break.\n template_name = \"alpaca\"\n file_name = osp.join(\"templates\", f\"{template_name}.json\")\n if not osp.exists(file_name):\n raise ValueError(f\"Can't read {file_name}\")\n with open(file_name) as fp:\n self.template = json.load(fp)\n if self._verbose:\n print(\n f\"Using prompt template {template_name}: {self.template['description']}\"\n )\n\n def generate_prompt(\n self,\n data_point,\n output=False,\n ) -> str:\n # returns the full prompt from instruction and optional input\n # if a label (=response, =output) is provided, it's also appended.\n instruction = data_point['instruction']\n label = data_point['output']\n res = instruction\n if output:\n res = f\"{res}{label}\"\n if self._verbose:\n print(res)\n return res\n\n def get_response(self, output: str) -> str:\n return output.split(self.template[\"response_split\"])[1].strip()" } ]
import os import sys import json import fire import torch import transformers import numpy as np import random from typing import List from torch.utils.data import DataLoader from datasets import load_dataset, concatenate_datasets, Dataset from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from peft import ( LoraConfig, prepare_model_for_int8_training, set_peft_model_state_dict, MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PeftModel, ) from peft.utils import _prepare_prompt_learning_config from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.utils import PaddingStrategy from transformers import LlamaTokenizer, LlamaConfig from modeling_llama_unlikelihood import LlamaForCausalLM, PeftModelForCausalLM from prompter import Prompter from typing import Optional, Union, Any from dataclasses import dataclass
6,444
if return_tensors is None: return_tensors = self.return_tensors labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None labels_neg = [feature["labels_neg"] for feature in features] if "labels_neg" in features[0].keys() else None # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the # same length to return tensors. if labels is not None: max_label_length = max(len(l) for l in labels) if labels_neg is not None: max_label_length_neg = max(len(l) for l in labels_neg) max_label_length = max(max_label_length, max_label_length_neg) if self.pad_to_multiple_of is not None: max_label_length = ( (max_label_length + self.pad_to_multiple_of - 1) // self.pad_to_multiple_of * self.pad_to_multiple_of ) # self.tokenizer.padding_side = "left" padding_side = self.tokenizer.padding_side for feature in features: feature['weight_like'] = [feature['weight_like']] feature['weight_unlike'] = [feature['weight_unlike']] remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"])) remainder_length = max_label_length - len(feature["labels_neg"]) remainder_label = [self.label_pad_token_id] * remainder_length remainder_ids = [self.tokenizer.pad_token_id] * remainder_length remainder_mask = [0] * remainder_length if isinstance(feature["labels"], list): feature["labels"] = ( feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"] ) feature["labels_neg"] = ( feature["labels_neg"] + remainder_label if padding_side == "right" else remainder_label + feature["labels_neg"] ) feature["input_ids_neg"] = ( feature["input_ids_neg"] + remainder_ids if padding_side == "right" else remainder_ids + feature["input_ids_neg"] ) feature["attention_mask_neg"] = ( feature["attention_mask_neg"] + remainder_mask if padding_side == "right" else remainder_mask + feature["attention_mask_neg"] ) elif padding_side == "right": feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64) feature["labels_neg"] = np.concatenate([feature["labels_neg"], remainder_label]).astype(np.int64) feature["input_ids_neg"] = np.concatenate([feature["input_ids_neg"], remainder_ids]).astype(np.int64) feature["attention_mask_neg"] = np.concatenate([feature["attention_mask_neg"], remainder_mask]).astype(np.int64) else: feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64) feature["labels_neg"] = np.concatenate([remainder_label, feature["labels_neg"]]).astype(np.int64) feature["input_ids_neg"] = np.concatenate([remainder_ids, feature["input_ids_neg"]]).astype(np.int64) feature["attention_mask_neg"] = np.concatenate([remainder_mask, feature["attention_mask_neg"]]).astype(np.int64) features = self.tokenizer.pad( features, padding=self.padding, max_length=max_label_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=return_tensors, ) # prepare decoder_input_ids if ( labels is not None and self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels") ): decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"]) features["decoder_input_ids"] = decoder_input_ids return features class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") kwargs["model"].save_pretrained(checkpoint_folder) pytorch_model_path = 
os.path.join(checkpoint_folder, "pytorch_model.bin") torch.save({}, pytorch_model_path) return control class LoadBestPeftModelCallback(TrainerCallback): def on_train_end( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).") best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin") adapters_weights = torch.load(best_model_path) model = kwargs["model"] set_peft_model_state_dict(model, adapters_weights) return control def get_peft_model(model, peft_config, adapter_name: str = "default"): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: return PeftModel(model, peft_config, adapter_name=adapter_name) if peft_config.is_prompt_learning: peft_config = _prepare_prompt_learning_config(peft_config, model_config)
seed = 42 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) @dataclass class MyDataCollator: """ Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. model ([`PreTrainedModel`]): The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to prepare the *decoder_input_ids* This is useful when using *label_smoothing* to avoid calculating loss twice. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (`int`, *optional*, defaults to -100): The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions). return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". """ tokenizer: PreTrainedTokenizerBase model: Optional[Any] = None padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 return_tensors: str = "pt" def __call__(self, features, return_tensors=None): if return_tensors is None: return_tensors = self.return_tensors labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None labels_neg = [feature["labels_neg"] for feature in features] if "labels_neg" in features[0].keys() else None # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the # same length to return tensors. 
if labels is not None: max_label_length = max(len(l) for l in labels) if labels_neg is not None: max_label_length_neg = max(len(l) for l in labels_neg) max_label_length = max(max_label_length, max_label_length_neg) if self.pad_to_multiple_of is not None: max_label_length = ( (max_label_length + self.pad_to_multiple_of - 1) // self.pad_to_multiple_of * self.pad_to_multiple_of ) # self.tokenizer.padding_side = "left" padding_side = self.tokenizer.padding_side for feature in features: feature['weight_like'] = [feature['weight_like']] feature['weight_unlike'] = [feature['weight_unlike']] remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"])) remainder_length = max_label_length - len(feature["labels_neg"]) remainder_label = [self.label_pad_token_id] * remainder_length remainder_ids = [self.tokenizer.pad_token_id] * remainder_length remainder_mask = [0] * remainder_length if isinstance(feature["labels"], list): feature["labels"] = ( feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"] ) feature["labels_neg"] = ( feature["labels_neg"] + remainder_label if padding_side == "right" else remainder_label + feature["labels_neg"] ) feature["input_ids_neg"] = ( feature["input_ids_neg"] + remainder_ids if padding_side == "right" else remainder_ids + feature["input_ids_neg"] ) feature["attention_mask_neg"] = ( feature["attention_mask_neg"] + remainder_mask if padding_side == "right" else remainder_mask + feature["attention_mask_neg"] ) elif padding_side == "right": feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64) feature["labels_neg"] = np.concatenate([feature["labels_neg"], remainder_label]).astype(np.int64) feature["input_ids_neg"] = np.concatenate([feature["input_ids_neg"], remainder_ids]).astype(np.int64) feature["attention_mask_neg"] = np.concatenate([feature["attention_mask_neg"], remainder_mask]).astype(np.int64) else: feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64) feature["labels_neg"] = np.concatenate([remainder_label, feature["labels_neg"]]).astype(np.int64) feature["input_ids_neg"] = np.concatenate([remainder_ids, feature["input_ids_neg"]]).astype(np.int64) feature["attention_mask_neg"] = np.concatenate([remainder_mask, feature["attention_mask_neg"]]).astype(np.int64) features = self.tokenizer.pad( features, padding=self.padding, max_length=max_label_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=return_tensors, ) # prepare decoder_input_ids if ( labels is not None and self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels") ): decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"]) features["decoder_input_ids"] = decoder_input_ids return features class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") kwargs["model"].save_pretrained(checkpoint_folder) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") torch.save({}, pytorch_model_path) return control class LoadBestPeftModelCallback(TrainerCallback): def on_train_end( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).") best_model_path = 
os.path.join(state.best_model_checkpoint, "adapter_model.bin") adapters_weights = torch.load(best_model_path) model = kwargs["model"] set_peft_model_state_dict(model, adapters_weights) return control def get_peft_model(model, peft_config, adapter_name: str = "default"): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: return PeftModel(model, peft_config, adapter_name=adapter_name) if peft_config.is_prompt_learning: peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
1
2023-12-22 07:32:19+00:00
8k
Maximilian-Winter/llama-cpp-agent
src/llama_cpp_agent/structured_output_agent.py
[ { "identifier": "LlamaCppAgent", "path": "src/llama_cpp_agent/llm_agent.py", "snippet": "class LlamaCppAgent:\n \"\"\"\n A base agent that can be used for chat, structured output and function calling. Is used as part of all other agents.\n \"\"\"\n def __init__(self, model: Union[Llama, LlamaLLMSettings], name: str = \"llamacpp_agent\", system_prompt: str = \"You are helpful assistant.\",\n predefined_messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML,\n custom_messages_formatter: MessagesFormatter = None, debug_output: bool = False):\n if isinstance(model, LlamaLLMSettings):\n model = Llama(**model.as_dict())\n self.model = model\n self.name = name\n self.system_prompt = system_prompt\n self.debug_output = debug_output\n self.messages = []\n if custom_messages_formatter is not None:\n self.messages_formatter = custom_messages_formatter\n else:\n self.messages_formatter = get_predefined_messages_formatter(predefined_messages_formatter_type)\n\n @staticmethod\n def get_function_tool_registry(function_tool_list: List[LlamaCppFunctionTool]):\n function_tool_registry = LlamaCppFunctionToolRegistry()\n\n for function_tool in function_tool_list:\n function_tool_registry.register_function_tool(function_tool)\n function_tool_registry.finalize()\n return function_tool_registry\n\n def add_message(self, message: str, role: Literal[\"system\"] | Literal[\"user\"] | Literal[\"assistant\"] = \"user\",\n auto_format=False):\n if len(self.messages) == 0:\n self.messages.append(\n {\n \"role\": \"user\",\n \"content\": message.strip(),\n },\n )\n if auto_format:\n role = \"user\" if (self.messages[-1][\"role\"] == \"assistant\" or self.messages[-1][\n \"role\"] == \"system\") else \"assistant\"\n self.messages.append(\n {\n \"role\": role,\n \"content\": message.strip(),\n },\n )\n\n def get_chat_response(\n self,\n message: str = None,\n role: Literal[\"system\", \"user\", \"assistant\", \"function\"] = \"user\",\n system_prompt: str = None,\n add_message_to_chat_history: bool = True,\n add_response_to_chat_history: bool = True,\n grammar: LlamaGrammar = None,\n function_tool_registry: LlamaCppFunctionToolRegistry = None,\n streaming_callback: Callable[[StreamingResponse], None] = None,\n max_tokens: int = 0,\n temperature: float = 0.4,\n top_k: int = 0,\n top_p: float = 1.0,\n min_p: float = 0.05,\n typical_p: float = 1.0,\n repeat_penalty: float = 1.0,\n mirostat_mode: int = 0,\n mirostat_tau: float = 5.0,\n mirostat_eta: float = 0.1,\n tfs_z: float = 1.0,\n stop_sequences: List[str] = None,\n stream: bool = True,\n print_output: bool = True,\n k_last_messages: int = 0\n ) :\n if function_tool_registry is not None:\n grammar = function_tool_registry.get_grammar()\n if system_prompt is None:\n system_prompt = self.system_prompt\n messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt.strip(),\n },\n ]\n if message is not None and add_message_to_chat_history:\n self.messages.append(\n {\n \"role\": role,\n \"content\": message.strip(),\n },\n )\n if not add_message_to_chat_history and message is not None:\n messages.append(\n {\n \"role\": role,\n \"content\": message.strip(),\n },\n )\n if k_last_messages > 0:\n messages.extend(self.messages[-k_last_messages:])\n else:\n messages.extend(self.messages)\n\n prompt, response_role = self.messages_formatter.format_messages(messages)\n if self.debug_output:\n print(prompt, end=\"\")\n\n if stop_sequences is None:\n stop_sequences = self.messages_formatter.DEFAULT_STOP_SEQUENCES\n\n if self.model:\n completion = 
self.model.create_completion(\n prompt=prompt,\n max_tokens=max_tokens,\n stream=stream,\n stop=stop_sequences,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n min_p=min_p,\n typical_p=typical_p,\n mirostat_mode=mirostat_mode,\n mirostat_tau=mirostat_tau,\n mirostat_eta=mirostat_eta,\n tfs_z=tfs_z,\n repeat_penalty=repeat_penalty,\n grammar=grammar\n )\n if stream and print_output:\n full_response = \"\"\n for out in completion:\n text = out['choices'][0]['text']\n full_response += text\n if streaming_callback is not None:\n streaming_callback(StreamingResponse(text=text, is_last_response=False))\n print(text, end=\"\")\n if streaming_callback is not None:\n streaming_callback(StreamingResponse(text=\"\", is_last_response=True))\n print(\"\")\n\n if add_response_to_chat_history:\n self.messages.append(\n {\n \"role\": response_role,\n \"content\": full_response.strip(),\n },\n )\n if function_tool_registry is not None:\n full_response = function_tool_registry.handle_function_call(full_response)\n return full_response if full_response else None\n return full_response.strip() if full_response else None\n if stream:\n full_response = \"\"\n for out in completion:\n text = out['choices'][0]['text']\n full_response += text\n if streaming_callback is not None:\n streaming_callback(StreamingResponse(text=text, is_last_response=False))\n if streaming_callback is not None:\n streaming_callback(StreamingResponse(text=\"\", is_last_response=True))\n if add_response_to_chat_history:\n self.messages.append(\n {\n \"role\": response_role,\n \"content\": full_response.strip(),\n },\n )\n if function_tool_registry is not None:\n full_response = function_tool_registry.handle_function_call(full_response)\n return full_response if full_response else None\n return full_response.strip() if full_response else None\n if print_output:\n text = completion['choices'][0]['text']\n print(text)\n\n if add_response_to_chat_history:\n self.messages.append(\n {\n \"role\": response_role,\n \"content\": text.strip(),\n },\n )\n if function_tool_registry is not None:\n text = function_tool_registry.handle_function_call(text)\n return text if text else None\n return text.strip() if text else None\n text = completion['choices'][0]['text']\n if add_response_to_chat_history:\n self.messages.append(\n {\n \"role\": response_role,\n \"content\": text.strip(),\n },\n )\n if function_tool_registry is not None:\n text = function_tool_registry.handle_function_call(text)\n return text if text else None\n return text.strip() if text else None\n return \"Error: No model loaded!\"\n\n def remove_last_k_chat_messages(self, k: int):\n # Ensure k is not greater than the length of the messages list\n k = min(k, len(self.messages))\n\n # Remove the last k elements\n self.messages = self.messages[:-k] if k > 0 else self.messages\n\n def remove_first_k_chat_messages(self, k: int):\n # Ensure k is not greater than the length of the messages list\n k = min(k, len(self.messages))\n\n # Remove the first k elements\n self.messages = self.messages[k:] if k > 0 else self.messages\n\n def save_messages(self, file_path: str):\n with open(file_path, 'w', encoding=\"utf-8\") as file:\n json.dump(self.messages, file, indent=4)\n\n def load_messages(self, file_path: str):\n with open(file_path, 'r', encoding=\"utf-8\") as file:\n loaded_messages = json.load(file)\n self.messages.extend(loaded_messages)\n\n @staticmethod\n def agent_conversation(\n agent_1: \"LlamaCppAgent\",\n agent_2: \"LlamaCppAgent\",\n agent_1_initial_message: str,\n 
number_of_exchanges: int = 15\n ):\n current_message = agent_1_initial_message\n current_agent, next_agent = agent_2, agent_1\n\n for _ in range(number_of_exchanges):\n # Current agent responds to the last message\n response = current_agent.get_chat_response(\n message=current_message,\n role=\"user\",\n add_response_to_chat_history=True,\n print_output=True,\n top_p=0.8,\n top_k=40\n )\n\n # Update the message for the next turn\n current_message = response\n\n # Swap the agents for the next turn\n current_agent, next_agent = next_agent, current_agent\n\n print(\"Conversation ended.\")\n\n @staticmethod\n def group_conversation(\n agent_list: list[\"LlamaCppAgent\"],\n initial_message: str,\n number_of_turns: int = 4\n ):\n responses = [{\n \"role\": \"user\",\n \"content\": initial_message,\n }]\n last_role = \"user\"\n for _ in range(number_of_turns):\n\n for a in agent_list:\n a.messages = responses\n response = a.get_chat_response(add_response_to_chat_history=False, add_message_to_chat_history=False)\n response = f\"{a.name}: {response}\" if not response.strip().startswith(a.name) else response\n responses.append({\n \"role\": \"user\" if last_role == \"assistant\" else \"assistant\",\n \"content\": response,\n })\n last_role = responses[-1][\"role\"]\n print(\"Conversation ended.\")" }, { "identifier": "StreamingResponse", "path": "src/llama_cpp_agent/llm_agent.py", "snippet": "class StreamingResponse:\n text: str\n is_last_response: bool" }, { "identifier": "PromptTemplate", "path": "src/llama_cpp_agent/llm_prompt_template.py", "snippet": "class PromptTemplate:\n def __init__(self, template_file=None, template_string=None):\n if template_file:\n with open(template_file, \"r\") as file:\n self.template = file.read()\n elif template_string:\n self.template = template_string\n else:\n raise ValueError(\"Either 'template_file' or 'template_string' must be provided\")\n\n @classmethod\n def from_string(cls, template_string):\n return cls(template_string=template_string)\n\n @classmethod\n def from_file(cls, template_file):\n with open(template_file, \"r\") as file:\n template_string = file.read()\n return cls(template_string=template_string)\n\n def _remove_empty_placeholders(self, text):\n # Remove lines that contain only the empty placeholder\n text = re.sub(rf'^{\"__EMPTY_TEMPLATE_FIELD__\"}$', '', text, flags=re.MULTILINE)\n # Remove the empty placeholder from lines with other content\n text = re.sub(rf'{\"__EMPTY_TEMPLATE_FIELD__\"}', '', text)\n return text\n\n def generate_prompt(self, template_fields: Union[dict, PromptTemplateFields], remove_empty_template_field=True):\n\n if isinstance(template_fields, PromptTemplateFields):\n template_fields = template_fields.get_fields_dict()\n\n if not remove_empty_template_field:\n def replace_placeholder(match):\n placeholder = match.group(1)\n return template_fields.get(placeholder, match.group(0))\n\n prompt = re.sub(r\"\\{(\\w+)\\}\", replace_placeholder, self.template)\n return prompt\n\n def replace_placeholder(match):\n placeholder = match.group(1)\n if template_fields.get(placeholder, match.group(0)) != '':\n return template_fields.get(placeholder, match.group(0))\n return \"__EMPTY_TEMPLATE_FIELD__\"\n\n # Initial placeholder replacement\n prompt = re.sub(r\"\\{(\\w+)\\}\", replace_placeholder, self.template)\n\n return self._remove_empty_placeholders(prompt)" }, { "identifier": "LlamaLLMGenerationSettings", "path": "src/llama_cpp_agent/llm_settings.py", "snippet": "class LlamaLLMGenerationSettings:\n max_tokens: int = 0\n 
temperature: float = 0.35\n top_k: int = 0\n top_p: float = 1.0\n min_p: float = 0.05\n typical_p: float = 1.0\n repeat_penalty: float = 1.0\n mirostat_mode: int = 0\n mirostat_tau: float = 5.0\n mirostat_eta: float = 0.1\n tfs_z: float = 1.0\n stop_sequences: List[str] = None\n stream: bool = True\n print_output: bool = True\n # k_last_messages: int = 0\n\n def save(self, file_path: str):\n with open(file_path, 'w', encoding=\"utf-8\") as file:\n json.dump(self.as_dict(), file, indent=4)\n\n @staticmethod\n def load_from_file(file_path: str) -> \"LlamaLLMGenerationSettings\":\n with open(file_path, 'r', encoding=\"utf-8\") as file:\n loaded_settings = json.load(file)\n return LlamaLLMGenerationSettings(**loaded_settings)\n\n @staticmethod\n def load_from_dict(settings: dict) -> \"LlamaLLMGenerationSettings\":\n return LlamaLLMGenerationSettings(**settings)\n\n def as_dict(self) -> dict:\n return self.__dict__" }, { "identifier": "LlamaLLMSettings", "path": "src/llama_cpp_agent/llm_settings.py", "snippet": "class LlamaLLMSettings:\n model_path: str\n n_gpu_layers: int = 0\n f16_kv: bool = True\n offload_kqv: bool = True\n use_mlock: bool = False\n embedding: bool = False\n n_threads: int = None\n n_batch: int = 512\n n_ctx: int = 512\n last_n_tokens_size: int = 64\n verbose: bool = False\n seed: int = -1\n\n def save(self, file_path: str):\n with open(file_path, 'w', encoding=\"utf-8\") as file:\n json.dump(self.as_dict(), file, indent=4)\n\n @staticmethod\n def load_from_file(file_path: str) -> \"LlamaLLMSettings\":\n with open(file_path, 'r', encoding=\"utf-8\") as file:\n loaded_settings = json.load(file)\n return LlamaLLMSettings(**loaded_settings)\n\n @staticmethod\n def load_from_dict(settings: dict) -> \"LlamaLLMSettings\":\n return LlamaLLMSettings(**settings)\n\n def as_dict(self) -> dict:\n return self.__dict__" }, { "identifier": "extract_object_from_response", "path": "src/llama_cpp_agent/output_parser.py", "snippet": "def extract_object_from_response(response: str, object_clas: type):\n obj = parse_json_response(response)\n cls = object_clas\n obj = cls(**obj)\n return obj" }, { "identifier": "MessagesFormatterType", "path": "src/llama_cpp_agent/messages_formatter.py", "snippet": "class MessagesFormatterType(Enum):\n MIXTRAL = 1\n CHATML = 2\n VICUNA = 3\n LLAMA_2 = 4\n SYNTHIA = 5\n NEURAL_CHAT = 6\n SOLAR = 7\n OPEN_CHAT = 8" }, { "identifier": "MessagesFormatter", "path": "src/llama_cpp_agent/messages_formatter.py", "snippet": "class MessagesFormatter:\n def __init__(self, PRE_PROMPT: str, SYS_PROMPT_START: str, SYS_PROMPT_END: str, USER_PROMPT_START: str,\n USER_PROMPT_END: str,\n ASSISTANT_PROMPT_START: str,\n ASSISTANT_PROMPT_END: str,\n INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE: bool,\n DEFAULT_STOP_SEQUENCES: List[str],\n USE_USER_ROLE_FUNCTION_CALL_RESULT: bool = True,\n FUNCTION_PROMPT_START: str = \"\",\n FUNCTION_PROMPT_END: str = \"\"):\n self.PRE_PROMPT = PRE_PROMPT\n self.SYS_PROMPT_START = SYS_PROMPT_START\n self.SYS_PROMPT_END = SYS_PROMPT_END\n self.USER_PROMPT_START = USER_PROMPT_START\n self.USER_PROMPT_END = USER_PROMPT_END\n self.ASSISTANT_PROMPT_START = ASSISTANT_PROMPT_START\n self.ASSISTANT_PROMPT_END = ASSISTANT_PROMPT_END\n self.INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE = INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE\n self.DEFAULT_STOP_SEQUENCES = DEFAULT_STOP_SEQUENCES\n self.FUNCTION_PROMPT_START = FUNCTION_PROMPT_START\n self.FUNCTION_PROMPT_END = FUNCTION_PROMPT_END\n self.USE_USER_ROLE_FUNCTION_CALL_RESULT = USE_USER_ROLE_FUNCTION_CALL_RESULT\n\n def 
format_messages(self, messages: List[Dict[str, str]]) -> Tuple[str, str]:\n formatted_messages = self.PRE_PROMPT\n last_role = \"assistant\"\n no_user_prompt_start = False\n for message in messages:\n if message[\"role\"] == \"system\":\n formatted_messages += self.SYS_PROMPT_START + message[\"content\"] + self.SYS_PROMPT_END\n last_role = \"system\"\n if self.INCLUDE_SYS_PROMPT_IN_FIRST_USER_MESSAGE:\n formatted_messages = self.USER_PROMPT_START + formatted_messages\n no_user_prompt_start = True\n elif message[\"role\"] == \"user\":\n if no_user_prompt_start:\n no_user_prompt_start = False\n formatted_messages += message[\"content\"] + self.USER_PROMPT_END\n else:\n formatted_messages += self.USER_PROMPT_START + message[\"content\"] + self.USER_PROMPT_END\n last_role = \"user\"\n elif message[\"role\"] == \"assistant\":\n formatted_messages += self.ASSISTANT_PROMPT_START + message[\"content\"] + self.ASSISTANT_PROMPT_END\n last_role = \"assistant\"\n elif message[\"role\"] == \"function\":\n if self.USE_USER_ROLE_FUNCTION_CALL_RESULT:\n formatted_messages += self.USER_PROMPT_START + message[\"content\"] + self.USER_PROMPT_END\n last_role = \"user\"\n else:\n formatted_messages += self.FUNCTION_PROMPT_START + message[\"content\"] + self.FUNCTION_PROMPT_END\n last_role = \"function\"\n if last_role == \"system\" or last_role == \"user\":\n return formatted_messages + self.ASSISTANT_PROMPT_START.strip(), \"assistant\"\n return formatted_messages + self.USER_PROMPT_START.strip(), \"user\"\n\n def save(self, file_path: str):\n with open(file_path, 'w', encoding=\"utf-8\") as file:\n json.dump(self.as_dict(), file, indent=4)\n\n @staticmethod\n def load_from_file(file_path: str) -> \"MessagesFormatter\":\n with open(file_path, 'r', encoding=\"utf-8\") as file:\n loaded_messages_formatter = json.load(file)\n return MessagesFormatter(**loaded_messages_formatter)\n\n @staticmethod\n def load_from_dict(loaded_messages_formatter: dict) -> \"MessagesFormatter\":\n return MessagesFormatter(**loaded_messages_formatter)\n\n def as_dict(self) -> dict:\n return self.__dict__" }, { "identifier": "generate_gbnf_grammar_and_documentation", "path": "src/llama_cpp_agent/gbnf_grammar_generator/gbnf_grammar_from_pydantic_models.py", "snippet": "def generate_gbnf_grammar_and_documentation(pydantic_model_list, look_for_markdown_code_block=False,\n look_for_triple_quoted_string=False, root_rule_class: str = None,\n root_rule_content: str = None,\n model_prefix: str = \"Output Model\",\n fields_prefix: str = \"Output Fields\", list_of_outputs: bool = False,\n documentation_with_field_description=True):\n documentation = generate_text_documentation(copy(pydantic_model_list), model_prefix, fields_prefix,\n documentation_with_field_description=documentation_with_field_description)\n grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, look_for_markdown_code_block,\n look_for_triple_quoted_string, root_rule_class,\n root_rule_content, list_of_outputs)\n grammar = remove_empty_lines(grammar + get_primitive_grammar(grammar))\n return grammar, documentation" } ]
import json from copy import copy from typing import Type, Callable, Union from llama_cpp import Llama, LlamaGrammar from pydantic import BaseModel from .llm_agent import LlamaCppAgent, StreamingResponse from .llm_prompt_template import PromptTemplate from .llm_settings import LlamaLLMGenerationSettings, LlamaLLMSettings from .output_parser import extract_object_from_response from .messages_formatter import MessagesFormatterType, MessagesFormatter from .gbnf_grammar_generator.gbnf_grammar_from_pydantic_models import generate_gbnf_grammar_and_documentation
5,194
class StructuredOutputAgent: """ An agent that creates structured output based on pydantic models from an unstructured text. """ def __init__(self, llama_llm: Union[Llama, LlamaLLMSettings], llama_generation_settings: LlamaLLMGenerationSettings = LlamaLLMGenerationSettings(), messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML, custom_messages_formatter: MessagesFormatter = None, streaming_callback: Callable[[StreamingResponse], None] = None, debug_output: bool = False): self.llama_generation_settings = llama_generation_settings self.grammar_cache = {} self.system_prompt_template = PromptTemplate.from_string( "You are an advanced AI agent. You are tasked to assist the user by creating structured output in JSON format.\n\n{documentation}") self.creation_prompt_template = PromptTemplate.from_string( "Create an JSON response based on the following input.\n\nInput:\n\n{user_input}")
class StructuredOutputAgent: """ An agent that creates structured output based on pydantic models from an unstructured text. """ def __init__(self, llama_llm: Union[Llama, LlamaLLMSettings], llama_generation_settings: LlamaLLMGenerationSettings = LlamaLLMGenerationSettings(), messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML, custom_messages_formatter: MessagesFormatter = None, streaming_callback: Callable[[StreamingResponse], None] = None, debug_output: bool = False): self.llama_generation_settings = llama_generation_settings self.grammar_cache = {} self.system_prompt_template = PromptTemplate.from_string( "You are an advanced AI agent. You are tasked to assist the user by creating structured output in JSON format.\n\n{documentation}") self.creation_prompt_template = PromptTemplate.from_string( "Create an JSON response based on the following input.\n\nInput:\n\n{user_input}")
self.llama_cpp_agent = LlamaCppAgent(llama_llm, debug_output=debug_output,
0
2023-12-29 16:54:39+00:00
8k
usail-hkust/LLMTSCS
utils/oneline.py
[ { "identifier": "DIC_AGENTS", "path": "utils/config.py", "snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\n\n \"EfficientPressLight\": PressLightAgentOne,\n \"EfficientColight\": CoLightAgent,\n \"EfficientMPLight\": MPLightAgent,\n \"MPLight\": MPLightAgent,\n \"Colight\": CoLightAgent,\n\n \"AdvancedMPLight\": AdvancedMPLightAgent,\n \"AdvancedColight\": CoLightAgent,\n \"AdvancedDQN\": SimpleDQNAgentOne,\n \"Attend\": AttendLightAgent,\n \"ChatGPTTLCSWaitTimeForecast\": ChatGPTTLCS_Wait_Time_Forecast,\n \"ChatGPTTLCSCommonsense\": ChatGPTTLCS_Commonsense,\n \"ChatGPTTLCSCommonsenseFlowCoordination\": ChatGPTTLCS_Commonsense_Flow_Coordination,\n \"ChatGPTTLCSWaitTimeForecastCode\": ChatGPTTLCS_Wait_Time_Forecast_Code,\n \"ChatGPTTLCSCommonsenseCode\": ChatGPTTLCS_Commonsense_Code,\n \"ChatGPTTLCSCommonsenseFlowCoordinationCode\": ChatGPTTLCS_Commonsense_Flow_Coordination_Code,\n \"ChatGPTTLCSZeroKnowledge\": ChatGPTTLCS_Zero_Knowledge,\n \"ChatGPTTLCSZeroKnowledgeCode\": ChatGPTTLCS_Zero_Knowledge_Code,\n \"LLMTLCSWaitTimeForecast\": LLM_TLCS_Wait_Time_Forecast,\n \"LLMTLCSCommonsense\": LLM_TLCS_Commonsense,\n}" }, { "identifier": "merge", "path": "utils/my_utils.py", "snippet": "def merge(dic_tmp, dic_to_change):\r\ndef load_json(file):\r\ndef dump_json(data, file, indent=None):\r\ndef calculate_road_length(road_points):\r\ndef get_state(roads, env):\r\ndef get_state_detail(roads, env):\r\ndef get_state_three_segment(roads, env):\r\ndef trans_prompt_llama(message, chat_history, system_prompt):\r" }, { "identifier": "CityFlowEnv", "path": "utils/cityflow_env.py", "snippet": "class CityFlowEnv:\n\n def __init__(self, path_to_log, path_to_work_directory, dic_traffic_env_conf, dic_path):\n self.path_to_log = path_to_log\n self.path_to_work_directory = path_to_work_directory\n self.dic_traffic_env_conf = dic_traffic_env_conf\n self.dic_path = dic_path\n\n self.current_time = None\n self.id_to_index = None\n self.traffic_light_node_dict = None\n self.intersection_dict = None\n self.eng = None\n self.list_intersection = None\n self.list_inter_log = None\n self.list_lanes = None\n self.system_states = None\n self.lane_length = None\n self.waiting_vehicle_list = {}\n\n # check min action time\n if self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"] <= self.dic_traffic_env_conf[\"YELLOW_TIME\"]:\n \"\"\" include the yellow time in action time \"\"\"\n print(\"MIN_ACTION_TIME should include YELLOW_TIME\")\n sys.exit()\n\n # touch new inter_{}.pkl (if exists, remove)\n for inter_ind in range(self.dic_traffic_env_conf[\"NUM_INTERSECTIONS\"]):\n path_to_log_file = os.path.join(self.path_to_log, \"inter_{0}.pkl\".format(inter_ind))\n f = open(path_to_log_file, \"wb\")\n f.close()\n\n def reset(self):\n print(\" ============= self.eng.reset() to be implemented ==========\")\n if not os.path.isdir(\"./frontend/web\"):\n os.mkdir(\"./frontend/web\")\n cityflow_config = {\n \"interval\": self.dic_traffic_env_conf[\"INTERVAL\"],\n \"seed\": int(np.random.randint(0, 100)),\n \"laneChange\": True,\n \"dir\": self.path_to_work_directory+\"/\",\n \"roadnetFile\": self.dic_traffic_env_conf[\"ROADNET_FILE\"],\n \"flowFile\": self.dic_traffic_env_conf[\"TRAFFIC_FILE\"],\n \"rlTrafficLight\": True,\n \"saveReplay\": True, # if \"GPT\" in self.dic_traffic_env_conf[\"MODEL_NAME\"] or \"llm\" in self.dic_traffic_env_conf[\"MODEL_NAME\"] 
else False,\n \"roadnetLogFile\": f\"../../../frontend/web/{self.dic_traffic_env_conf['ROADNET_FILE']}-{self.dic_traffic_env_conf['TRAFFIC_FILE']}-{self.dic_traffic_env_conf['MODEL_NAME']}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases-roadnetLogFile.json\",\n \"replayLogFile\": f\"../../../frontend/web/{self.dic_traffic_env_conf['ROADNET_FILE']}-{self.dic_traffic_env_conf['TRAFFIC_FILE']}-{self.dic_traffic_env_conf['MODEL_NAME']}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases-replayLogFile.txt\"\n }\n # print(cityflow_config)\n with open(os.path.join(self.path_to_work_directory, \"cityflow.config\"), \"w\") as json_file:\n json.dump(cityflow_config, json_file)\n\n self.eng = engine.Engine(os.path.join(self.path_to_work_directory, \"cityflow.config\"), thread_num=1)\n\n # get adjacency\n self.traffic_light_node_dict = self._adjacency_extraction()\n\n # get lane length\n _, self.lane_length = self.get_lane_length()\n\n # initialize intersections (grid)\n self.list_intersection = [Intersection((i+1, j+1), self.dic_traffic_env_conf, self.eng,\n self.traffic_light_node_dict[\"intersection_{0}_{1}\".format(i+1, j+1)],\n self.path_to_log,\n self.lane_length)\n for i in range(self.dic_traffic_env_conf[\"NUM_COL\"])\n for j in range(self.dic_traffic_env_conf[\"NUM_ROW\"])]\n self.list_inter_log = [[] for _ in range(self.dic_traffic_env_conf[\"NUM_COL\"] *\n self.dic_traffic_env_conf[\"NUM_ROW\"])]\n\n self.id_to_index = {}\n count = 0\n for i in range(self.dic_traffic_env_conf[\"NUM_COL\"]):\n for j in range(self.dic_traffic_env_conf[\"NUM_ROW\"]):\n self.id_to_index[\"intersection_{0}_{1}\".format(i+1, j+1)] = count\n count += 1\n\n self.list_lanes = []\n for inter in self.list_intersection:\n self.list_lanes += inter.list_lanes\n self.list_lanes = np.unique(self.list_lanes).tolist()\n\n # get new measurements\n self.system_states = {\"get_lane_vehicles\": self.eng.get_lane_vehicles(),\n \"get_lane_waiting_vehicle_count\": self.eng.get_lane_waiting_vehicle_count(),\n \"get_vehicle_speed\": self.eng.get_vehicle_speed(),\n \"get_vehicle_distance\": self.eng.get_vehicle_distance(),\n }\n\n for inter in self.list_intersection:\n inter.update_current_measurements(self.system_states)\n state, done = self.get_state()\n\n # create roadnet dict\n if self.intersection_dict is None:\n self.create_intersection_dict()\n\n return state\n\n\n def create_intersection_dict(self):\n roadnet = load_json(f'./{self.dic_path[\"PATH_TO_DATA\"]}/{self.dic_traffic_env_conf[\"ROADNET_FILE\"]}')\n\n intersections_raw = roadnet[\"intersections\"]\n roads_raw = roadnet[\"roads\"]\n\n agent_intersections = {}\n\n # init agent intersections\n for i, inter in enumerate(intersections_raw):\n inter_id = inter[\"id\"]\n intersection = None\n for env_inter in self.list_intersection:\n if env_inter.inter_name == inter_id:\n intersection = env_inter\n break\n\n if len(inter['roadLinks']) > 0:\n # collect yellow allowed road links\n yellow_time = None\n phases = inter['trafficLight']['lightphases']\n all_sets = []\n yellow_phase_idx = None\n for p_i, p in enumerate(phases):\n all_sets.append(set(p['availableRoadLinks']))\n if p[\"time\"] < 30:\n yellow_phase_idx = p_i\n yellow_time = p[\"time\"]\n yellow_allowed_links = reduce(lambda x, y: x & y, all_sets)\n\n # init intersection\n agent_intersections[inter_id] = {\"phases\": {\"Y\": {\"time\": yellow_time, \"idx\": yellow_phase_idx}},\n \"roads\": {}}\n\n # init roads\n roads = {}\n for r in inter[\"roads\"]:\n roads[r] = {\"location\": None, \"type\": \"incoming\", 
\"go_straight\": None, \"turn_left\": None,\n \"turn_right\": None, \"length\": None, \"max_speed\": None,\n \"lanes\": {\"go_straight\": [], \"turn_left\": [], \"turn_right\": []}}\n\n # collect road length speed info & init road location\n road_links = inter[\"roadLinks\"]\n for r in roads_raw:\n r_id = r[\"id\"]\n if r_id in roads:\n roads[r_id][\"length\"] = calculate_road_length(r[\"points\"])\n roads[r_id][\"max_speed\"] = r[\"lanes\"][0][\"maxSpeed\"]\n for env_road_location in intersection.dic_entering_approach_to_edge:\n if intersection.dic_entering_approach_to_edge[env_road_location] == r_id:\n roads[r_id][\"location\"] = location_dict_reverse[env_road_location]\n break\n for env_road_location in intersection.dic_exiting_approach_to_edge:\n if intersection.dic_exiting_approach_to_edge[env_road_location] == r_id:\n roads[r_id][\"location\"] = location_dict_reverse[env_road_location]\n break\n\n # collect signal phase info\n for p_idx, p in enumerate(phases):\n other_allowed_links = set(p['availableRoadLinks']) - yellow_allowed_links\n if len(other_allowed_links) > 0:\n allowed_directions = []\n for l_idx in other_allowed_links:\n link = road_links[l_idx]\n location = roads[link[\"startRoad\"]][\"location\"]\n direction = link[\"type\"]\n allowed_directions.append(f\"{location_dict[location]}{direction_dict[direction]}\")\n allowed_directions = sorted(allowed_directions)\n allowed_directions = f\"{allowed_directions[0]}{allowed_directions[1]}\"\n agent_intersections[inter_id][\"phases\"][allowed_directions] = {\"time\": p[\"time\"], \"idx\": p_idx}\n\n # collect location type direction info\n for r_link in road_links:\n start = r_link['startRoad']\n end = r_link['endRoad']\n lane_links = r_link['laneLinks']\n\n for r in roads:\n if r != start:\n continue\n # collect type\n roads[r][\"type\"] = \"outgoing\"\n\n # collect directions\n if r_link[\"type\"] == \"go_straight\":\n roads[r][\"go_straight\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"go_straight\"]:\n roads[r][\"lanes\"][\"go_straight\"].append(lane_id)\n\n elif r_link[\"type\"] == \"turn_left\":\n roads[r][\"turn_left\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"turn_left\"]:\n roads[r][\"lanes\"][\"turn_left\"].append(lane_id)\n\n elif r_link[\"type\"] == \"turn_right\":\n roads[r][\"turn_right\"] = end\n\n # collect lane info\n for l_link in lane_links:\n lane_id = l_link['startLaneIndex']\n if lane_id not in roads[r][\"lanes\"][\"turn_right\"]:\n roads[r][\"lanes\"][\"turn_right\"].append(lane_id)\n\n agent_intersections[inter_id][\"roads\"] = roads\n\n self.intersection_dict = agent_intersections\n\n def step(self, action):\n\n step_start_time = time.time()\n\n list_action_in_sec = [action]\n list_action_in_sec_display = [action]\n for i in range(self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"]-1):\n if self.dic_traffic_env_conf[\"ACTION_PATTERN\"] == \"switch\":\n list_action_in_sec.append(np.zeros_like(action).tolist())\n elif self.dic_traffic_env_conf[\"ACTION_PATTERN\"] == \"set\":\n list_action_in_sec.append(np.copy(action).tolist())\n list_action_in_sec_display.append(np.full_like(action, fill_value=-1).tolist())\n\n average_reward_action_list = [0]*len(action)\n for i in range(self.dic_traffic_env_conf[\"MIN_ACTION_TIME\"]):\n\n action_in_sec = list_action_in_sec[i]\n action_in_sec_display = list_action_in_sec_display[i]\n\n 
instant_time = self.get_current_time()\n self.current_time = self.get_current_time()\n\n before_action_feature = self.get_feature()\n # state = self.get_state()\n\n if i == 0:\n print(\"time: {0}\".format(instant_time))\n \n self._inner_step(action_in_sec)\n\n # get reward\n reward = self.get_reward()\n for j in range(len(reward)):\n average_reward_action_list[j] = (average_reward_action_list[j] * i + reward[j]) / (i + 1)\n self.log(cur_time=instant_time, before_action_feature=before_action_feature, action=action_in_sec_display)\n next_state, done = self.get_state()\n\n print(\"Step time: \", time.time() - step_start_time)\n return next_state, reward, done, average_reward_action_list\n\n def _inner_step(self, action):\n # copy current measurements to previous measurements\n for inter in self.list_intersection:\n inter.update_previous_measurements()\n # set signals\n # multi_intersection decided by action {inter_id: phase}\n for inter_ind, inter in enumerate(self.list_intersection):\n inter.set_signal(\n action=action[inter_ind],\n action_pattern=self.dic_traffic_env_conf[\"ACTION_PATTERN\"],\n yellow_time=self.dic_traffic_env_conf[\"YELLOW_TIME\"],\n path_to_log=self.path_to_log\n )\n\n # run one step\n for i in range(int(1/self.dic_traffic_env_conf[\"INTERVAL\"])):\n self.eng.next_step()\n\n # update queuing vehicle info\n vehicle_ids = self.eng.get_vehicles(include_waiting=False)\n for v_id in vehicle_ids:\n v_info = self.eng.get_vehicle_info(v_id)\n speed = float(v_info[\"speed\"])\n if speed < 0.1:\n if v_id not in self.waiting_vehicle_list:\n self.waiting_vehicle_list[v_id] = {\"time\": None, \"link\": None}\n self.waiting_vehicle_list[v_id][\"time\"] = self.dic_traffic_env_conf[\"INTERVAL\"]\n self.waiting_vehicle_list[v_id][\"link\"] = v_info['drivable']\n else:\n if self.waiting_vehicle_list[v_id][\"link\"] != v_info['drivable']:\n self.waiting_vehicle_list[v_id] = {\"time\": None, \"link\": None}\n self.waiting_vehicle_list[v_id][\"time\"] = self.dic_traffic_env_conf[\"INTERVAL\"]\n self.waiting_vehicle_list[v_id][\"link\"] = v_info['drivable']\n else:\n self.waiting_vehicle_list[v_id][\"time\"] += self.dic_traffic_env_conf[\"INTERVAL\"]\n else:\n if v_id in self.waiting_vehicle_list:\n self.waiting_vehicle_list.pop(v_id)\n\n if v_id in self.waiting_vehicle_list and self.waiting_vehicle_list[v_id][\"link\"] != v_info['drivable']:\n self.waiting_vehicle_list.pop(v_id)\n\n self.system_states = {\"get_lane_vehicles\": self.eng.get_lane_vehicles(),\n \"get_lane_waiting_vehicle_count\": self.eng.get_lane_waiting_vehicle_count(),\n \"get_vehicle_speed\": self.eng.get_vehicle_speed(),\n \"get_vehicle_distance\": self.eng.get_vehicle_distance()\n }\n\n for inter in self.list_intersection:\n inter.update_current_measurements(self.system_states)\n\n def get_feature(self):\n list_feature = [inter.get_feature() for inter in self.list_intersection]\n return list_feature\n\n def get_state(self, list_state_feature=None):\n if list_state_feature is not None:\n list_state = [inter.get_state(list_state_feature) for inter in self.list_intersection]\n done = False\n else:\n list_state = [inter.get_state(self.dic_traffic_env_conf[\"LIST_STATE_FEATURE\"]) for inter in self.list_intersection]\n done = False\n return list_state, done\n\n def get_reward(self):\n list_reward = [inter.get_reward(self.dic_traffic_env_conf[\"DIC_REWARD_INFO\"]) for inter in self.list_intersection]\n return list_reward\n\n def get_current_time(self):\n return self.eng.get_current_time()\n\n def log(self, cur_time, 
before_action_feature, action):\n\n for inter_ind in range(len(self.list_intersection)):\n self.list_inter_log[inter_ind].append({\"time\": cur_time,\n \"state\": before_action_feature[inter_ind],\n \"action\": action[inter_ind]})\n\n def batch_log_2(self):\n \"\"\"\n Used for model test, only log the vehicle_inter_.csv\n \"\"\"\n for inter_ind in range(self.dic_traffic_env_conf[\"NUM_INTERSECTIONS\"]):\n # changed from origin\n if int(inter_ind) % 100 == 0:\n print(\"Batch log for inter \", inter_ind)\n path_to_log_file = os.path.join(self.path_to_log, \"vehicle_inter_{0}.csv\".format(inter_ind))\n dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()\n df = pd.DataFrame.from_dict(dic_vehicle, orient=\"index\")\n df.to_csv(path_to_log_file, na_rep=\"nan\")\n\n def batch_log(self, start, stop):\n for inter_ind in range(start, stop):\n # changed from origin\n if int(inter_ind) % 100 == 0:\n print(\"Batch log for inter \", inter_ind)\n path_to_log_file = os.path.join(self.path_to_log, \"vehicle_inter_{0}.csv\".format(inter_ind))\n dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()\n df = pd.DataFrame.from_dict(dic_vehicle, orient=\"index\")\n df.to_csv(path_to_log_file, na_rep=\"nan\")\n \n path_to_log_file = os.path.join(self.path_to_log, \"inter_{0}.pkl\".format(inter_ind))\n f = open(path_to_log_file, \"wb\")\n pickle.dump(self.list_inter_log[inter_ind], f)\n f.close()\n\n def bulk_log_multi_process(self, batch_size=100):\n assert len(self.list_intersection) == len(self.list_inter_log)\n if batch_size > len(self.list_intersection):\n batch_size_run = len(self.list_intersection)\n else:\n batch_size_run = batch_size\n process_list = []\n for batch in range(0, len(self.list_intersection), batch_size_run):\n start = batch\n stop = min(batch + batch_size, len(self.list_intersection))\n p = Process(target=self.batch_log, args=(start, stop))\n print(\"before\")\n p.start()\n print(\"end\")\n process_list.append(p)\n print(\"before join\")\n\n for t in process_list:\n t.join()\n print(\"end join\")\n\n def _adjacency_extraction(self):\n traffic_light_node_dict = {}\n file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf[\"ROADNET_FILE\"])\n with open(\"{0}\".format(file)) as json_data:\n net = json.load(json_data)\n for inter in net[\"intersections\"]:\n if not inter[\"virtual\"]:\n traffic_light_node_dict[inter[\"id\"]] = {\"location\": {\"x\": float(inter[\"point\"][\"x\"]),\n \"y\": float(inter[\"point\"][\"y\"])},\n \"total_inter_num\": None, \"adjacency_row\": None,\n \"inter_id_to_index\": None,\n \"neighbor_ENWS\": None}\n\n top_k = self.dic_traffic_env_conf[\"TOP_K_ADJACENCY\"]\n total_inter_num = len(traffic_light_node_dict.keys())\n inter_id_to_index = {}\n\n edge_id_dict = {}\n for road in net[\"roads\"]:\n if road[\"id\"] not in edge_id_dict.keys():\n edge_id_dict[road[\"id\"]] = {}\n edge_id_dict[road[\"id\"]][\"from\"] = road[\"startIntersection\"]\n edge_id_dict[road[\"id\"]][\"to\"] = road[\"endIntersection\"]\n\n index = 0\n for i in traffic_light_node_dict.keys():\n inter_id_to_index[i] = index\n index += 1\n\n for i in traffic_light_node_dict.keys():\n location_1 = traffic_light_node_dict[i][\"location\"]\n\n row = np.array([0]*total_inter_num)\n # row = np.zeros((self.dic_traffic_env_conf[\"NUM_ROW\"],self.dic_traffic_env_conf[\"NUM_col\"]))\n for j in traffic_light_node_dict.keys():\n location_2 = traffic_light_node_dict[j][\"location\"]\n dist = self._cal_distance(location_1, location_2)\n 
row[inter_id_to_index[j]] = dist\n if len(row) == top_k:\n adjacency_row_unsorted = np.argpartition(row, -1)[:top_k].tolist()\n elif len(row) > top_k:\n adjacency_row_unsorted = np.argpartition(row, top_k)[:top_k].tolist()\n else:\n adjacency_row_unsorted = [k for k in range(total_inter_num)]\n adjacency_row_unsorted.remove(inter_id_to_index[i])\n traffic_light_node_dict[i][\"adjacency_row\"] = [inter_id_to_index[i]]+adjacency_row_unsorted\n traffic_light_node_dict[i][\"total_inter_num\"] = total_inter_num\n\n for i in traffic_light_node_dict.keys():\n traffic_light_node_dict[i][\"total_inter_num\"] = inter_id_to_index\n traffic_light_node_dict[i][\"neighbor_ENWS\"] = []\n for j in range(4):\n road_id = i.replace(\"intersection\", \"road\")+\"_\"+str(j)\n if edge_id_dict[road_id][\"to\"] not in traffic_light_node_dict.keys():\n traffic_light_node_dict[i][\"neighbor_ENWS\"].append(None)\n else:\n traffic_light_node_dict[i][\"neighbor_ENWS\"].append(edge_id_dict[road_id][\"to\"])\n\n return traffic_light_node_dict\n\n @staticmethod\n def _cal_distance(loc_dict1, loc_dict2):\n a = np.array((loc_dict1[\"x\"], loc_dict1[\"y\"]))\n b = np.array((loc_dict2[\"x\"], loc_dict2[\"y\"]))\n return np.sqrt(np.sum((a-b)**2))\n\n @staticmethod\n def end_cityflow():\n print(\"============== cityflow process end ===============\")\n\n def get_lane_length(self):\n \"\"\"\n newly added part for get lane length\n Read the road net file\n Return: dict{lanes} normalized with the min lane length\n \"\"\"\n file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf[\"ROADNET_FILE\"])\n with open(file) as json_data:\n net = json.load(json_data)\n roads = net['roads']\n lanes_length_dict = {}\n lane_normalize_factor = {}\n\n for road in roads:\n points = road[\"points\"]\n road_length = abs(points[0]['x'] + points[0]['y'] - points[1]['x'] - points[1]['y'])\n for i in range(3):\n lane_id = road['id'] + \"_{0}\".format(i)\n lanes_length_dict[lane_id] = road_length\n min_length = min(lanes_length_dict.values())\n\n for key, value in lanes_length_dict.items():\n lane_normalize_factor[key] = value / min_length\n return lane_normalize_factor, lanes_length_dict" }, { "identifier": "path_check", "path": "utils/pipeline.py", "snippet": "def path_check(dic_path):\n if os.path.exists(dic_path[\"PATH_TO_WORK_DIRECTORY\"]):\n if dic_path[\"PATH_TO_WORK_DIRECTORY\"] != \"records/default\":\n raise FileExistsError\n else:\n pass\n else:\n os.makedirs(dic_path[\"PATH_TO_WORK_DIRECTORY\"])\n if os.path.exists(dic_path[\"PATH_TO_MODEL\"]):\n if dic_path[\"PATH_TO_MODEL\"] != \"model/default\":\n raise FileExistsError\n else:\n pass\n else:\n os.makedirs(dic_path[\"PATH_TO_MODEL\"])" }, { "identifier": "copy_cityflow_file", "path": "utils/pipeline.py", "snippet": "def copy_cityflow_file(dic_path, dic_traffic_env_conf, path=None):\n if path is None:\n path = dic_path[\"PATH_TO_WORK_DIRECTORY\"]\n shutil.copy(os.path.join(dic_path[\"PATH_TO_DATA\"], dic_traffic_env_conf[\"TRAFFIC_FILE\"]),\n os.path.join(path, dic_traffic_env_conf[\"TRAFFIC_FILE\"]))\n shutil.copy(os.path.join(dic_path[\"PATH_TO_DATA\"], dic_traffic_env_conf[\"ROADNET_FILE\"]),\n os.path.join(path, dic_traffic_env_conf[\"ROADNET_FILE\"]))" }, { "identifier": "copy_conf_file", "path": "utils/pipeline.py", "snippet": "def copy_conf_file(dic_path, dic_agent_conf, dic_traffic_env_conf, path=None):\n if path is None:\n path = dic_path[\"PATH_TO_WORK_DIRECTORY\"]\n json.dump(dic_agent_conf, open(os.path.join(path, \"agent.conf\"), \"w\"), indent=4)\n 
json.dump(dic_traffic_env_conf, open(os.path.join(path, \"traffic_env.conf\"), \"w\"), indent=4)" } ]
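Editor's note: the `_adjacency_extraction` method in the CityFlowEnv context above selects each intersection's TOP_K_ADJACENCY nearest neighbours with `np.argpartition` instead of a full sort. Below is a minimal, self-contained sketch of that pattern; the function name and the toy coordinates are illustrative and not taken from the repository.

```python
import numpy as np

def k_nearest_neighbors(points, k):
    """For each point, return the indices of its k nearest points (itself included).

    np.argpartition only guarantees that the k smallest distances land in the
    first k slots, which is enough here and cheaper than a full sort.
    """
    pts = np.asarray(points, dtype=float)
    neighbors = []
    for p in pts:
        dists = np.linalg.norm(pts - p, axis=1)   # Euclidean distance to every point
        idx = np.argpartition(dists, k - 1)[:k]   # k smallest distances, arbitrary order
        neighbors.append(idx.tolist())
    return neighbors

# toy 2x2 grid of intersections, 3 nearest (including self) per node
print(k_nearest_neighbors([(0, 0), (0, 1), (1, 0), (1, 1)], k=3))
```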
from .config import DIC_AGENTS from .my_utils import merge, get_state, get_state_detail, eight_phase_list, dump_json from copy import deepcopy from .cityflow_env import CityFlowEnv from .pipeline import path_check, copy_cityflow_file, copy_conf_file from tqdm import tqdm import os import time import numpy as np import wandb import threading
6,467
class OneLine: def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow): self.dic_agent_conf = dic_agent_conf self.dic_traffic_env_conf = dic_traffic_env_conf self.dic_path = dic_path self.agents = [] self.env = None self.roadnet = roadnet self.trafficflow = trafficflow self.models = [] self.initialize() def initialize(self): path_check(self.dic_path) copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf) copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf) self.env = CityFlowEnv( path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"], path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"], dic_traffic_env_conf=self.dic_traffic_env_conf, dic_path=self.dic_path ) self.env.reset() agent_name = self.dic_traffic_env_conf["MODEL_NAME"] for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']): if "ChatGPT" in agent_name:
class OneLine: def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow): self.dic_agent_conf = dic_agent_conf self.dic_traffic_env_conf = dic_traffic_env_conf self.dic_path = dic_path self.agents = [] self.env = None self.roadnet = roadnet self.trafficflow = trafficflow self.models = [] self.initialize() def initialize(self): path_check(self.dic_path) copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf) copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf) self.env = CityFlowEnv( path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"], path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"], dic_traffic_env_conf=self.dic_traffic_env_conf, dic_path=self.dic_path ) self.env.reset() agent_name = self.dic_traffic_env_conf["MODEL_NAME"] for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']): if "ChatGPT" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
0
2023-12-26 08:31:47+00:00
8k
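Editor's note: the `OneLine.initialize` code in this record resolves its agent class from a registry dictionary keyed by the configured model name (`DIC_AGENTS[agent_name.split("-")[0]]`). The sketch below strips that factory pattern down to its essentials; the agent classes are made-up placeholders, not the repository's real agents.

```python
class FixedTimeAgent:
    def __init__(self, intersection_id):
        self.intersection_id = intersection_id

class ChatGPTAgent:
    def __init__(self, intersection_id):
        self.intersection_id = intersection_id

# registry: configuration string -> agent class
DIC_AGENTS = {"FixedTime": FixedTimeAgent, "ChatGPT": ChatGPTAgent}

def build_agents(model_name, num_intersections):
    """Instantiate one agent per intersection from the registry.

    'ChatGPT-4'-style names resolve by their prefix before the dash,
    mirroring the split("-")[0] lookup in the record above.
    """
    agent_cls = DIC_AGENTS[model_name.split("-")[0]]
    return [agent_cls(i) for i in range(num_intersections)]

print(len(build_agents("ChatGPT-4", num_intersections=4)))
```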
alipay/private_llm
demo/edge_device.py
[ { "identifier": "PLLlamaConfig", "path": "demo/model.py", "snippet": "class PLLlamaConfig(LlamaConfig):\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=0.000001,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_theta=10000,\n rope_scaling=None,\n attention_bias=False,\n rcd=128,\n rdc=128,\n cloud_ip=\"127.0.0.1\",\n cloud_port=12345,\n layers_to_transform=None,\n **kwargs,\n ):\n super().__init__(\n vocab_size,\n hidden_size,\n intermediate_size,\n num_hidden_layers,\n num_attention_heads,\n num_key_value_heads,\n hidden_act,\n max_position_embeddings,\n initializer_range,\n rms_norm_eps,\n use_cache,\n pad_token_id,\n bos_token_id,\n eos_token_id,\n pretraining_tp,\n tie_word_embeddings,\n rope_theta,\n rope_scaling,\n attention_bias,\n **kwargs,\n )\n self.rcd = rcd\n self.rdc = rdc\n self.cloud_ip = cloud_ip\n self.cloud_port = cloud_port\n self.layers_to_transform = layers_to_transform" }, { "identifier": "LlamaForDevice", "path": "demo/model.py", "snippet": "class LlamaForDevice(PLLlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config: PLLlamaConfig):\n super().__init__(config)\n self.padding_idx = config.pad_token_id\n\n self.embed_tokens = nn.Embedding(\n config.vocab_size, config.hidden_size, self.padding_idx\n )\n from pl_lib import PLMStack\n\n if config.layers_to_transform is None:\n num_layers = config.num_hidden_layers\n else:\n num_layers = len(config.layers_to_transform)\n\n self.lora_M_stack = PLMStack(num_layers, config.rcd, config.rdc)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n self.new_gens = []\n self.tokenizer = None\n # Initialize weights and apply final processing\n self.post_init()\n\n def set_tokenizer(self, tokenizer):\n \"\"\"set tokenizer for stream\n\n Args:\n tokenizer (_type_): _description_\n \"\"\"\n self.tokenizer = tokenizer\n\n def get_output_embeddings(self):\n return self.lm_head\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n )\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n s: socket.socket = None,\n comm_profiler: CommProfiler = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n if s is None:\n print(\"no socket provided to s\")\n return\n\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n inputs_embeds = self.embed_tokens(input_ids)\n send_tensor(s, inputs_embeds, profiler=comm_profiler) # initial communication\n\n self.lora_M_stack(s, profiler=comm_profiler)\n\n hidden_states = recv_tensor(s, buffer_size=int(4096e5)) # final comm\n if self.prefill_end is None:\n # time it for throughput analysis\n self.prefill_end = time.time()\n s.sendall(\"hi\".encode())\n past_kv_shape = recv_tensor(s, buffer_size=1024)\n # hidden_states = outputs[0] # og\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(\n self.vocab_size // self.config.pretraining_tp, dim=0\n )\n logits = [\n F.linear(hidden_states, lm_head_slices[i])\n for i in range(self.config.pretraining_tp)\n ]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n # stream output\n if self.tokenizer is not None:\n token_id = torch.argmax(logits, -1).item()\n old_full = self.tokenizer.decode(self.new_gens)\n self.new_gens.append(token_id)\n new_full = self.tokenizer.decode(self.new_gens)\n print(new_full[len(old_full) :], end=\"\", flush=True)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n bs, h, seq, hi = past_kv_shape.tolist()\n past_key_values = [[torch.zeros((int(bs), int(h), int(seq + 1), int(hi)))]]\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=past_key_values, # mock on edge side\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs,\n ):\n if past_key_values is not None:\n past_length = past_key_values[0][0].shape[2]\n\n # Some generation methods already pass only the last input ID\n if input_ids.shape[1] > past_length:\n remove_prefix_length = past_length\n else:\n # Default to old behavior: 
keep only final ID\n remove_prefix_length = input_ids.shape[1] - 1\n\n input_ids = input_ids[:, remove_prefix_length:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -input_ids.shape[1] :]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n s = kwargs.get(\"s\", None)\n comm_profiler = kwargs.get(\"comm_profiler\", None)\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"s\": s,\n \"comm_profiler\": comm_profiler,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n ),\n )\n return reordered_past\n\n def my_generate(self, *args, **kwargs):\n \"\"\"simple wrapper to tell cloud when a new query comes in\"\"\"\n s = kwargs.get(\"s\", None)\n profiler = kwargs.get(\"comm_profiler\", None)\n speed_profile = kwargs.pop(\"speed_profile\", False)\n if s is None:\n print(\"no socket\")\n return\n s.sendall(\"new\".encode())\n s.recv(1024)\n with torch.no_grad():\n if speed_profile:\n self.prefill_end = None\n self.prefill_start = time.time()\n self.new_gens = []\n outs = self.generate(*args, **kwargs)\n if speed_profile:\n self.decode_end = time.time()\n s.sendall(\"finish\".encode())\n\n if profiler is not None:\n profiler.get_report()\n\n if speed_profile:\n self.decode_time = self.decode_end - self.prefill_end\n self.prefill_time = self.prefill_end - self.prefill_start\n self.decode_tokens = outs.shape[1] - kwargs.get(\"input_ids\").shape[1]\n self.prefill_tokens = kwargs.get(\"input_ids\").shape[1]\n print(\"Throughput Stats\")\n\n table = PrettyTable()\n\n # 设置列名\n table.field_names = [\"Stages\", \"Tokens\", \"Time\", \"TPS\"]\n\n table.align[\"Stages\"] = \"l\" # \"l\" 对应左对齐\n table.align[\"Tokens\"] = \"r\" # \"r\" 对应右对齐\n table.align[\"Time\"] = \"r\" # \"r\" 对应右对齐\n table.align[\"TPS\"] = \"r\" # \"r\" 对应右对齐\n table.add_row(\n [\n \"prefill\",\n self.prefill_tokens,\n round(self.prefill_time, 2),\n round(self.prefill_tokens / self.prefill_time, 2),\n ]\n )\n table.add_row(\n [\n \"decode\",\n self.decode_tokens,\n round(self.decode_time, 2),\n round(self.decode_tokens / self.decode_time, 2),\n ]\n )\n print(table)\n\n return outs\n\n def print_param_count(self):\n m_cnt = 0\n lmh = 0\n emb = 0\n for n, p in self.named_parameters():\n if \"lora\" in n:\n m_cnt += p.numel()\n elif \"lm_head\" in n:\n lmh += p.numel()\n elif \"emb\" in n:\n emb += p.numel()\n total = m_cnt + lmh + emb\n\n table = PrettyTable()\n\n # 设置列名\n table.field_names = [\"Modules\", \"Param #\", \"Param %\"]\n\n # 设置每列的对齐方式\n table.align[\"Modules\"] = \"l\" # \"l\" 对应左对齐\n table.align[\"Param #\"] = \"r\" # \"r\" 对应右对齐\n table.align[\"Param %\"] = \"r\" # \"r\" 对应右对齐\n table.add_row([\"word emb\", emb, round(emb / total * 100, 2)])\n table.add_row([\"PrivateLoRA M\", m_cnt, 
round(m_cnt / total * 100, 2)])\n table.add_row([\"lm head\", lmh, round(lmh / total * 100, 2)])\n print(\"Param statistics\")\n print(table)" } ]
from demo.model import PLLlamaConfig, LlamaForDevice from pl_lib import CommProfiler from transformers import AutoTokenizer from pl_lib import init_tcp_b import torch import logging import argparse
3,636
parser = argparse.ArgumentParser() parser.add_argument( "weight_path", default=None, help="path to device model weight", ) parser.add_argument( "llama_path", default=None, help="root dir of huggingface llama model, should contain weight files and config", ) parser.add_argument( "--ip", default="127.0.0.1", help="socket ip of cloud", ) parser.add_argument( "--port", default=12345, help="socket port of cloud", ) parser.add_argument( "--device", default="cpu", help="device of model", ) parser.add_argument( "--debug", default=False, ) args = parser.parse_args() log_format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s" logging.basicConfig( level=logging.DEBUG if args.debug else logging.INFO, format=log_format ) if __name__ == "__main__": mock_small = True load_weights = False logging.info("start connecting...") s = init_tcp_b(args.ip, args.port) config = PLLlamaConfig.from_pretrained(args.llama_path) config.rcd = 128 config.rdc = 128 tokenizer = AutoTokenizer.from_pretrained(args.llama_path) logging.info("Initializing Model")
parser = argparse.ArgumentParser() parser.add_argument( "weight_path", default=None, help="path to device model weight", ) parser.add_argument( "llama_path", default=None, help="root dir of huggingface llama model, should contain weight files and config", ) parser.add_argument( "--ip", default="127.0.0.1", help="socket ip of cloud", ) parser.add_argument( "--port", default=12345, help="socket port of cloud", ) parser.add_argument( "--device", default="cpu", help="device of model", ) parser.add_argument( "--debug", default=False, ) args = parser.parse_args() log_format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s" logging.basicConfig( level=logging.DEBUG if args.debug else logging.INFO, format=log_format ) if __name__ == "__main__": mock_small = True load_weights = False logging.info("start connecting...") s = init_tcp_b(args.ip, args.port) config = PLLlamaConfig.from_pretrained(args.llama_path) config.rcd = 128 config.rdc = 128 tokenizer = AutoTokenizer.from_pretrained(args.llama_path) logging.info("Initializing Model")
model = LlamaForDevice(config)
1
2023-12-25 06:28:04+00:00
8k
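Editor's note: `LlamaForDevice.forward` in this record's context computes the standard next-token loss by shifting logits and labels one position apart before cross-entropy. A minimal PyTorch sketch of that shift follows; it assumes only that `torch` is installed, and the tensor shapes are toy values rather than anything from the demo.

```python
import torch
from torch.nn import CrossEntropyLoss

def causal_lm_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Next-token loss: the logits at position t are scored against the label at t+1."""
    shift_logits = logits[..., :-1, :].contiguous()   # drop the last position's prediction
    shift_labels = labels[..., 1:].contiguous()       # drop the first label
    loss_fct = CrossEntropyLoss()                     # ignore_index defaults to -100, as in HF
    return loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                    shift_labels.view(-1))

# toy check: batch of 2, sequence of 5, vocabulary of 10
logits = torch.randn(2, 5, 10)
labels = torch.randint(0, 10, (2, 5))
print(causal_lm_loss(logits, labels).item())
```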
kokiez/solana-sniper
jupiter/jupiter.py
[ { "identifier": "sendWebhook", "path": "webhook.py", "snippet": "def sendWebhook(title_type_info, description):\r\n global error_webhook\r\n global webhook_url\r\n title = \"\"\r\n title_type = title_type_info.split(\"|\")\r\n if title_type[0] == \"msg\":\r\n title = title_type[1]\r\n color = colors[\"Green\"]\r\n webhook(title, color, description, webhook_url)\r\n \r\n elif title_type[0] == \"msg_b\":\r\n title = title_type[1]\r\n color = colors[\"DarkAqua\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"msg_s\":\r\n title = title_type[1]\r\n color = colors[\"DarkAqua\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"i_s\": #invest or slippage was changed etc\r\n title = title_type[1]\r\n color = colors[\"DarkPurple\"]\r\n webhook(title, color, description, webhook_url)\r\n \r\n elif title_type[0] == \"e\": #error\r\n title = title_type[1]\r\n color = colors[\"DarkRed\"]\r\n webhook(title, color, description, error_webhook)\r\n\r\n elif title_type[0] == \"a\": #alert\r\n title = title_type[1]\r\n color = colors[\"LuminousVividPink\"]\r\n webhook(title, color, description, webhook_url)\r\n\r\n elif title_type[0] == \"w\": #wallet info\r\n title = title_type[1]\r\n color = colors[\"Gold\"]\r\n webhook(title, color, description, webhook_url)\r" }, { "identifier": "soldToken", "path": "alreadyBought.py", "snippet": "def soldToken(desired_token_address):\r\n print(\"Deleting saved token from previousSELLBUYINFO...\")\r\n file_path = os.path.join(sys.path[0], 'data', 'previousSELLBUYINFO.json')\r\n # Load the JSON file\r\n with open(file_path, 'r') as file:\r\n data = json.load(file)\r\n\r\n # Check if the 'desired_token_address' key exists in the JSON object\r\n if desired_token_address in data:\r\n # If it exists, delete it\r\n del data[desired_token_address]\r\n\r\n # Write the updated data back to the file\r\n with open(file_path, 'w') as file:\r\n json.dump(data, file, indent=4)\r\n\r\n print(\"Deleting saved token from alreadyBoughtTokens...\")\r\n file_path = os.path.join(sys.path[0], 'data', 'alreadyBoughtTokens.json')\r\n # Load the JSON file\r\n with open(file_path, 'r') as file:\r\n data = json.load(file)\r\n\r\n # Check if the 'tokens' key exists in the JSON object\r\n if 'tokens' in data:\r\n # If it exists, check if the token is in the list\r\n if desired_token_address in data['tokens']:\r\n # If it is, remove it\r\n data['tokens'].remove(desired_token_address)\r\n\r\n # Write the updated data back to the file\r\n with open(file_path, 'w') as file:\r\n json.dump(data, file, indent=4)\r" }, { "identifier": "sell", "path": "jupiter/sell_swap.py", "snippet": "def sell(ctx, payer, TOKEN_TO_SWAP_SELL, config):\r\n token_symbol, SOl_Symbol = getSymbol(TOKEN_TO_SWAP_SELL)\r\n\r\n slippageBps = int(config.get(\"INVESTMENT\", \"slippage\"))\r\n computeUnitPriceMicroLamports = int(config.get(\"INVESTMENT\", \"computeUnitPriceMicroLamports\"))\r\n\r\n RPC_HTTPS_URL = config.get(\"INFURA_URL\", \"infuraURL\")\r\n\r\n txnBool = True\r\n while txnBool:\r\n Attempt = True\r\n while Attempt:\r\n balanceBool = True\r\n while balanceBool:\r\n tokenPk = Pubkey.from_string(TOKEN_TO_SWAP_SELL)\r\n\r\n accountProgramId = ctx.get_account_info_json_parsed(tokenPk)\r\n programid_of_token = accountProgramId.value.owner\r\n\r\n accounts = ctx.get_token_accounts_by_owner_json_parsed(payer.pubkey(),TokenAccountOpts(program_id=programid_of_token)).value\r\n for account in accounts:\r\n mint = 
account.account.data.parsed['info']['mint']\r\n if mint == TOKEN_TO_SWAP_SELL:\r\n tokenBalanceLamports = int(account.account.data.parsed['info']['tokenAmount']['amount'])\r\n break\r\n if int(tokenBalanceLamports) > 0:\r\n balanceBool = False\r\n else:\r\n print(\"No Balance, Retrying...\")\r\n time.sleep(5)\r\n# ========================================================================================================\r\n # Swap Info\r\n print(\"Token Balance [Lamports]: \",tokenBalanceLamports)\r\n print(\"3. Get Route for swap...\")\r\n\r\n quote_response = requests.get('https://quote-api.jup.ag/v6/quote', params={\r\n 'inputMint': TOKEN_TO_SWAP_SELL,\r\n 'outputMint': 'So11111111111111111111111111111111111111112',\r\n 'amount': tokenBalanceLamports,\r\n 'slippageBps': slippageBps\r\n }).json()\r\n# ========================================================================================================\r\n print(\"4. Get the serialized transactions to perform the swap...\")\r\n # Get the serialized transactions to perform the swap\r\n url = \"https://quote-api.jup.ag/v6/swap\"\r\n payload = json.dumps({\r\n \"userPublicKey\": str(payer.pubkey()),\r\n \"wrapAndUnwrapSol\": True,\r\n 'computeUnitPriceMicroLamports': computeUnitPriceMicroLamports,\r\n \"quoteResponse\": quote_response\r\n })\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'Accept': 'application/json'\r\n }\r\n response = requests.request(\"POST\", url, headers=headers, data=payload)\r\n # print(response.text)\r\n if (response.status_code == 200):\r\n instructions_all = (response).json()\r\n # print(swap_instructions)\r\n Attempt = False\r\n else:\r\n print(\"Retying...\")\r\n swap_instruction = instructions_all[\"swapTransaction\"]\r\n raw_tx = VersionedTransaction.from_bytes(base64.b64decode(swap_instruction))\r\n \r\n# ========================================================================================================\r\n print(\"5. Sign Transaction...\")\r\n signature = payer.sign_message(to_bytes_versioned(raw_tx.message))\r\n signed_tx = VersionedTransaction.populate(raw_tx.message, [signature])\r\n# ========================================================================================================\r\n try:\r\n print(\"6. 
Execute Transaction...\")\r\n start_time = time.time()\r\n txid = (ctx.send_transaction(\r\n signed_tx\r\n # opts=TxOpts(skip_confirmation=False, skip_preflight=True, max_retries=2),\r\n\r\n ))\r\n\r\n print(f\"Transaction ID: {txid.value}\")\r\n print(f\"Transaction URL: https://solscan.io/tx/{txid.value}\")\r\n\r\n txid_string_sig = Signature.from_string(str(txid.value))\r\n checkTxn = True\r\n while checkTxn:\r\n try:\r\n status = ctx.get_transaction(txid_string_sig,\"jsonParsed\", max_supported_transaction_version=0)\r\n FeesUsed = (status.value.transaction.meta.fee) / LAMPORTS_PER_SOL\r\n if status.value.transaction.meta.err == None:\r\n print(\"Transaction Success\")\r\n print(\"Transaction Fees: {:.10f} SOL\".format(FeesUsed))\r\n\r\n end_time = time.time()\r\n execution_time = end_time - start_time\r\n print(f\"Execution time: {execution_time} seconds\")\r\n\r\n txnBool = False\r\n checkTxn = False\r\n\r\n return txid_string_sig\r\n\r\n else:\r\n print(\"Transaction Failed: \", status.value.transaction.meta.err)\r\n end_time = time.time()\r\n execution_time = end_time - start_time\r\n print(f\"Execution time: {execution_time} seconds\")\r\n checkTxn = False\r\n except Exception as e:\r\n sendWebhook(f\"e|Sell ERROR {token_symbol}\",f\"{e}\")\r\n time.sleep(2)\r\n print(\"Sleeping...\")\r\n\r\n \r\n \r\n except RPCException as e:\r\n print(f\"Error: [{e.args[0].message}]...\\nRetrying...\")\r\n sendWebhook(f\"e|SELL ERROR {token_symbol}\",f\"{e.args[0].message}\")\r\n # txnBool = False\r\n # return \"failed\"\r\n except Exception as e:\r\n print(f\"Error: [{e}]...\\nEnd...\")\r\n txnBool = False\r\n return \"failed\"\r" }, { "identifier": "get_price", "path": "birdeye.py", "snippet": "def get_price(token_address):\r\n url = f\"https://api.dexscreener.com/latest/dex/tokens/{token_address}\"\r\n exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']\r\n response = requests.get(url).json()\r\n \r\n if token_address not in exclude:\r\n for pair in response['pairs']:\r\n if pair['quoteToken']['address'] == 'So11111111111111111111111111111111111111112':\r\n return float(pair['priceUsd'])\r\n else:\r\n return response['pairs'][0]['priceUsd']\r\n return None\r" }, { "identifier": "getSymbol", "path": "birdeye.py", "snippet": "def getSymbol(token):\r\n # usdc and usdt\r\n exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']\r\n \r\n if token not in exclude:\r\n url = f\"https://api.dexscreener.com/latest/dex/tokens/{token}\"\r\n\r\n Token_Symbol = \"\"\r\n Sol_symbol=\"\"\r\n try:\r\n response = requests.get(url)\r\n\r\n # Check if the request was successful (status code 200)\r\n if response.status_code == 200:\r\n resp = response.json()\r\n print(\"Response:\",resp['pairs'][0]['baseToken']['symbol'])\r\n for pair in resp['pairs']:\r\n quoteToken = pair['quoteToken']['symbol']\r\n\r\n if quoteToken == 'SOL':\r\n Token_Symbol = pair['baseToken']['symbol']\r\n Sol_symbol = quoteToken\r\n return Token_Symbol, Sol_symbol\r\n\r\n\r\n else:\r\n print(f\"[getSymbol] Request failed with status code {response.status_code}\")\r\n\r\n except requests.exceptions.RequestException as e:\r\n print(f\"[getSymbol] error occurred: {e}\")\r\n except: \r\n a = 1\r\n\r\n return Token_Symbol, Sol_symbol\r\n else:\r\n if token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':\r\n return \"USDC\", \"SOL\"\r\n elif token == 'EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v':\r\n return \"USDT\", \"SOL\"\r" }, { 
"identifier": "limit_order", "path": "monitor_price_strategy.py", "snippet": "def limit_order(bought_token_price,desired_token_address, take_profit_ratio, execution_time, txB):\r\n token_symbol, SOl_Symbol = getSymbol(desired_token_address)\r\n # CALCULATE SELL LIMIT\r\n sell_limit_token_price = bought_token_price * take_profit_ratio\r\n \r\n print(\"-\" * 79)\r\n print(f\"| {'Bought Price':<12} | {'Sell Limit':<12} | {'Tx Buy':<50} |\")\r\n print(\"-\" * 79)\r\n print(f\"|{bought_token_price:.12f} | {sell_limit_token_price:.12f} {txB:<50} |\")\r\n print(\"-\" * 79)\r\n\r\n sendWebhook(f\"msg_b|BUY INFO {token_symbol}\",f\"Bought Price: {bought_token_price:.12f}\\n**Sell Limit: {sell_limit_token_price:.15f}**\\nTotal Buy Execution time: {execution_time} seconds\\nBuy TXN: https://solscan.io/tx/{txB} |\")\r\n\r\n # LOOP = CHECK IF PRICE >= SELL LIMIT | checks price every 5 seconds\r\n priceLow = True\r\n # while priceLow and isTimePassed(time_limit) == False:\r\n while priceLow:\r\n # Check if time limit has been passed for the token bought or not\r\n bought_token_curr_price = get_price(desired_token_address)\r\n if bought_token_curr_price >= sell_limit_token_price:\r\n print(f\"Price limit reached: {bought_token_curr_price}\")\r\n priceLow = False # break the loop\r\n else:\r\n time.sleep(15)\r\n\r\n return priceLow\r" }, { "identifier": "trailing_stop_loss_func", "path": "monitor_price_strategy.py", "snippet": "def trailing_stop_loss_func(bought_token_price,desired_token_address, trailing_stop_ratio, execution_time, txB):\r\n token_symbol, SOl_Symbol = getSymbol(desired_token_address)\r\n\r\n\r\n # Set initial trailing stop loss limit\r\n trailing_ratio_of_Price = (trailing_stop_ratio / 100) * bought_token_price\r\n initial_trailing_stop_loss_token_price = bought_token_price - trailing_ratio_of_Price\r\n \r\n print(\"-\" * 79)\r\n print(f\"| {'Bought Price':<12} | {'Initial Trailing Stop Loss Limit':<12} | {'Tx Buy':<50} |\")\r\n print(\"-\" * 79)\r\n print(f\"|{bought_token_price:.12f} | {initial_trailing_stop_loss_token_price:.12f} | {txB:<50} |\")\r\n print(\"-\" * 79)\r\n\r\n sendWebhook(f\"msg_b|BUY [TRAILING] INFO {token_symbol}\",f\"Bought Price: {bought_token_price:.12f}\\n**Initial Trailing Stop Loss Limit: {initial_trailing_stop_loss_token_price:.15f}**\\nTotal Buy Execution time: {execution_time} seconds\\nBuy TXN: https://solscan.io/tx/{txB} |\")\r\n\r\n # LOOP = CHECK IF PRICE >= SELL LIMIT | checks price every 5 seconds\r\n priceLow = True\r\n # while priceLow and isTimePassed(time_limit) == False:\r\n time.sleep(5)\r\n print(f\"+|+ {'Trailing Stop Loss [Update]':<12} +|+\")\r\n print(\"-\" * 50)\r\n startingPrice=bought_token_price\r\n\r\n trailing_ratio_of_Price = (trailing_stop_ratio / 100) * startingPrice\r\n latest_sell_stop_loss_token_price = startingPrice - trailing_ratio_of_Price \r\n while priceLow:\r\n\r\n bought_token_curr_price = get_price(desired_token_address)\r\n\r\n # if time limit has been passed for the token bought or not\r\n if bought_token_curr_price <= latest_sell_stop_loss_token_price:\r\n print(f\"Trailing Price limit reached: {bought_token_curr_price:.12f}\")\r\n priceLow = False # break the loop\r\n elif bought_token_curr_price > startingPrice:\r\n \r\n trailing_ratio_of_Price = (trailing_stop_ratio / 100) * bought_token_curr_price\r\n latest_sell_stop_loss_token_price = bought_token_curr_price - trailing_ratio_of_Price \r\n\r\n startingPrice = bought_token_curr_price\r\n else:\r\n time.sleep(15)\r\n print(f\"=|= {'Bought Price':<12} =|= {'Latest 
Trailing Stop Loss Limit':<12} =|=\")\r\n print(\"-\" * 79)\r\n print(f\"=|={bought_token_price:.12f} =|= {latest_sell_stop_loss_token_price:.12f} =|=\")\r\n print(\"-\" * 50)\r\n\r\n print(\"-\" * 79)\r\n print(f\"| {'Bought Price':<12} | {'Latest Trailing Stop Loss Limit':<12} | {'Tx Buy':<50} |\")\r\n print(\"-\" * 79)\r\n print(f\"|{bought_token_price:.12f} | {latest_sell_stop_loss_token_price:.12f} | {txB:<50} |\")\r\n print(\"-\" * 79)\r\n\r\n sendWebhook(f\"a|BUY [TRAILING] INFO {token_symbol}\",f\"Bought Price: {bought_token_price:.12f}\\n**Latest Trailing Stop Loss Limit: {latest_sell_stop_loss_token_price:.15f}**\\nTotal Buy Execution time: {execution_time} seconds\\nBuy TXN: https://solscan.io/tx/{txB} |\")\r\n\r\n return priceLow\r" }, { "identifier": "take_profit_and_trailing_stop", "path": "monitor_price_strategy.py", "snippet": "def take_profit_and_trailing_stop(bought_token_price,desired_token_address, trailing_stop_ratio, take_profit_ratio, execution_time, txB):\r\n token_symbol, SOl_Symbol = getSymbol(desired_token_address)\r\n \r\n # CALCULATE SELL LIMIT\r\n sell_limit_token_price = bought_token_price * take_profit_ratio\r\n\r\n # Set initial trailing stop loss limit\r\n trailing_ratio_of_Price = (trailing_stop_ratio / 100) * bought_token_price\r\n initial_trailing_stop_loss_token_price = bought_token_price - trailing_ratio_of_Price\r\n \r\n print(\"-\" * 79)\r\n print(f\"| {'Bought Price':<12} | {'Sell Limit Price':<12} | {'Initial Trailing Stop Loss Limit':<12} | {'Tx Buy':<50} |\")\r\n print(\"-\" * 79)\r\n print(f\"|{bought_token_price:.12f} | {sell_limit_token_price:.12f} | {initial_trailing_stop_loss_token_price:.12f} | {txB:<50} |\")\r\n print(\"-\" * 79)\r\n\r\n sendWebhook(f\"msg_b|BUY [TRAILING+Limit] INFO {token_symbol}\",f\"Bought Price: {bought_token_price:.12f}\\n\\n**Sell Take Profit Limit: {sell_limit_token_price:.15f}**\\n**Initial Trailing Stop Loss Limit: {initial_trailing_stop_loss_token_price:.15f}**\\n\\nTotal Buy Execution time: {execution_time} seconds\\nBuy TXN: https://solscan.io/tx/{txB} |\")\r\n\r\n # LOOP = CHECK IF PRICE >= SELL LIMIT | checks price every 5 seconds\r\n priceLow = True\r\n # while priceLow and isTimePassed(time_limit) == False:\r\n time.sleep(5)\r\n print(f\"+|+ {'TRAILING+Limit [Update]':<12} +|+\")\r\n print(\"-\" * 50)\r\n startingPrice=bought_token_price\r\n\r\n trailing_ratio_of_Price = (trailing_stop_ratio / 100) * startingPrice\r\n latest_sell_stop_loss_token_price = startingPrice - trailing_ratio_of_Price\r\n Up = 0 \r\n while priceLow:\r\n\r\n bought_token_curr_price = get_price(desired_token_address)\r\n\r\n # if time limit has been passed for the token bought or not\r\n if bought_token_curr_price >= sell_limit_token_price:\r\n print(f\"Sell limit reached: {bought_token_curr_price:.12f}\")\r\n priceLow = False # break the loop\r\n elif bought_token_curr_price <= latest_sell_stop_loss_token_price:\r\n print(f\"Trailing Price limit reached: {bought_token_curr_price:.12f}\")\r\n priceLow = False # break the loop\r\n elif bought_token_curr_price > startingPrice:\r\n \r\n trailing_ratio_of_Price = (trailing_stop_ratio / 100) * bought_token_curr_price\r\n latest_sell_stop_loss_token_price = bought_token_curr_price - trailing_ratio_of_Price \r\n\r\n startingPrice = bought_token_curr_price\r\n \r\n if priceLow != False:\r\n print(f\"=|= {'Bought Price':<12} =|= {'Current Price':<12} =|= {'Sell Limit Price':<12} =|= {'Latest Trailing Stop Loss Limit':<12} =|======== {Up}\")\r\n print(\"-\" * 79)\r\n 
print(f\"=|={bought_token_price:.12f} =|= {bought_token_curr_price:.12f} =|= {sell_limit_token_price:.12f} =|= {latest_sell_stop_loss_token_price:.12f} =|=\")\r\n print(\"-\" * 50)\r\n time.sleep(15)\r\n Up = Up + 1\r\n\r\n\r\n print(\"-\" * 79)\r\n print(f\"| {'Bought Price':<12} | {'Sell Limit Price':<12} | {'Latest Trailing Stop Loss Limit':<12} | {'Tx Buy':<50} |\")\r\n print(\"-\" * 79)\r\n print(f\"|{bought_token_price:.12f} | {sell_limit_token_price:.12f} | {latest_sell_stop_loss_token_price:.12f} | {txB:<50} |\")\r\n print(\"-\" * 79)\r\n\r\n sendWebhook(f\"a|BUY [TRAILING+Limit] INFO {token_symbol}\",f\"Bought Price: {bought_token_price:.12f}\\n**Sell Limit Price: {sell_limit_token_price:.15f}**\\n**Latest Trailing Stop Loss Limit: {latest_sell_stop_loss_token_price:.15f}**\\nTotal Buy Execution time: {execution_time} seconds\\nBuy TXN: https://solscan.io/tx/{txB} |\")\r\n\r\n\r\n return priceLow" } ]
from webhook import sendWebhook from alreadyBought import soldToken from jupiter.sell_swap import sell from birdeye import get_price, getSymbol from monitor_price_strategy import limit_order, trailing_stop_loss_func, take_profit_and_trailing_stop import time, sys, os
5,525
# from Wallet_Info import get_wallet_Info def jupiter_swap(config, ctx, payer, desired_token_address, txB, execution_time, limit_order_sell_Bool, take_profit_ratio, trailing_stop_Bool, trailing_stop_ratio, Limit_and_Trailing_Stop_Bool, bought_token_price): token_symbol, SOl_Symbol = getSymbol(desired_token_address) txB = str(txB) # saveTokenTime() sell_NOW = True if limit_order_sell_Bool: sell_NOW = limit_order(bought_token_price,desired_token_address, take_profit_ratio, execution_time, txB) elif trailing_stop_Bool: sell_NOW = trailing_stop_loss_func(bought_token_price,desired_token_address, trailing_stop_ratio, execution_time, txB) elif Limit_and_Trailing_Stop_Bool: sell_NOW = take_profit_and_trailing_stop(bought_token_price,desired_token_address, trailing_stop_ratio, take_profit_ratio, execution_time, txB) # Call Sell Method - returns transaction hash (txS= tx for sell) if sell_NOW == False: bought_token_curr_price = get_price(desired_token_address) start_time = time.time() txS = sell(ctx, payer, desired_token_address, config) end_time = time.time() execution_time = end_time - start_time print(f"Total Sell Execution time: {execution_time} seconds") if str(txS) != 'failed': txS = str(txS) print("-" * 79) print(f"| {'Sold Price':<15} | {'Tx Sell':<40} |") print("-" * 79) print(f"| {bought_token_curr_price:.12f} | {txS:<40} |") sendWebhook(f"msg_s|SELL INFO {token_symbol}",f"Token Address: {desired_token_address}\nSold at: {bought_token_curr_price:.12f}\nTotal Sell Execution time: {execution_time} seconds\nSell TXN: https://solscan.io/tx/{txS}\n------------------- END -------------------") print("-" * 79)
# from Wallet_Info import get_wallet_Info def jupiter_swap(config, ctx, payer, desired_token_address, txB, execution_time, limit_order_sell_Bool, take_profit_ratio, trailing_stop_Bool, trailing_stop_ratio, Limit_and_Trailing_Stop_Bool, bought_token_price): token_symbol, SOl_Symbol = getSymbol(desired_token_address) txB = str(txB) # saveTokenTime() sell_NOW = True if limit_order_sell_Bool: sell_NOW = limit_order(bought_token_price,desired_token_address, take_profit_ratio, execution_time, txB) elif trailing_stop_Bool: sell_NOW = trailing_stop_loss_func(bought_token_price,desired_token_address, trailing_stop_ratio, execution_time, txB) elif Limit_and_Trailing_Stop_Bool: sell_NOW = take_profit_and_trailing_stop(bought_token_price,desired_token_address, trailing_stop_ratio, take_profit_ratio, execution_time, txB) # Call Sell Method - returns transaction hash (txS= tx for sell) if sell_NOW == False: bought_token_curr_price = get_price(desired_token_address) start_time = time.time() txS = sell(ctx, payer, desired_token_address, config) end_time = time.time() execution_time = end_time - start_time print(f"Total Sell Execution time: {execution_time} seconds") if str(txS) != 'failed': txS = str(txS) print("-" * 79) print(f"| {'Sold Price':<15} | {'Tx Sell':<40} |") print("-" * 79) print(f"| {bought_token_curr_price:.12f} | {txS:<40} |") sendWebhook(f"msg_s|SELL INFO {token_symbol}",f"Token Address: {desired_token_address}\nSold at: {bought_token_curr_price:.12f}\nTotal Sell Execution time: {execution_time} seconds\nSell TXN: https://solscan.io/tx/{txS}\n------------------- END -------------------") print("-" * 79)
soldToken(desired_token_address)
1
2023-12-26 11:40:05+00:00
8k
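Editor's note: the `trailing_stop_loss_func` snippet in this record keeps raising its stop price whenever the token makes a new high and exits once the price falls back to the stop. Below is a dependency-free sketch of that ratcheting logic, with the price feed passed in as a plain list instead of the dexscreener polling used in the repository.

```python
def trailing_stop(prices, entry_price, trailing_pct=10.0):
    """Return the exit price for a simple trailing stop, or None if never hit.

    prices: iterable of observed prices after entry.
    The stop ratchets up whenever a new high is seen and never moves down.
    """
    high = entry_price
    stop = high * (1 - trailing_pct / 100)
    for p in prices:
        if p <= stop:            # stop hit -> exit at this price
            return p
        if p > high:             # new high -> raise the stop
            high = p
            stop = high * (1 - trailing_pct / 100)
    return None

# toy example: price runs up, then retraces more than 10% from its high
print(trailing_stop([1.05, 1.20, 1.50, 1.40, 1.34], entry_price=1.00))
```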
kraina-ai/quackosm
quackosm/pbf_file_reader.py
[ { "identifier": "FEATURES_INDEX", "path": "quackosm/_constants.py", "snippet": "FEATURES_INDEX = \"feature_id\"" }, { "identifier": "GEOMETRY_COLUMN", "path": "quackosm/_constants.py", "snippet": "GEOMETRY_COLUMN = \"geometry\"" }, { "identifier": "WGS84_CRS", "path": "quackosm/_constants.py", "snippet": "WGS84_CRS = \"EPSG:4326\"" }, { "identifier": "GroupedOsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[OsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[GroupedOsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(\n osm_tags_filter: Union[\n OsmTagsFilter, GroupedOsmTagsFilter, Iterable[OsmTagsFilter], Iterable[GroupedOsmTagsFilter]\n ]\n) -> OsmTagsFilter:\ndef _merge_grouped_osm_tags_filter(grouped_filter: GroupedOsmTagsFilter) -> OsmTagsFilter:\ndef _merge_multiple_osm_tags_filters(osm_tags_filters: Iterable[OsmTagsFilter]) -> OsmTagsFilter:" }, { "identifier": "OsmWayPolygonConfig", "path": "quackosm/_osm_way_polygon_features.py", "snippet": "class OsmWayPolygonConfig(NamedTuple):\n \"\"\"OSM Way polygon features config object.\"\"\"\n\n all: Iterable[str]\n allowlist: dict[str, Iterable[str]]\n denylist: dict[str, Iterable[str]]" }, { "identifier": "parse_dict_to_config_object", "path": "quackosm/_osm_way_polygon_features.py", "snippet": "def parse_dict_to_config_object(raw_config: dict[str, Any]) -> OsmWayPolygonConfig:\n all_tags = raw_config.get(\"all\", [])\n allowlist_tags = raw_config.get(\"allowlist\", {})\n denylist_tags = raw_config.get(\"denylist\", {})\n if not is_expected_type(all_tags, Iterable[str]):\n raise ValueError(f\"Wrong type of key: all ({type(all_tags)})\")\n\n if not is_expected_type(allowlist_tags, dict[str, Iterable[str]]):\n raise ValueError(f\"Wrong type of key: all ({type(allowlist_tags)})\")\n\n if not is_expected_type(denylist_tags, dict[str, Iterable[str]]):\n raise ValueError(f\"Wrong type of key: denylist ({type(denylist_tags)})\")\n\n return OsmWayPolygonConfig(\n all=cast(Iterable[str], all_tags),\n allowlist=cast(dict[str, Iterable[str]], allowlist_tags),\n denylist=cast(dict[str, Iterable[str]], denylist_tags),\n )" }, { "identifier": "TaskProgressBar", "path": "quackosm/_rich_progress.py", "snippet": "class TaskProgressBar:\n def __init__(self, step_name: str, step_number: str):\n self.step_name = step_name\n self.step_number = step_number\n self.progress = None\n\n def __enter__(self):\n try: # pragma: no cover\n from rich.progress import (\n BarColumn,\n MofNCompleteColumn,\n Progress,\n ProgressColumn,\n SpinnerColumn,\n Task,\n Text,\n TextColumn,\n TimeElapsedColumn,\n TimeRemainingColumn,\n )\n\n class SpeedColumn(ProgressColumn):\n def render(self, task: \"Task\") -> Text:\n if task.speed is None:\n return Text(\"\")\n elif task.speed >= 1:\n return Text(f\"{task.speed:.2f} it/s\")\n else:\n return Text(f\"{1/task.speed:.2f} s/it\") # noqa: FURB126\n\n self.progress = Progress(\n SpinnerColumn(),\n TextColumn(f\"[{self.step_number: >4}/{TOTAL_STEPS}]\"),\n TextColumn(\n \"[progress.description]{task.description}\"\n \" [progress.percentage]{task.percentage:>3.0f}%\"\n ),\n BarColumn(),\n MofNCompleteColumn(),\n TextColumn(\"•\"),\n TimeElapsedColumn(),\n TextColumn(\"<\"),\n TimeRemainingColumn(),\n TextColumn(\"•\"),\n SpeedColumn(),\n 
transient=False,\n speed_estimate_period=1800,\n )\n\n self.progress.__enter__()\n\n except ImportError:\n self.progress = None\n\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n if self.progress:\n self.progress.__exit__(exc_type, exc_value, exc_tb)\n\n self.progress = None\n\n def track(self, iterable: Iterable):\n if self.progress is not None:\n for i in self.progress.track(list(iterable), description=self.step_name):\n yield i\n else:\n for i in iterable:\n yield i" }, { "identifier": "TaskProgressSpinner", "path": "quackosm/_rich_progress.py", "snippet": "class TaskProgressSpinner:\n def __init__(self, step_name: str, step_number: str):\n self.step_name = step_name\n self.step_number = step_number\n self.progress = None\n\n def __enter__(self):\n try: # pragma: no cover\n from rich.progress import Progress, SpinnerColumn, TextColumn, TimeElapsedColumn\n\n self.progress = Progress(\n SpinnerColumn(),\n TextColumn(f\"[{self.step_number: >4}/{TOTAL_STEPS}]\"),\n TextColumn(\"[progress.description]{task.description}\"),\n TextColumn(\"•\"),\n TimeElapsedColumn(),\n transient=False,\n )\n\n self.progress.__enter__()\n self.progress.add_task(description=self.step_name, total=None)\n\n except ImportError:\n self.progress = None\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n if self.progress:\n self.progress.__exit__(exc_type, exc_value, exc_tb)\n\n self.progress = None" }, { "identifier": "is_expected_type", "path": "quackosm/_typing.py", "snippet": "def is_expected_type(value: object, expected_type: Any) -> bool:\n \"\"\"\n Check if an object is a given type.\n\n Uses `typeguard` library to check objects using `typing` definitions.\n\n Args:\n value (object): Value to be checked against `expected_type`.\n expected_type (Any): A class or generic type instance.\n\n Returns:\n bool: Flag whether the object is an instance of the required type.\n \"\"\"\n result = False\n\n with suppress(TypeCheckError):\n check_type(value, expected_type)\n result = True\n\n return result" } ]
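Editor's note: `TaskProgressBar` and `TaskProgressSpinner` above fall back to plain iteration when `rich` cannot be imported. A compact sketch of that graceful-degradation pattern is shown below; it uses `rich.progress.track` from rich's public API, and the wrapper name here is illustrative rather than quackosm's.

```python
def track(iterable, description="Working"):
    """Yield items with a rich progress bar when available, silently otherwise."""
    items = list(iterable)
    try:
        from rich.progress import track as rich_track
        yield from rich_track(items, description=description)
    except ImportError:
        yield from items

for _ in track(range(5), description="demo"):
    pass
```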
import hashlib import json import shutil import tempfile import warnings import duckdb import geoarrow.pyarrow as ga import geopandas as gpd import psutil import pyarrow as pa import pyarrow.parquet as pq import shapely.wkt as wktlib import quackosm._geo_arrow_io as io from collections.abc import Iterable from math import floor from pathlib import Path from typing import Any, Literal, NamedTuple, Optional, Union, cast from shapely.geometry.base import BaseGeometry from quackosm._constants import FEATURES_INDEX, GEOMETRY_COLUMN, WGS84_CRS from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter, merge_osm_tags_filter from quackosm._osm_way_polygon_features import OsmWayPolygonConfig, parse_dict_to_config_object from quackosm._rich_progress import ( # type: ignore[attr-defined] TaskProgressBar, TaskProgressSpinner, ) from quackosm._typing import is_expected_type
4,390
) self._delete_directories( tmp_dir_name, [ "nodes_valid_with_tags", ], ) filtered_ways_with_linestrings = self._get_filtered_ways_with_linestrings( osm_parquet_files=converted_osm_parquet_files, ways_refs_with_nodes_structs=ways_refs_with_nodes_structs, tmp_dir_name=tmp_dir_name, ) required_ways_with_linestrings = self._get_required_ways_with_linestrings( osm_parquet_files=converted_osm_parquet_files, ways_refs_with_nodes_structs=ways_refs_with_nodes_structs, tmp_dir_name=tmp_dir_name, ) self._delete_directories( tmp_dir_name, [ "ways_required_grouped", "ways_required_ids", "ways_with_unnested_nodes_refs", "ways_refs_with_nodes_structs", "required_ways_ids_grouped", "required_ways_grouped", "required_ways_tmp", "filtered_ways_ids_grouped", "filtered_ways_grouped", "filtered_ways_tmp", ], ) filtered_ways_with_proper_geometry = self._get_filtered_ways_with_proper_geometry( converted_osm_parquet_files, filtered_ways_with_linestrings, tmp_dir_name ) self._delete_directories( tmp_dir_name, [ "ways_prepared_ids", "ways_filtered_ids", "ways_all_with_tags", "filtered_ways_with_linestrings", ], ) filtered_relations_with_geometry = self._get_filtered_relations_with_geometry( converted_osm_parquet_files, required_ways_with_linestrings, tmp_dir_name ) self._delete_directories( tmp_dir_name, [ "relations_all_with_tags", "relations_with_unnested_way_refs", "relations_filtered_ids", "required_ways_with_linestrings", "valid_relation_parts", "relation_inner_parts", "relation_outer_parts", "relation_outer_parts_with_holes", "relation_outer_parts_without_holes", ], ) self._concatenate_results_to_geoparquet( PbfFileReader.ParsedOSMFeatures( nodes=filtered_nodes_with_geometry, ways=filtered_ways_with_proper_geometry, relations=filtered_relations_with_geometry, ), tmp_dir_name=tmp_dir_name, save_file_path=result_file_path, explode_tags=explode_tags, ) return result_file_path def _generate_geoparquet_result_file_path( self, pbf_file_path: Union[str, Path], explode_tags: bool, filter_osm_ids: list[str], ) -> Path: pbf_file_name = Path(pbf_file_path).name.removesuffix(".osm.pbf") osm_filter_tags_hash_part = "nofilter" if self.tags_filter is not None: h = hashlib.new("sha256") h.update(json.dumps(self.tags_filter).encode()) osm_filter_tags_hash_part = h.hexdigest() clipping_geometry_hash_part = "noclip" if self.geometry_filter is not None: h = hashlib.new("sha256") h.update(wktlib.dumps(self.geometry_filter).encode()) clipping_geometry_hash_part = h.hexdigest() exploded_tags_part = "exploded" if explode_tags else "compact" filter_osm_ids_hash_part = "" if filter_osm_ids: h = hashlib.new("sha256") h.update(json.dumps(sorted(set(filter_osm_ids))).encode()) filter_osm_ids_hash_part = f"_{h.hexdigest()}" result_file_name = ( f"{pbf_file_name}_{osm_filter_tags_hash_part}" f"_{clipping_geometry_hash_part}_{exploded_tags_part}{filter_osm_ids_hash_part}.geoparquet" ) return Path(self.working_directory) / result_file_name def _prefilter_elements_ids( self, elements: "duckdb.DuckDBPyRelation", tmp_dir_name: str, filter_osm_ids: list[str] ) -> ConvertedOSMParquetFiles: sql_filter = self._generate_osm_tags_sql_filter() filtered_tags_clause = self._generate_filtered_tags_clause() is_intersecting = self.geometry_filter is not None
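Editor's note: `_generate_geoparquet_result_file_path` above derives a cache file name from SHA-256 hashes of the tag and geometry filters, so repeated conversions with the same parameters reuse the same GeoParquet file. The sketch below reduces that idea to a few lines; unlike the original it passes `sort_keys=True` to `json.dumps` to make the hash independent of key order, and the function name is illustrative.

```python
import hashlib
import json

def result_file_name(pbf_name: str, tags_filter=None, explode_tags=True) -> str:
    """Build a deterministic cache file name from the conversion parameters."""
    if tags_filter is None:
        tags_part = "nofilter"
    else:
        # stable serialization of the filter dict -> stable hash for the same filter
        payload = json.dumps(tags_filter, sort_keys=True).encode()
        tags_part = hashlib.sha256(payload).hexdigest()
    exploded_part = "exploded" if explode_tags else "compact"
    return f"{pbf_name}_{tags_part}_{exploded_part}.geoparquet"

print(result_file_name("monaco", {"building": True}))
```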
""" PBF File Reader. This module contains a reader capable of parsing a PBF file into a GeoDataFrame. """ __all__ = [ "PbfFileReader", ] class PbfFileReader: """ PbfFileReader. PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader class based on DuckDB[2] and its spatial extension[3]. Handler can filter out OSM features based on tags filter and geometry filter to limit the result. References: 1. https://wiki.openstreetmap.org/wiki/PBF_Format 2. https://duckdb.org/ 3. https://github.com/duckdb/duckdb_spatial """ class ConvertedOSMParquetFiles(NamedTuple): """List of parquet files read from the `*.osm.pbf` file.""" nodes_valid_with_tags: "duckdb.DuckDBPyRelation" nodes_filtered_ids: "duckdb.DuckDBPyRelation" ways_all_with_tags: "duckdb.DuckDBPyRelation" ways_with_unnested_nodes_refs: "duckdb.DuckDBPyRelation" ways_required_ids: "duckdb.DuckDBPyRelation" ways_filtered_ids: "duckdb.DuckDBPyRelation" relations_all_with_tags: "duckdb.DuckDBPyRelation" relations_with_unnested_way_refs: "duckdb.DuckDBPyRelation" relations_filtered_ids: "duckdb.DuckDBPyRelation" class ParsedOSMFeatures(NamedTuple): """Final list of parsed features from the `*.osm.pbf` file.""" nodes: "duckdb.DuckDBPyRelation" ways: "duckdb.DuckDBPyRelation" relations: "duckdb.DuckDBPyRelation" def __init__( self, tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None, geometry_filter: Optional[BaseGeometry] = None, working_directory: Union[str, Path] = "files", osm_way_polygon_features_config: Optional[ Union[OsmWayPolygonConfig, dict[str, Any]] ] = None, ) -> None: """ Initialize PbfFileReader. Args: tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary specifying which tags to download. The keys should be OSM tags (e.g. `building`, `amenity`). The values should either be `True` for retrieving all objects with the tag, string for retrieving a single tag-value pair or list of strings for retrieving all values specified in the list. `tags={'leisure': 'park}` would return parks from the area. `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}` would return parks, all amenity types, bakeries and bicycle shops. If `None`, handler will allow all of the tags to be parsed. Defaults to `None`. geometry_filter (BaseGeometry, optional): Region which can be used to filter only intersecting OSM objects. Defaults to `None`. working_directory (Union[str, Path], optional): Directory where to save the parsed `*.parquet` files. Defaults to "files". osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional): Config used to determine which closed way features are polygons. Modifications to this config left are left for experienced OSM users. Defaults to predefined "osm_way_polygon_features.json". """ self.tags_filter = tags_filter self.merged_tags_filter = merge_osm_tags_filter(tags_filter) if tags_filter else None self.geometry_filter = geometry_filter self.working_directory = Path(working_directory) self.working_directory.mkdir(parents=True, exist_ok=True) self.connection: duckdb.DuckDBPyConnection = None self.rows_per_bucket = 1_000_000 memory = psutil.virtual_memory() # If less than 8 / 16 GB total memory, reduce number of rows per group if memory.total < (8 * (1024**3)): self.rows_per_bucket = 100_000 elif memory.total < (16 * (1024**3)): self.rows_per_bucket = 500_000 if osm_way_polygon_features_config is None: # Config based on two sources + manual OSM wiki check # 1. 
https://github.com/tyrasd/osm-polygon-features/blob/v0.9.2/polygon-features.json # 2. https://github.com/ideditor/id-area-keys/blob/v5.0.1/areaKeys.json osm_way_polygon_features_config = json.loads( (Path(__file__).parent / "osm_way_polygon_features.json").read_text() ) self.osm_way_polygon_features_config: OsmWayPolygonConfig = ( osm_way_polygon_features_config if isinstance(osm_way_polygon_features_config, OsmWayPolygonConfig) else parse_dict_to_config_object(osm_way_polygon_features_config) ) def get_features_gdf( self, file_paths: Union[str, Path, Iterable[Union[str, Path]]], explode_tags: Optional[bool] = None, ignore_cache: bool = False, filter_osm_ids: Optional[list[str]] = None, ) -> gpd.GeoDataFrame: """ Get features GeoDataFrame from a list of PBF files. Function parses multiple PBF files and returns a single GeoDataFrame with parsed OSM objects. Args: file_paths (Union[str, Path, Iterable[Union[str, Path]]]): Path or list of paths of `*.osm.pbf` files to be parsed. explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys. If `None`, will be set based on `tags_filter` parameter. If no tags filter is provided, then `explode_tags` will set to `False`, if there is tags filter it will set to `True`. Defaults to `None`. ignore_cache: (bool, optional): Whether to ignore precalculated geoparquet files or not. Defaults to False. filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file. Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'. Defaults to an empty list. Returns: gpd.GeoDataFrame: GeoDataFrame with OSM features. """ if isinstance(file_paths, (str, Path)): file_paths = [file_paths] if filter_osm_ids is None: filter_osm_ids = [] if explode_tags is None: explode_tags = self.tags_filter is not None parsed_geoparquet_files = [] for file_path in file_paths: parsed_geoparquet_file = self.convert_pbf_to_gpq( file_path, explode_tags=explode_tags, ignore_cache=ignore_cache, filter_osm_ids=filter_osm_ids, ) parsed_geoparquet_files.append(parsed_geoparquet_file) parquet_tables = [ io.read_geoparquet_table(parsed_parquet_file) # type: ignore for parsed_parquet_file in parsed_geoparquet_files ] joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables) gdf_parquet = gpd.GeoDataFrame( data=joined_parquet_table.drop(GEOMETRY_COLUMN).to_pandas(maps_as_pydicts="strict"), geometry=ga.to_geopandas(joined_parquet_table.column(GEOMETRY_COLUMN)), ).set_index(FEATURES_INDEX) return gdf_parquet def convert_pbf_to_gpq( self, pbf_path: Union[str, Path], result_file_path: Optional[Union[str, Path]] = None, explode_tags: Optional[bool] = None, ignore_cache: bool = False, filter_osm_ids: Optional[list[str]] = None, ) -> Path: """ Convert PBF file to GeoParquet file. Args: pbf_path (Union[str, Path]): Pbf file to be parsed to GeoParquet. result_file_path (Union[str, Path], optional): Where to save the geoparquet file. If not provided, will be generated based on hashes from provided tags filter and geometry filter. Defaults to `None`. explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys. If `None`, will be set based on `tags_filter` parameter. If no tags filter is provided, then `explode_tags` will set to `False`, if there is tags filter it will set to `True`. Defaults to `None`. ignore_cache (bool, optional): Whether to ignore precalculated geoparquet files or not. Defaults to False. filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file. 
Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'. Defaults to an empty list. Returns: Path: Path to the generated GeoParquet file. """ if filter_osm_ids is None: filter_osm_ids = [] if explode_tags is None: explode_tags = self.tags_filter is not None with tempfile.TemporaryDirectory(dir=self.working_directory.resolve()) as tmp_dir_name: try: self._set_up_duckdb_connection(tmp_dir_name) result_file_path = result_file_path or self._generate_geoparquet_result_file_path( pbf_path, filter_osm_ids=filter_osm_ids, explode_tags=explode_tags, ) parsed_geoparquet_file = self._parse_pbf_file( pbf_path=pbf_path, tmp_dir_name=tmp_dir_name, result_file_path=Path(result_file_path), filter_osm_ids=filter_osm_ids, explode_tags=explode_tags, ignore_cache=ignore_cache, ) return parsed_geoparquet_file finally: if self.connection is not None: self.connection.close() self.connection = None def _set_up_duckdb_connection(self, tmp_dir_name: str) -> None: self.connection = duckdb.connect( database=str(Path(tmp_dir_name) / "db.duckdb"), config=dict(preserve_insertion_order=False), ) for extension_name in ("parquet", "spatial"): self.connection.install_extension(extension_name) self.connection.load_extension(extension_name) self.connection.sql(""" CREATE OR REPLACE MACRO linestring_to_linestring_wkt(ls) AS 'LINESTRING (' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || ')'; """) self.connection.sql(""" CREATE OR REPLACE MACRO linestring_to_polygon_wkt(ls) AS 'POLYGON ((' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || '))'; """) def _parse_pbf_file( self, pbf_path: Union[str, Path], tmp_dir_name: str, result_file_path: Path, filter_osm_ids: list[str], explode_tags: bool = True, ignore_cache: bool = False, ) -> Path: if not result_file_path.exists() or ignore_cache: elements = self.connection.sql(f"SELECT * FROM ST_READOSM('{Path(pbf_path)}');") converted_osm_parquet_files = self._prefilter_elements_ids( elements, tmp_dir_name, filter_osm_ids ) self._delete_directories( tmp_dir_name, [ "nodes_filtered_non_distinct_ids", "nodes_prepared_ids", "ways_valid_ids", "ways_filtered_non_distinct_ids", "relations_valid_ids", "relations_ids", ], ) filtered_nodes_with_geometry = self._get_filtered_nodes_with_geometry( converted_osm_parquet_files, tmp_dir_name ) self._delete_directories(tmp_dir_name, "nodes_filtered_ids") ways_refs_with_nodes_structs = self._get_ways_refs_with_nodes_structs( converted_osm_parquet_files, tmp_dir_name ) self._delete_directories( tmp_dir_name, [ "nodes_valid_with_tags", ], ) filtered_ways_with_linestrings = self._get_filtered_ways_with_linestrings( osm_parquet_files=converted_osm_parquet_files, ways_refs_with_nodes_structs=ways_refs_with_nodes_structs, tmp_dir_name=tmp_dir_name, ) required_ways_with_linestrings = self._get_required_ways_with_linestrings( osm_parquet_files=converted_osm_parquet_files, ways_refs_with_nodes_structs=ways_refs_with_nodes_structs, tmp_dir_name=tmp_dir_name, ) self._delete_directories( tmp_dir_name, [ "ways_required_grouped", "ways_required_ids", "ways_with_unnested_nodes_refs", "ways_refs_with_nodes_structs", "required_ways_ids_grouped", "required_ways_grouped", "required_ways_tmp", "filtered_ways_ids_grouped", "filtered_ways_grouped", "filtered_ways_tmp", ], ) filtered_ways_with_proper_geometry = self._get_filtered_ways_with_proper_geometry( converted_osm_parquet_files, filtered_ways_with_linestrings, tmp_dir_name ) self._delete_directories( tmp_dir_name, [ "ways_prepared_ids", "ways_filtered_ids", "ways_all_with_tags", 
"filtered_ways_with_linestrings", ], ) filtered_relations_with_geometry = self._get_filtered_relations_with_geometry( converted_osm_parquet_files, required_ways_with_linestrings, tmp_dir_name ) self._delete_directories( tmp_dir_name, [ "relations_all_with_tags", "relations_with_unnested_way_refs", "relations_filtered_ids", "required_ways_with_linestrings", "valid_relation_parts", "relation_inner_parts", "relation_outer_parts", "relation_outer_parts_with_holes", "relation_outer_parts_without_holes", ], ) self._concatenate_results_to_geoparquet( PbfFileReader.ParsedOSMFeatures( nodes=filtered_nodes_with_geometry, ways=filtered_ways_with_proper_geometry, relations=filtered_relations_with_geometry, ), tmp_dir_name=tmp_dir_name, save_file_path=result_file_path, explode_tags=explode_tags, ) return result_file_path def _generate_geoparquet_result_file_path( self, pbf_file_path: Union[str, Path], explode_tags: bool, filter_osm_ids: list[str], ) -> Path: pbf_file_name = Path(pbf_file_path).name.removesuffix(".osm.pbf") osm_filter_tags_hash_part = "nofilter" if self.tags_filter is not None: h = hashlib.new("sha256") h.update(json.dumps(self.tags_filter).encode()) osm_filter_tags_hash_part = h.hexdigest() clipping_geometry_hash_part = "noclip" if self.geometry_filter is not None: h = hashlib.new("sha256") h.update(wktlib.dumps(self.geometry_filter).encode()) clipping_geometry_hash_part = h.hexdigest() exploded_tags_part = "exploded" if explode_tags else "compact" filter_osm_ids_hash_part = "" if filter_osm_ids: h = hashlib.new("sha256") h.update(json.dumps(sorted(set(filter_osm_ids))).encode()) filter_osm_ids_hash_part = f"_{h.hexdigest()}" result_file_name = ( f"{pbf_file_name}_{osm_filter_tags_hash_part}" f"_{clipping_geometry_hash_part}_{exploded_tags_part}{filter_osm_ids_hash_part}.geoparquet" ) return Path(self.working_directory) / result_file_name def _prefilter_elements_ids( self, elements: "duckdb.DuckDBPyRelation", tmp_dir_name: str, filter_osm_ids: list[str] ) -> ConvertedOSMParquetFiles: sql_filter = self._generate_osm_tags_sql_filter() filtered_tags_clause = self._generate_filtered_tags_clause() is_intersecting = self.geometry_filter is not None
with TaskProgressSpinner("Reading nodes", "1"):
7
2023-12-28 11:26:41+00:00
8k
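The PbfFileReader record above documents a tags filter, a geometry filter, a working directory, and a `get_features_gdf` entry point. A minimal usage sketch of that documented API follows; it assumes the class defined above is already in scope, and the input file name `monaco.osm.pbf` plus the bounding-box coordinates are illustrative assumptions rather than part of the record.

from pathlib import Path
from shapely.geometry import box

# Hypothetical usage of the PbfFileReader class recorded above.
reader = PbfFileReader(
    tags_filter={"leisure": "park", "shop": ["bakery", "bicycle"]},
    geometry_filter=box(7.41, 43.72, 7.44, 43.75),  # assumed area of interest
    working_directory=Path("files"),
)
features_gdf = reader.get_features_gdf(
    file_paths="monaco.osm.pbf",  # assumed local *.osm.pbf extract
    explode_tags=True,
    ignore_cache=False,
)
print(features_gdf.head())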
KyanChen/TTP
mmseg/models/decode_heads/vpd_depth_head.py
[ { "identifier": "MODELS", "path": "mmseg/registry/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmseg.models'])" }, { "identifier": "SampleList", "path": "mmseg/utils/typing_utils.py", "snippet": "" }, { "identifier": "build_loss", "path": "mmseg/models/builder.py", "snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n warnings.warn('``build_loss`` would be deprecated soon, please use '\n '``mmseg.registry.MODELS.build()`` ')\n return LOSSES.build(cfg)" }, { "identifier": "resize", "path": "mmseg/models/utils/wrappers.py", "snippet": "def resize(input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n warning=True):\n if warning:\n if size is not None and align_corners:\n input_h, input_w = tuple(int(x) for x in input.shape[2:])\n output_h, output_w = tuple(int(x) for x in size)\n if output_h > input_h or output_w > output_h:\n if ((output_h > 1 and output_w > 1 and input_h > 1\n and input_w > 1) and (output_h - 1) % (input_h - 1)\n and (output_w - 1) % (input_w - 1)):\n warnings.warn(\n f'When align_corners={align_corners}, '\n 'the output would more aligned if '\n f'input size {(input_h, input_w)} is `x+1` and '\n f'out size {(output_h, output_w)} is `nx+1`')\n return F.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "BaseDecodeHead", "path": "mmseg/models/decode_heads/decode_head.py", "snippet": "class BaseDecodeHead(BaseModule, metaclass=ABCMeta):\n \"\"\"Base class for BaseDecodeHead.\n\n 1. The ``init_weights`` method is used to initialize decode_head's\n model parameters. After segmentor initialization, ``init_weights``\n is triggered when ``segmentor.init_weights()`` is called externally.\n\n 2. The ``loss`` method is used to calculate the loss of decode_head,\n which includes two steps: (1) the decode_head model performs forward\n propagation to obtain the feature maps (2) The ``loss_by_feat`` method\n is called based on the feature maps to calculate the loss.\n\n .. code:: text\n\n loss(): forward() -> loss_by_feat()\n\n 3. The ``predict`` method is used to predict segmentation results,\n which includes two steps: (1) the decode_head model performs forward\n propagation to obtain the feature maps (2) The ``predict_by_feat`` method\n is called based on the feature maps to predict segmentation results\n including post-processing.\n\n .. code:: text\n\n predict(): forward() -> predict_by_feat()\n\n Args:\n in_channels (int|Sequence[int]): Input channels.\n channels (int): Channels after modules, before conv_seg.\n num_classes (int): Number of classes.\n out_channels (int): Output channels of conv_seg. Default: None.\n threshold (float): Threshold for binary segmentation in the case of\n `num_classes==1`. Default: None.\n dropout_ratio (float): Ratio of dropout layer. Default: 0.1.\n conv_cfg (dict|None): Config of conv layers. Default: None.\n norm_cfg (dict|None): Config of norm layers. Default: None.\n act_cfg (dict): Config of activation layers.\n Default: dict(type='ReLU')\n in_index (int|Sequence[int]): Input feature index. 
Default: -1\n input_transform (str|None): Transformation type of input features.\n Options: 'resize_concat', 'multiple_select', None.\n 'resize_concat': Multiple feature maps will be resize to the\n same size as first one and than concat together.\n Usually used in FCN head of HRNet.\n 'multiple_select': Multiple feature maps will be bundle into\n a list and passed into decode head.\n None: Only one select feature map is allowed.\n Default: None.\n loss_decode (dict | Sequence[dict]): Config of decode loss.\n The `loss_name` is property of corresponding loss function which\n could be shown in training log. If you want this loss\n item to be included into the backward graph, `loss_` must be the\n prefix of the name. Defaults to 'loss_ce'.\n e.g. dict(type='CrossEntropyLoss'),\n [dict(type='CrossEntropyLoss', loss_name='loss_ce'),\n dict(type='DiceLoss', loss_name='loss_dice')]\n Default: dict(type='CrossEntropyLoss').\n ignore_index (int | None): The label index to be ignored. When using\n masked BCE loss, ignore_index should be set to None. Default: 255.\n sampler (dict|None): The config of segmentation map sampler.\n Default: None.\n align_corners (bool): align_corners argument of F.interpolate.\n Default: False.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n \"\"\"\n\n def __init__(self,\n in_channels,\n channels,\n *,\n num_classes,\n out_channels=None,\n threshold=None,\n dropout_ratio=0.1,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=dict(type='ReLU'),\n in_index=-1,\n input_transform=None,\n loss_decode=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n ignore_index=255,\n sampler=None,\n align_corners=False,\n init_cfg=dict(\n type='Normal', std=0.01, override=dict(name='conv_seg'))):\n super().__init__(init_cfg)\n self._init_inputs(in_channels, in_index, input_transform)\n self.channels = channels\n self.dropout_ratio = dropout_ratio\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.in_index = in_index\n\n self.ignore_index = ignore_index\n self.align_corners = align_corners\n\n if out_channels is None:\n if num_classes == 2:\n warnings.warn('For binary segmentation, we suggest using'\n '`out_channels = 1` to define the output'\n 'channels of segmentor, and use `threshold`'\n 'to convert `seg_logits` into a prediction'\n 'applying a threshold')\n out_channels = num_classes\n\n if out_channels != num_classes and out_channels != 1:\n raise ValueError(\n 'out_channels should be equal to num_classes,'\n 'except binary segmentation set out_channels == 1 and'\n f'num_classes == 2, but got out_channels={out_channels}'\n f'and num_classes={num_classes}')\n\n if out_channels == 1 and threshold is None:\n threshold = 0.3\n warnings.warn('threshold is not defined for binary, and defaults'\n 'to 0.3')\n self.num_classes = num_classes\n self.out_channels = out_channels\n self.threshold = threshold\n\n if isinstance(loss_decode, dict):\n self.loss_decode = build_loss(loss_decode)\n elif isinstance(loss_decode, (list, tuple)):\n self.loss_decode = nn.ModuleList()\n for loss in loss_decode:\n self.loss_decode.append(build_loss(loss))\n else:\n raise TypeError(f'loss_decode must be a dict or sequence of dict,\\\n but got {type(loss_decode)}')\n\n if sampler is not None:\n self.sampler = build_pixel_sampler(sampler, context=self)\n else:\n self.sampler = None\n\n self.conv_seg = nn.Conv2d(channels, self.out_channels, kernel_size=1)\n if dropout_ratio > 0:\n self.dropout = nn.Dropout2d(dropout_ratio)\n else:\n 
self.dropout = None\n\n def extra_repr(self):\n \"\"\"Extra repr.\"\"\"\n s = f'input_transform={self.input_transform}, ' \\\n f'ignore_index={self.ignore_index}, ' \\\n f'align_corners={self.align_corners}'\n return s\n\n def _init_inputs(self, in_channels, in_index, input_transform):\n \"\"\"Check and initialize input transforms.\n\n The in_channels, in_index and input_transform must match.\n Specifically, when input_transform is None, only single feature map\n will be selected. So in_channels and in_index must be of type int.\n When input_transform\n\n Args:\n in_channels (int|Sequence[int]): Input channels.\n in_index (int|Sequence[int]): Input feature index.\n input_transform (str|None): Transformation type of input features.\n Options: 'resize_concat', 'multiple_select', None.\n 'resize_concat': Multiple feature maps will be resize to the\n same size as first one and than concat together.\n Usually used in FCN head of HRNet.\n 'multiple_select': Multiple feature maps will be bundle into\n a list and passed into decode head.\n None: Only one select feature map is allowed.\n \"\"\"\n\n if input_transform is not None:\n assert input_transform in ['resize_concat', 'multiple_select']\n self.input_transform = input_transform\n self.in_index = in_index\n if input_transform is not None:\n assert isinstance(in_channels, (list, tuple))\n assert isinstance(in_index, (list, tuple))\n assert len(in_channels) == len(in_index)\n if input_transform == 'resize_concat':\n self.in_channels = sum(in_channels)\n else:\n self.in_channels = in_channels\n else:\n assert isinstance(in_channels, int)\n assert isinstance(in_index, int)\n self.in_channels = in_channels\n\n def _transform_inputs(self, inputs):\n \"\"\"Transform inputs for decoder.\n\n Args:\n inputs (list[Tensor]): List of multi-level img features.\n\n Returns:\n Tensor: The transformed inputs\n \"\"\"\n\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [\n resize(\n input=x,\n size=inputs[0].shape[2:],\n mode='bilinear',\n align_corners=self.align_corners) for x in inputs\n ]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n\n return inputs\n\n @abstractmethod\n def forward(self, inputs):\n \"\"\"Placeholder of forward function.\"\"\"\n pass\n\n def cls_seg(self, feat):\n \"\"\"Classify each pixel.\"\"\"\n if self.dropout is not None:\n feat = self.dropout(feat)\n output = self.conv_seg(feat)\n return output\n\n def loss(self, inputs: Tuple[Tensor], batch_data_samples: SampleList,\n train_cfg: ConfigType) -> dict:\n \"\"\"Forward function for training.\n\n Args:\n inputs (Tuple[Tensor]): List of multi-level img features.\n batch_data_samples (list[:obj:`SegDataSample`]): The seg\n data samples. 
It usually includes information such\n as `img_metas` or `gt_semantic_seg`.\n train_cfg (dict): The training config.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n seg_logits = self.forward(inputs)\n losses = self.loss_by_feat(seg_logits, batch_data_samples)\n return losses\n\n def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict],\n test_cfg: ConfigType) -> Tensor:\n \"\"\"Forward function for prediction.\n\n Args:\n inputs (Tuple[Tensor]): List of multi-level img features.\n batch_img_metas (dict): List Image info where each dict may also\n contain: 'img_shape', 'scale_factor', 'flip', 'img_path',\n 'ori_shape', and 'pad_shape'.\n For details on the values of these keys see\n `mmseg/datasets/pipelines/formatting.py:PackSegInputs`.\n test_cfg (dict): The testing config.\n\n Returns:\n Tensor: Outputs segmentation logits map.\n \"\"\"\n seg_logits = self.forward(inputs)\n\n return self.predict_by_feat(seg_logits, batch_img_metas)\n\n def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor:\n gt_semantic_segs = [\n data_sample.gt_sem_seg.data for data_sample in batch_data_samples\n ]\n return torch.stack(gt_semantic_segs, dim=0)\n\n def loss_by_feat(self, seg_logits: Tensor,\n batch_data_samples: SampleList) -> dict:\n \"\"\"Compute segmentation loss.\n\n Args:\n seg_logits (Tensor): The output from decode head forward function.\n batch_data_samples (List[:obj:`SegDataSample`]): The seg\n data samples. It usually includes information such\n as `metainfo` and `gt_sem_seg`.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n\n seg_label = self._stack_batch_gt(batch_data_samples)\n loss = dict()\n seg_logits = resize(\n input=seg_logits,\n size=seg_label.shape[2:],\n mode='bilinear',\n align_corners=self.align_corners)\n if self.sampler is not None:\n seg_weight = self.sampler.sample(seg_logits, seg_label)\n else:\n seg_weight = None\n seg_label = seg_label.squeeze(1)\n\n if not isinstance(self.loss_decode, nn.ModuleList):\n losses_decode = [self.loss_decode]\n else:\n losses_decode = self.loss_decode\n for loss_decode in losses_decode:\n if loss_decode.loss_name not in loss:\n loss[loss_decode.loss_name] = loss_decode(\n seg_logits,\n seg_label,\n weight=seg_weight,\n ignore_index=self.ignore_index)\n else:\n loss[loss_decode.loss_name] += loss_decode(\n seg_logits,\n seg_label,\n weight=seg_weight,\n ignore_index=self.ignore_index)\n\n loss['acc_seg'] = accuracy(\n seg_logits, seg_label, ignore_index=self.ignore_index)\n return loss\n\n def predict_by_feat(self, seg_logits: Tensor,\n batch_img_metas: List[dict]) -> Tensor:\n \"\"\"Transform a batch of output seg_logits to the input shape.\n\n Args:\n seg_logits (Tensor): The output from decode head forward function.\n batch_img_metas (list[dict]): Meta information of each image, e.g.,\n image size, scaling factor, etc.\n\n Returns:\n Tensor: Outputs segmentation logits map.\n \"\"\"\n\n if isinstance(batch_img_metas[0]['img_shape'], torch.Size):\n # slide inference\n size = batch_img_metas[0]['img_shape']\n elif 'pad_shape' in batch_img_metas[0]:\n size = batch_img_metas[0]['pad_shape'][:2]\n else:\n size = batch_img_metas[0]['img_shape']\n\n seg_logits = resize(\n input=seg_logits,\n size=size,\n mode='bilinear',\n align_corners=self.align_corners)\n return seg_logits" } ]
from typing import Dict, List, Optional, Sequence, Union from mmcv.cnn import build_conv_layer, build_norm_layer, build_upsample_layer from mmengine.model import BaseModule from torch import Tensor from mmseg.registry import MODELS from mmseg.utils import SampleList from ..builder import build_loss from ..utils import resize from .decode_head import BaseDecodeHead import torch import torch.nn as nn import torch.nn.functional as F
5536
fmap_border (Union[int, Sequence[int]]): Feature map border for cropping. Defaults to 0. align_corners (bool): Flag for align_corners in interpolation. Defaults to False. loss_decode (dict): Configurations for the loss function. Defaults to dict(type='SiLogLoss'). init_cfg (dict): Initialization configurations. Defaults to dict(type='TruncNormal', std=0.02, layer=['Conv2d', 'Linear']). """ num_classes = 1 out_channels = 1 input_transform = None def __init__( self, max_depth: float = 10.0, in_channels: Sequence[int] = [320, 640, 1280, 1280], embed_dim: int = 192, feature_dim: int = 1536, num_deconv_layers: int = 3, num_deconv_filters: Sequence[int] = (32, 32, 32), fmap_border: Union[int, Sequence[int]] = 0, align_corners: bool = False, loss_decode: dict = dict(type='SiLogLoss'), init_cfg=dict( type='TruncNormal', std=0.02, layer=['Conv2d', 'Linear']), ): super(BaseDecodeHead, self).__init__(init_cfg=init_cfg) # initialize parameters self.in_channels = in_channels self.max_depth = max_depth self.align_corners = align_corners # feature map border if isinstance(fmap_border, int): fmap_border = (fmap_border, fmap_border) self.fmap_border = fmap_border # define network layers self.conv1 = nn.Sequential( nn.Conv2d(in_channels[0], in_channels[0], 3, stride=2, padding=1), nn.GroupNorm(16, in_channels[0]), nn.ReLU(), nn.Conv2d(in_channels[0], in_channels[0], 3, stride=2, padding=1), ) self.conv2 = nn.Conv2d( in_channels[1], in_channels[1], 3, stride=2, padding=1) self.conv_aggregation = nn.Sequential( nn.Conv2d(sum(in_channels), feature_dim, 1), nn.GroupNorm(16, feature_dim), nn.ReLU(), ) self.decoder = VPDDepthDecoder( in_channels=embed_dim * 8, out_channels=embed_dim, num_deconv_layers=num_deconv_layers, num_deconv_filters=num_deconv_filters) self.depth_pred_layer = nn.Sequential( nn.Conv2d( embed_dim, embed_dim, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=False), nn.Conv2d(embed_dim, 1, kernel_size=3, stride=1, padding=1)) # build loss if isinstance(loss_decode, dict): self.loss_decode = build_loss(loss_decode) elif isinstance(loss_decode, (list, tuple)): self.loss_decode = nn.ModuleList() for loss in loss_decode: self.loss_decode.append(build_loss(loss)) else: raise TypeError(f'loss_decode must be a dict or sequence of dict,\ but got {type(loss_decode)}') def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor: gt_depth_maps = [ data_sample.gt_depth_map.data for data_sample in batch_data_samples ] return torch.stack(gt_depth_maps, dim=0) def forward(self, x): x = [ x[0], x[1], torch.cat([x[2], F.interpolate(x[3], scale_factor=2)], dim=1) ] x = torch.cat([self.conv1(x[0]), self.conv2(x[1]), x[2]], dim=1) x = self.conv_aggregation(x) x = x[:, :, :x.size(2) - self.fmap_border[0], :x.size(3) - self.fmap_border[1]].contiguous() x = self.decoder(x) out = self.depth_pred_layer(x) depth = torch.sigmoid(out) * self.max_depth return depth def loss_by_feat(self, pred_depth_map: Tensor, batch_data_samples: SampleList) -> dict: """Compute depth estimation loss. Args: pred_depth_map (Tensor): The output from decode head forward function. batch_data_samples (List[:obj:`SegDataSample`]): The seg data samples. It usually includes information such as `metainfo` and `gt_dpeth_map`. Returns: dict[str, Tensor]: a dictionary of loss components """ gt_depth_map = self._stack_batch_gt(batch_data_samples) loss = dict()
# Copyright (c) OpenMMLab. All rights reserved. class VPDDepthDecoder(BaseModule): """VPD Depth Decoder class. Args: in_channels (int): Number of input channels. out_channels (int): Number of output channels. num_deconv_layers (int): Number of deconvolution layers. num_deconv_filters (List[int]): List of output channels for deconvolution layers. init_cfg (Optional[Union[Dict, List[Dict]]], optional): Configuration for weight initialization. Defaults to Normal for Conv2d and ConvTranspose2d layers. """ def __init__(self, in_channels: int, out_channels: int, num_deconv_layers: int, num_deconv_filters: List[int], init_cfg: Optional[Union[Dict, List[Dict]]] = dict( type='Normal', std=0.001, layer=['Conv2d', 'ConvTranspose2d'])): super().__init__(init_cfg=init_cfg) self.in_channels = in_channels self.deconv_layers = self._make_deconv_layer( num_deconv_layers, num_deconv_filters, ) conv_layers = [] conv_layers.append( build_conv_layer( dict(type='Conv2d'), in_channels=num_deconv_filters[-1], out_channels=out_channels, kernel_size=3, stride=1, padding=1)) conv_layers.append(build_norm_layer(dict(type='BN'), out_channels)[1]) conv_layers.append(nn.ReLU(inplace=True)) self.conv_layers = nn.Sequential(*conv_layers) self.up_sample = nn.Upsample( scale_factor=2, mode='bilinear', align_corners=False) def forward(self, x): """Forward pass through the decoder network.""" out = self.deconv_layers(x) out = self.conv_layers(out) out = self.up_sample(out) out = self.up_sample(out) return out def _make_deconv_layer(self, num_layers, num_deconv_filters): """Make deconv layers.""" layers = [] in_channels = self.in_channels for i in range(num_layers): num_channels = num_deconv_filters[i] layers.append( build_upsample_layer( dict(type='deconv'), in_channels=in_channels, out_channels=num_channels, kernel_size=2, stride=2, padding=0, output_padding=0, bias=False)) layers.append(nn.BatchNorm2d(num_channels)) layers.append(nn.ReLU(inplace=True)) in_channels = num_channels return nn.Sequential(*layers) @MODELS.register_module() class VPDDepthHead(BaseDecodeHead): """Depth Prediction Head for VPD. .. _`VPD`: https://arxiv.org/abs/2303.02153 Args: max_depth (float): Maximum depth value. Defaults to 10.0. in_channels (Sequence[int]): Number of input channels for each convolutional layer. embed_dim (int): Dimension of embedding. Defaults to 192. feature_dim (int): Dimension of aggregated feature. Defaults to 1536. num_deconv_layers (int): Number of deconvolution layers in the decoder. Defaults to 3. num_deconv_filters (Sequence[int]): Number of filters for each deconv layer. Defaults to (32, 32, 32). fmap_border (Union[int, Sequence[int]]): Feature map border for cropping. Defaults to 0. align_corners (bool): Flag for align_corners in interpolation. Defaults to False. loss_decode (dict): Configurations for the loss function. Defaults to dict(type='SiLogLoss'). init_cfg (dict): Initialization configurations. Defaults to dict(type='TruncNormal', std=0.02, layer=['Conv2d', 'Linear']). 
""" num_classes = 1 out_channels = 1 input_transform = None def __init__( self, max_depth: float = 10.0, in_channels: Sequence[int] = [320, 640, 1280, 1280], embed_dim: int = 192, feature_dim: int = 1536, num_deconv_layers: int = 3, num_deconv_filters: Sequence[int] = (32, 32, 32), fmap_border: Union[int, Sequence[int]] = 0, align_corners: bool = False, loss_decode: dict = dict(type='SiLogLoss'), init_cfg=dict( type='TruncNormal', std=0.02, layer=['Conv2d', 'Linear']), ): super(BaseDecodeHead, self).__init__(init_cfg=init_cfg) # initialize parameters self.in_channels = in_channels self.max_depth = max_depth self.align_corners = align_corners # feature map border if isinstance(fmap_border, int): fmap_border = (fmap_border, fmap_border) self.fmap_border = fmap_border # define network layers self.conv1 = nn.Sequential( nn.Conv2d(in_channels[0], in_channels[0], 3, stride=2, padding=1), nn.GroupNorm(16, in_channels[0]), nn.ReLU(), nn.Conv2d(in_channels[0], in_channels[0], 3, stride=2, padding=1), ) self.conv2 = nn.Conv2d( in_channels[1], in_channels[1], 3, stride=2, padding=1) self.conv_aggregation = nn.Sequential( nn.Conv2d(sum(in_channels), feature_dim, 1), nn.GroupNorm(16, feature_dim), nn.ReLU(), ) self.decoder = VPDDepthDecoder( in_channels=embed_dim * 8, out_channels=embed_dim, num_deconv_layers=num_deconv_layers, num_deconv_filters=num_deconv_filters) self.depth_pred_layer = nn.Sequential( nn.Conv2d( embed_dim, embed_dim, kernel_size=3, stride=1, padding=1), nn.ReLU(inplace=False), nn.Conv2d(embed_dim, 1, kernel_size=3, stride=1, padding=1)) # build loss if isinstance(loss_decode, dict): self.loss_decode = build_loss(loss_decode) elif isinstance(loss_decode, (list, tuple)): self.loss_decode = nn.ModuleList() for loss in loss_decode: self.loss_decode.append(build_loss(loss)) else: raise TypeError(f'loss_decode must be a dict or sequence of dict,\ but got {type(loss_decode)}') def _stack_batch_gt(self, batch_data_samples: SampleList) -> Tensor: gt_depth_maps = [ data_sample.gt_depth_map.data for data_sample in batch_data_samples ] return torch.stack(gt_depth_maps, dim=0) def forward(self, x): x = [ x[0], x[1], torch.cat([x[2], F.interpolate(x[3], scale_factor=2)], dim=1) ] x = torch.cat([self.conv1(x[0]), self.conv2(x[1]), x[2]], dim=1) x = self.conv_aggregation(x) x = x[:, :, :x.size(2) - self.fmap_border[0], :x.size(3) - self.fmap_border[1]].contiguous() x = self.decoder(x) out = self.depth_pred_layer(x) depth = torch.sigmoid(out) * self.max_depth return depth def loss_by_feat(self, pred_depth_map: Tensor, batch_data_samples: SampleList) -> dict: """Compute depth estimation loss. Args: pred_depth_map (Tensor): The output from decode head forward function. batch_data_samples (List[:obj:`SegDataSample`]): The seg data samples. It usually includes information such as `metainfo` and `gt_dpeth_map`. Returns: dict[str, Tensor]: a dictionary of loss components """ gt_depth_map = self._stack_batch_gt(batch_data_samples) loss = dict()
pred_depth_map = resize(
3
2023-12-23 08:36:47+00:00
8k
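The VPDDepthHead record above ends its forward pass with `torch.sigmoid(out) * self.max_depth`. The short sketch below only illustrates that this final activation bounds every prediction to the open interval (0, max_depth); the tensor shapes and values are made up for the example.

import torch

max_depth = 10.0                    # default max_depth used by the head above
raw_logits = torch.randn(2, 1, 64, 64)  # dummy stand-in for depth_pred_layer output
depth = torch.sigmoid(raw_logits) * max_depth
# Every predicted depth now lies strictly between 0 and max_depth.
assert depth.min().item() > 0.0 and depth.max().item() < max_depth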
jiayev/GPT4V-Image-Captioner
utils/models/cogvlm_model.py
[ { "identifier": "LlamaVisionExpertFCMixin", "path": "utils/models/mixin.py", "snippet": "class LlamaVisionExpertFCMixin(BaseMixin):\r\n def __init__(self, in_features, hidden_features, num_layers=32, num_vision_layers=0, vision_layer_range=None,\r\n params_dtype=torch.float, device=torch.device('cpu')):\r\n super().__init__()\r\n\r\n self.num_layers = num_layers\r\n self.num_vision_layers = num_vision_layers\r\n if vision_layer_range is None:\r\n vision_layer_range = [i for i in range(min(num_vision_layers, num_layers))]\r\n self.vision_layer_range = vision_layer_range\r\n self.gate_proj = nn.ModuleList([ColumnParallelLinear(\r\n in_features,\r\n hidden_features,\r\n gather_output=False,\r\n init_method=None,\r\n bias=False,\r\n params_dtype=params_dtype,\r\n module=self,\r\n name=\"dense_h_to_4h_gate\",\r\n skip_init=True,\r\n device=device\r\n ) for i in range(num_layers)])\r\n # Trainable vision expert parameters\r\n vision_dense_h_to_4h_list = []\r\n vision_dense_4h_to_h_list = []\r\n gate_proj_list = []\r\n\r\n\r\n for i in vision_layer_range:\r\n vision_dense_h_to_4h = ColumnParallelLinear(\r\n in_features,\r\n hidden_features,\r\n gather_output=False,\r\n init_method=None,\r\n bias=False,\r\n params_dtype=params_dtype,\r\n module=self,\r\n name=\"vision_dense_h_to_4h\",\r\n skip_init=True,\r\n device=device\r\n )\r\n\r\n # Project back to h.\r\n vision_dense_4h_to_h = RowParallelLinear(\r\n hidden_features,\r\n in_features,\r\n input_is_parallel=True,\r\n init_method=None,\r\n bias=False,\r\n params_dtype=params_dtype,\r\n module=self,\r\n name=\"vision_dense_4h_to_h\",\r\n skip_init=True,\r\n device=device\r\n )\r\n\r\n gate_proj = ColumnParallelLinear(\r\n in_features,\r\n hidden_features,\r\n gather_output=False,\r\n init_method=None,\r\n bias=False,\r\n params_dtype=params_dtype,\r\n module=self,\r\n name=\"vision_gate_proj\",\r\n skip_init=True,\r\n device=device\r\n )\r\n\r\n vision_dense_h_to_4h_list.append(vision_dense_h_to_4h)\r\n vision_dense_4h_to_h_list.append(vision_dense_4h_to_h)\r\n gate_proj_list.append(gate_proj)\r\n\r\n self.vision_dense_h_to_4h_list = nn.ModuleDict([\r\n (str(layer_id), vision_dense_h_to_4h)\r\n for layer_id, vision_dense_h_to_4h in zip(vision_layer_range, vision_dense_h_to_4h_list)\r\n ])\r\n self.vision_dense_4h_to_h_list = nn.ModuleDict([\r\n (str(layer_id), vision_dense_4h_to_h)\r\n for layer_id, vision_dense_4h_to_h in zip(vision_layer_range, vision_dense_4h_to_h_list)\r\n ])\r\n self.vision_gate_proj = nn.ModuleDict([\r\n (str(layer_id), gate_proj)\r\n for layer_id, gate_proj in zip(vision_layer_range, gate_proj_list)\r\n ])\r\n\r\n def mlp_forward(self, hidden_states, **kw_args):\r\n mixin_self = self\r\n self = self.transformer.layers[kw_args['layer_id']].mlp\r\n if \"vision_expert_mask\" in kw_args:\r\n vision_expert_mask = kw_args['vision_expert_mask']\r\n else:\r\n vision_expert_mask = None\r\n\r\n layer_id_key = str(int(kw_args['layer_id']))\r\n\r\n if kw_args['layer_id'] in mixin_self.vision_layer_range and (vision_expert_mask is not None) and vision_expert_mask.any():\r\n vision_dense_h_to_4h = mixin_self.vision_dense_h_to_4h_list[layer_id_key]\r\n vision_dense_4h_to_h = mixin_self.vision_dense_4h_to_h_list[layer_id_key]\r\n vision_gate_proj = mixin_self.vision_gate_proj[layer_id_key]\r\n output = torch.empty(hidden_states.shape, dtype=hidden_states.dtype, device=hidden_states.device)\r\n\r\n language_hidden_state = hidden_states[~vision_expert_mask.bool()]\r\n language_intermediate_parallel = 
self.activation_func(mixin_self.gate_proj[kw_args['layer_id']](language_hidden_state)) * self.dense_h_to_4h(language_hidden_state)\r\n output[~vision_expert_mask.bool()] = self.dense_4h_to_h(language_intermediate_parallel) # language_output\r\n\r\n vision_hidden_state = hidden_states[vision_expert_mask.bool()]\r\n vision_intermediate_parallel = vision_dense_h_to_4h(vision_hidden_state)\r\n gate_output = vision_gate_proj(vision_hidden_state)\r\n\r\n vision_intermediate_parallel *= self.activation_func(gate_output)\r\n output[vision_expert_mask.bool()] = vision_dense_4h_to_h(vision_intermediate_parallel) # vision_output\r\n else:\r\n intermediate_parallel = self.activation_func(mixin_self.gate_proj[kw_args['layer_id']](hidden_states)) * self.dense_h_to_4h(hidden_states)\r\n output = self.dense_4h_to_h(intermediate_parallel)\r\n\r\n return output.contiguous()\r\n\r\n def copy_param(self):\r\n with torch.no_grad():\r\n for i in self.vision_layer_range:\r\n self.vision_gate_proj[str(i)].weight.data.copy_(self.gate_proj[i].weight.data)\r\n self.vision_dense_4h_to_h_list[str(i)].weight.data.copy_(self.transformer.layers[i].mlp.dense_4h_to_h.weight.data)\r\n self.vision_dense_h_to_4h_list[str(i)].weight.data.copy_(self.transformer.layers[i].mlp.dense_h_to_4h.weight.data)\r" }, { "identifier": "LlamaVisionExpertAttnMixin", "path": "utils/models/mixin.py", "snippet": "class LlamaVisionExpertAttnMixin(BaseMixin):\r\n def __init__(self, hidden_size, num_heads, num_layers=28, num_vision_layers=0, use_vision_expert=True, vision_layer_range=None,\r\n params_dtype=torch.float, device=torch.device('cpu')):\r\n super().__init__()\r\n\r\n world_size = get_model_parallel_world_size()\r\n self.hidden_size = hidden_size\r\n self.num_attention_heads = num_heads\r\n self.hidden_size_per_attention_head = divide(hidden_size, num_heads)\r\n self.num_attention_heads_per_partition = divide(num_heads, world_size)\r\n self.inner_hidden_size = num_heads * self.hidden_size_per_attention_head\r\n\r\n self.rotary_emb = FastRotaryEmbedding(\r\n hidden_size // num_heads, pos_idx_in_fp32=False\r\n )\r\n\r\n self.num_vision_layers = num_vision_layers\r\n self.num_layers = num_layers\r\n if vision_layer_range is None:\r\n vision_layer_range = [i for i in range(min(num_vision_layers, num_layers))]\r\n self.vision_layer_range = vision_layer_range\r\n\r\n self.use_vision_expert = use_vision_expert\r\n # Trainable vision expert parameters\r\n\r\n if self.use_vision_expert:\r\n vision_query_key_value_list = []\r\n vision_dense_list = []\r\n for i in vision_layer_range:\r\n vision_query_key_value = ColumnParallelLinear(\r\n hidden_size,\r\n 3 * hidden_size,\r\n stride=3,\r\n gather_output=False,\r\n init_method=None,\r\n bias=False,\r\n params_dtype=params_dtype,\r\n module=self,\r\n name=\"vision_query_key_value\",\r\n skip_init=True,\r\n device=device\r\n )\r\n\r\n vision_dense = RowParallelLinear(\r\n self.inner_hidden_size,\r\n hidden_size,\r\n input_is_parallel=True,\r\n init_method=None,\r\n bias=False,\r\n params_dtype=params_dtype,\r\n module=self,\r\n name=\"vision_dense\",\r\n skip_init=True,\r\n device=device,\r\n final_bias=False\r\n )\r\n\r\n vision_query_key_value_list.append(vision_query_key_value)\r\n vision_dense_list.append(vision_dense)\r\n\r\n self.vision_query_key_value_list = nn.ModuleDict([\r\n (str(layer_id), vision_query_key_value)\r\n for layer_id, vision_query_key_value in zip(vision_layer_range, vision_query_key_value_list)\r\n ])\r\n self.vision_dense_list = nn.ModuleDict([\r\n (str(layer_id), 
vision_dense)\r\n for layer_id, vision_dense in zip(vision_layer_range, vision_dense_list)\r\n ])\r\n\r\n def attention_forward(self, hidden_states, mask, **kw_args):\r\n mixin_self = self\r\n self = self.transformer.layers[kw_args['layer_id']].attention\r\n attention_fn = attention_fn_default\r\n if 'attention_fn' in self.hooks:\r\n attention_fn = self.hooks['attention_fn']\r\n if \"vision_expert_mask\" in kw_args:\r\n vision_expert_mask = kw_args['vision_expert_mask']\r\n else:\r\n vision_expert_mask = None\r\n\r\n layer_id_key = str(int(kw_args['layer_id']))\r\n if mixin_self.use_vision_expert and kw_args['layer_id'] in mixin_self.vision_layer_range and (\r\n vision_expert_mask is not None) and vision_expert_mask.any():\r\n shape = list(hidden_states.shape)\r\n parallel_size = mpu.get_model_parallel_world_size()\r\n shape[-1] = shape[-1] * 3 // parallel_size\r\n vision_query_key_value = mixin_self.vision_query_key_value_list[layer_id_key]\r\n mixed_raw_layer = torch.empty(shape, dtype=hidden_states.dtype, device=hidden_states.device)\r\n language_hidden_states = hidden_states[~vision_expert_mask.bool()]\r\n vision_hidden_states = hidden_states[vision_expert_mask.bool()]\r\n mixed_raw_layer[~vision_expert_mask.bool()] = self.query_key_value(\r\n language_hidden_states) # language_mixed_raw_layer\r\n mixed_raw_layer[vision_expert_mask.bool()] = vision_query_key_value(\r\n vision_hidden_states) # vision_mixed_raw_layer\r\n else:\r\n mixed_raw_layer = self.query_key_value(hidden_states)\r\n\r\n (mixed_query_layer,\r\n mixed_key_layer,\r\n mixed_value_layer) = split_tensor_along_last_dim(mixed_raw_layer, 3)\r\n\r\n dropout_fn = self.attention_dropout if self.training else None\r\n\r\n query_layer = self._transpose_for_scores(mixed_query_layer)\r\n key_layer = self._transpose_for_scores(mixed_key_layer)\r\n value_layer = self._transpose_for_scores(mixed_value_layer)\r\n\r\n query_layer, key_layer = mixin_self.rotary_emb(query_layer,key_layer, kw_args['position_ids'], max_seqlen=kw_args['position_ids'].max()+1, layer_id=kw_args['layer_id'])\r\n \r\n context_layer = attention_fn(query_layer, key_layer, value_layer, mask, dropout_fn, **kw_args)\r\n\r\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\r\n new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\r\n context_layer = context_layer.view(*new_context_layer_shape)\r\n\r\n if mixin_self.use_vision_expert and kw_args['layer_id'] in mixin_self.vision_layer_range and (\r\n vision_expert_mask is not None) and vision_expert_mask.any():\r\n vision_dense = mixin_self.vision_dense_list[layer_id_key]\r\n parallel_size = mpu.get_model_parallel_world_size()\r\n target_shape = context_layer.shape[:-1] + (context_layer.shape[-1] * parallel_size,)\r\n output = torch.empty(target_shape, dtype=hidden_states.dtype, device=hidden_states.device)\r\n output[~vision_expert_mask.bool()] = self.dense(context_layer[~vision_expert_mask.bool()]) # language\r\n output[vision_expert_mask.bool()] = vision_dense(context_layer[vision_expert_mask.bool()]) # vision\r\n else:\r\n output = self.dense(context_layer)\r\n\r\n if self.training:\r\n output = self.output_dropout(output)\r\n return output.contiguous()\r\n\r\n def copy_param(self):\r\n with torch.no_grad():\r\n for i in self.vision_layer_range:\r\n self.vision_query_key_value_list[str(i)].weight.data.copy_(self.transformer.layers[i].attention.query_key_value.weight.data)\r\n 
self.vision_dense_list[str(i)].weight.data.copy_(self.transformer.layers[i].attention.dense.weight.data)" }, { "identifier": "EVA2CLIPModel", "path": "utils/models/eva_clip_model.py", "snippet": "class EVA2CLIPModel(BaseModel):\r\n def __init__(self, args, transformer=None, parallel_output=True, **kwargs):\r\n property = ViTProperty(args.image_size, args.patch_size, args.pre_len, args.post_len)\r\n args.max_sequence_length = property.pre_len + property.num_patches + property.post_len\r\n if 'activation_func' not in kwargs:\r\n kwargs['activation_func'] = gelu\r\n super().__init__(args, transformer=transformer, parallel_output=parallel_output, **kwargs)\r\n self.transformer.property = property\r\n self.add_mixin(\"patch_embedding\", ImagePatchEmbeddingMixin(args.in_channels, args.hidden_size, property))\r\n self.add_mixin(\"pos_embedding\", InterpolatedPositionEmbeddingMixin())\r\n self.add_mixin(\"final\", IdentityMixin())\r\n self.add_mixin(\"newpost\", NewLayerForward())\r\n self.add_mixin(\"xattn\", XAttn(args.hidden_size // args.num_attention_heads))\r\n\r\n @classmethod\r\n def add_model_specific_args(cls, parser):\r\n group = parser.add_argument_group('EVA2CLIP', 'EVA2CLIP Configurations')\r\n group.add_argument('--image-size', nargs='+', type=int, default=[224, 224])\r\n group.add_argument('--pre-len', type=int, default=1) # [cls] by default\r\n group.add_argument('--post-len', type=int, default=0) # empty by default, but sometimes with special tokens, such as [det] in yolos.\r\n group.add_argument('--in-channels', type=int, default=3)\r\n group.add_argument('--patch-size', type=int, default=16)\r\n return parser\r" } ]
from sat.model.official.llama_model import LLaMAModel from sat.model.base_model import BaseMixin from .mixin import LlamaVisionExpertFCMixin, LlamaVisionExpertAttnMixin from sat.resources.urls import MODEL_URLS from .eva_clip_model import EVA2CLIPModel from copy import deepcopy from sat.model.finetune import PTuningV2Mixin from sat.model.finetune.lora2 import LoraMixin import json import torch import torch.nn as nn import argparse
4408
MODEL_URLS["cogvlm-base-224"] = "r2://cogvlm-base-224.zip" MODEL_URLS["cogvlm-base-490"] = "r2://cogvlm-base-490.zip" MODEL_URLS["cogvlm-chat-v1.1"] = "r2://cogvlm-chat-v1.1.zip" MODEL_URLS["cogvlm-grounding-base"] = "r2://cogvlm-grounding-base.zip" MODEL_URLS["cogvlm-grounding-generalist-v1.1"] = "r2://cogvlm-grounding-generalist-v1.1.zip" class GLU(nn.Module): def __init__(self, args, in_features): super().__init__() self.linear_proj = nn.Linear(in_features, args.hidden_size, bias=False) self.norm1 = nn.LayerNorm(args.hidden_size) self.act1 = nn.GELU() self.act2 = nn.functional.silu self.dense_h_to_4h = nn.Linear(args.hidden_size, args.inner_hidden_size, bias=False) self.gate_proj = nn.Linear(args.hidden_size, args.inner_hidden_size, bias=False) self.dense_4h_to_h = nn.Linear(args.inner_hidden_size, args.hidden_size, bias=False) def forward(self, x): x = self.linear_proj(x) x = self.act1(self.norm1(x)) x = self.act2(self.gate_proj(x)) * self.dense_h_to_4h(x) x = self.dense_4h_to_h(x) return x def override_dist_dtype_device_args(args, b={}): if args.mode == 'inference': minimal_args = argparse.Namespace( world_size=args.world_size, rank=args.rank, local_rank=args.local_rank, skip_init=args.skip_init, use_gpu_initialization=args.use_gpu_initialization, deepspeed=args.deepspeed, bf16=args.bf16, fp16=args.fp16, mode=args.mode, device=args.device ) else: minimal_args = argparse.Namespace( world_size=args.world_size, rank=args.rank, local_rank=args.local_rank, skip_init=args.skip_init, use_gpu_initialization=args.use_gpu_initialization, deepspeed=args.deepspeed, bf16=args.bf16, fp16=args.fp16, mode=args.mode, checkpoint_activations=args.checkpoint_activations if not hasattr(args, 'vit_checkpoint_activations') else args.vit_checkpoint_activations, checkpoint_num_layers=args.checkpoint_num_layers, device=args.device, hidden_dropout=0., attention_dropout=0., ) if hasattr(args, 'model_parallel_size'): b['model_parallel_size'] = args.model_parallel_size return argparse.Namespace(**deepcopy(b), **vars(minimal_args)) class ImageMixin(BaseMixin): def __init__(self, args): super().__init__() vit_args = override_dist_dtype_device_args(args, args.eva_args) self.vit_model = EVA2CLIPModel(EVA2CLIPModel.get_args(**vars(vit_args))) self.in_features = 1792 self.linear_proj = GLU(args, self.in_features) self.image_length = args.image_length self.boi = nn.Parameter(torch.zeros(1, 1, args.hidden_size)) self.eoi = nn.Parameter(torch.zeros(1, 1, args.hidden_size)) def word_embedding_forward(self, input_ids, output_cross_layer, **kw_args): vision_inputs = {} for k in kw_args: if k.startswith('vision_') and k != 'vision_expert_mask': vision_inputs[k[7:]] = kw_args[k] if input_ids.shape[1] == 1 or not vision_inputs: return self.transformer.word_embeddings(input_ids) image_emb = self.vit_model(**vision_inputs)[0] image_emb = self.linear_proj(image_emb) image_embed_mask = kw_args['image_embed_mask'] word_embedding = self.transformer.word_embeddings(input_ids).clone() word_embedding[image_embed_mask.bool()] = torch.cat([self.boi.repeat(len(image_emb), 1, 1), image_emb, self.eoi.repeat(len(image_emb), 1, 1)], dim=1).reshape(-1, image_emb.shape[-1]) return word_embedding.contiguous() class CogVLMModel(LLaMAModel): def __init__(self, args, transformer=None, parallel_output=True, **kwargs): super().__init__(args, transformer=transformer, parallel_output=parallel_output, **kwargs) self.image_length = args.image_length self.add_mixin("eva", ImageMixin(args)) self.del_mixin("mlp")
MODEL_URLS["cogvlm-base-224"] = "r2://cogvlm-base-224.zip" MODEL_URLS["cogvlm-base-490"] = "r2://cogvlm-base-490.zip" MODEL_URLS["cogvlm-chat-v1.1"] = "r2://cogvlm-chat-v1.1.zip" MODEL_URLS["cogvlm-grounding-base"] = "r2://cogvlm-grounding-base.zip" MODEL_URLS["cogvlm-grounding-generalist-v1.1"] = "r2://cogvlm-grounding-generalist-v1.1.zip" class GLU(nn.Module): def __init__(self, args, in_features): super().__init__() self.linear_proj = nn.Linear(in_features, args.hidden_size, bias=False) self.norm1 = nn.LayerNorm(args.hidden_size) self.act1 = nn.GELU() self.act2 = nn.functional.silu self.dense_h_to_4h = nn.Linear(args.hidden_size, args.inner_hidden_size, bias=False) self.gate_proj = nn.Linear(args.hidden_size, args.inner_hidden_size, bias=False) self.dense_4h_to_h = nn.Linear(args.inner_hidden_size, args.hidden_size, bias=False) def forward(self, x): x = self.linear_proj(x) x = self.act1(self.norm1(x)) x = self.act2(self.gate_proj(x)) * self.dense_h_to_4h(x) x = self.dense_4h_to_h(x) return x def override_dist_dtype_device_args(args, b={}): if args.mode == 'inference': minimal_args = argparse.Namespace( world_size=args.world_size, rank=args.rank, local_rank=args.local_rank, skip_init=args.skip_init, use_gpu_initialization=args.use_gpu_initialization, deepspeed=args.deepspeed, bf16=args.bf16, fp16=args.fp16, mode=args.mode, device=args.device ) else: minimal_args = argparse.Namespace( world_size=args.world_size, rank=args.rank, local_rank=args.local_rank, skip_init=args.skip_init, use_gpu_initialization=args.use_gpu_initialization, deepspeed=args.deepspeed, bf16=args.bf16, fp16=args.fp16, mode=args.mode, checkpoint_activations=args.checkpoint_activations if not hasattr(args, 'vit_checkpoint_activations') else args.vit_checkpoint_activations, checkpoint_num_layers=args.checkpoint_num_layers, device=args.device, hidden_dropout=0., attention_dropout=0., ) if hasattr(args, 'model_parallel_size'): b['model_parallel_size'] = args.model_parallel_size return argparse.Namespace(**deepcopy(b), **vars(minimal_args)) class ImageMixin(BaseMixin): def __init__(self, args): super().__init__() vit_args = override_dist_dtype_device_args(args, args.eva_args) self.vit_model = EVA2CLIPModel(EVA2CLIPModel.get_args(**vars(vit_args))) self.in_features = 1792 self.linear_proj = GLU(args, self.in_features) self.image_length = args.image_length self.boi = nn.Parameter(torch.zeros(1, 1, args.hidden_size)) self.eoi = nn.Parameter(torch.zeros(1, 1, args.hidden_size)) def word_embedding_forward(self, input_ids, output_cross_layer, **kw_args): vision_inputs = {} for k in kw_args: if k.startswith('vision_') and k != 'vision_expert_mask': vision_inputs[k[7:]] = kw_args[k] if input_ids.shape[1] == 1 or not vision_inputs: return self.transformer.word_embeddings(input_ids) image_emb = self.vit_model(**vision_inputs)[0] image_emb = self.linear_proj(image_emb) image_embed_mask = kw_args['image_embed_mask'] word_embedding = self.transformer.word_embeddings(input_ids).clone() word_embedding[image_embed_mask.bool()] = torch.cat([self.boi.repeat(len(image_emb), 1, 1), image_emb, self.eoi.repeat(len(image_emb), 1, 1)], dim=1).reshape(-1, image_emb.shape[-1]) return word_embedding.contiguous() class CogVLMModel(LLaMAModel): def __init__(self, args, transformer=None, parallel_output=True, **kwargs): super().__init__(args, transformer=transformer, parallel_output=parallel_output, **kwargs) self.image_length = args.image_length self.add_mixin("eva", ImageMixin(args)) self.del_mixin("mlp")
self.add_mixin("mlp", LlamaVisionExpertFCMixin(args.hidden_size, args.inner_hidden_size, args.num_layers, 32))
0
2023-12-27 08:12:37+00:00
8k
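In the CogVLM record above, `ImageMixin.word_embedding_forward` overwrites the masked positions of the word-embedding tensor with a concatenation of a begin-of-image token, the projected image embeddings, and an end-of-image token. The toy sketch below reproduces only that splice; the hidden size, sequence length, and mask layout are assumptions chosen for illustration.

import torch

hidden = 8
word_embedding = torch.zeros(1, 10, hidden)             # 10 token slots of text embeddings
image_emb = torch.randn(1, 4, hidden)                   # 4 projected image-patch embeddings
boi = torch.ones(1, 1, hidden)                          # stand-in for the learned boi parameter
eoi = -torch.ones(1, 1, hidden)                         # stand-in for the learned eoi parameter
image_embed_mask = torch.zeros(1, 10, dtype=torch.bool)
image_embed_mask[:, 2:8] = True                         # 6 masked slots: boi + 4 patches + eoi
word_embedding[image_embed_mask] = torch.cat(
    [boi, image_emb, eoi], dim=1).reshape(-1, hidden)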
Emperor-WS/PyEmber
ember/nn/modules/pool.py
[ { "identifier": "Module", "path": "ember/nn/modules/module.py", "snippet": "class Module(ABC):\n \"\"\"\n Base class for all neural network modules.\n\n Attributes:\n training: Indicates whether the module is in training mode.\n _modules: Dictionary to store sub-modules.\n _params: Dictionary to store parameters.\n _grads: Dictionary to store gradients.\n _cache: Dictionary to store cached values.\n\n Methods:\n forward(self, *inputs, **kwargs): Abstract method to define forward pass.\n backward(self, *outputs, **kwargs): Abstract method to define backward pass.\n train(self): Set the module to training mode.\n eval(self): Set the module to evaluation mode.\n add(self, *modules): Add sub-modules to the module.\n parameters(self): Generator for accessing parameters.\n modules(self): Generator for accessing sub-modules.\n cache(self): Generator for accessing cached values.\n gradients(self): Generator for accessing gradients.\n zero_grad(self): Zeroes the gradients of all parameters.\n state_dict(self): Returns the state dictionary of parameters.\n load_state(self, state_dict): Loads state from a state dictionary.\n save(self, filename='model.pickle'): Saves the entire model to a file using pickle.\n save_dict(self, filename='state_dict.json'): Saves the state dictionary to a JSON file.\n cpu(self): Moves all parameters to CPU.\n cuda(self): Moves all parameters to GPU.\n get_name(self): Returns the name of the module.\n inner_repr(self): Returns a string representation for inner modules.\n __repr__(self): Returns a string representation of the module.\n __call__(self, *inputs, **kwargs): Calls the forward method.\n __setattr__(self, key, value): Custom method to handle attribute setting.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor for the Module class.\n\n Initializes training mode and dictionaries for sub-modules, parameters, gradients, and cache.\n\n \"\"\"\n self.training = True\n self._modules = OrderedDict()\n self._params = OrderedDict()\n self._grads = OrderedDict()\n self._cache = OrderedDict()\n\n @abstractmethod\n def forward(self, *inputs, **kwargs):\n \"\"\"\n Abstract method for the forward pass.\n\n Args:\n *inputs: Variable-length argument list for inputs.\n **kwargs: Keyword arguments for inputs.\n\n Raises:\n NotImplementedError: If the method is not implemented in the subclass.\n\n \"\"\"\n raise NotImplementedError\n\n def backward(self, *outputs, **kwargs):\n \"\"\"\n Abstract method for the backward pass.\n\n Args:\n *outputs: Variable-length argument list for outputs.\n **kwargs: Keyword arguments for outputs.\n\n Raises:\n NotImplementedError: If the method is not implemented in the subclass.\n\n \"\"\"\n raise NotImplementedError\n\n def train(self):\n \"\"\"\n Set the module to training mode.\n\n Enables gradient tracking for all parameters.\n\n \"\"\"\n self.training = True\n for param in self.parameters():\n param.requires_grad = True\n\n def eval(self):\n \"\"\"\n Set the module to evaluation mode.\n\n Disables gradient tracking for all parameters.\n\n \"\"\"\n self.training = False\n for param in self.parameters():\n param.requires_grad = False\n\n def add(self, *modules):\n \"\"\"\n Add sub-modules to the module.\n\n Args:\n *modules: Variable-length argument list for sub-modules.\n\n \"\"\"\n for module in modules:\n idx = len(self._modules)\n name = f\"{idx}\"\n setattr(self, name, module)\n self._modules[name] = module\n\n def parameters(self):\n \"\"\"\n Generator for accessing parameters.\n\n Yields:\n Parameter: Parameters of the module.\n\n 
\"\"\"\n for name, value in inspect.getmembers(self):\n if isinstance(value, Parameter):\n yield value\n elif isinstance(value, Module):\n yield from value.parameters()\n\n def modules(self):\n \"\"\"\n Generator for accessing sub-modules.\n\n Yields:\n Module: Sub-modules of the module.\n\n \"\"\"\n yield from self._modules.values()\n\n def cache(self):\n \"\"\"\n Generator for accessing cached values.\n\n Yields:\n dict: Cached values of the module.\n\n \"\"\"\n for module in self.modules():\n yield module._cache\n\n def gradients(self):\n \"\"\"\n Generator for accessing gradients.\n\n Yields:\n dict: Gradients of the module.\n\n \"\"\"\n for module in self.modules():\n yield module._grads\n\n def zero_grad(self):\n \"\"\"\n Zeroes the gradients of all parameters.\n\n \"\"\"\n for parameter in self.parameters():\n parameter.zero_grad()\n\n def state_dict(self):\n \"\"\"\n Returns the state dictionary of parameters.\n\n Returns:\n OrderedDict: State dictionary of parameters.\n\n \"\"\"\n state = OrderedDict()\n for i, param in enumerate(self.parameters()):\n state[f'param{i}'] = param.tolist()\n return state\n\n def load_state(self, state_dict):\n \"\"\"\n Loads state from a state dictionary.\n\n Args:\n state_dict (OrderedDict): State dictionary to load.\n\n Raises:\n UserWarning: If the shape from the state_dict does not match the model's parameter shape.\n\n \"\"\"\n for i, param in enumerate(self.parameters()):\n data = state_dict[f'param{i}']\n if param.shape != data.shape:\n warnings.warn(f\"Shape from the `state_dict` does not match model's parameter shape. \"\n f\"Got {data.shape}, expected {param.shape}.\", UserWarning, stacklevel=2)\n param.data = Parameter(data=data)\n\n def save(self, filename='model.pickle'):\n \"\"\"\n Saves the entire model to a file using pickle.\n\n Args:\n filename (str): File name for saving the model.\n\n \"\"\"\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n\n def save_dict(self, filename='state_dict.json'):\n \"\"\"\n Saves the state dictionary to a JSON file.\n\n Args:\n filename (str): File name for saving the state dictionary.\n\n \"\"\"\n state = self.state_dict()\n with open(filename, 'w') as f:\n json.dump(state, f)\n\n def cpu(self):\n \"\"\"\n Moves all parameters to CPU.\n\n \"\"\"\n for parameter in self.parameters():\n parameter.cpu()\n\n def cuda(self):\n \"\"\"\n Moves all parameters to GPU.\n\n \"\"\"\n for parameter in self.parameters():\n parameter.cuda()\n\n def get_name(self):\n \"\"\"\n Returns the name of the module.\n\n Returns:\n str: Name of the module.\n\n \"\"\"\n return self.__class__.__name__\n\n def inner_repr(self):\n \"\"\"\n Returns a string representation for inner modules.\n\n Returns:\n str: String representation for inner modules.\n\n \"\"\"\n return \"\"\n\n def __repr__(self):\n \"\"\"\n Returns a string representation of the module.\n\n Returns:\n str: String representation of the module.\n\n \"\"\"\n # Representation similar to PyTorch\n string = f\"{self.get_name()}(\"\n tab = \" \"\n modules = self._modules\n if modules == {}:\n string += f'\\n{tab}(parameters): {self.inner_repr()}'\n else:\n for key, module in modules.items():\n string += f\"\\n{tab}({key}): {module.get_name()}({module.inner_repr()})\"\n return f'{string}\\n)'\n\n def __call__(self, *inputs, **kwargs):\n \"\"\"\n Calls the forward method.\n\n Args:\n *inputs: Variable-length argument list for inputs.\n **kwargs: Keyword arguments for inputs.\n\n Returns:\n Output: Result of the forward pass.\n\n \"\"\"\n return 
self.forward(*inputs, **kwargs)\n\n def __setattr__(self, key, value):\n \"\"\"\n Custom method to handle attribute setting.\n\n Args:\n key (str): Attribute key.\n value: Attribute value.\n\n \"\"\"\n # First initialize the attribute we want to add\n self.__dict__[key] = value\n # Then update the inner dictionaries '_modules', '_params'\n if isinstance(value, Module):\n self._modules[key] = value\n elif isinstance(value, Parameter):\n self._params[key] = value" }, { "identifier": "im2col", "path": "ember/nn/modules/_utils.py", "snippet": "def im2col(input_data, filter_height, filter_width, stride=1, pad=0):\n \"\"\"\n Convert image data to column data using im2col algorithm.\n\n Args:\n - input_data (numpy.ndarray): 4D input image data (N, C, H, W).\n - filter_height (int): Height of the filter.\n - filter_width (int): Width of the filter.\n - stride (int): Stride of the filter (default is 1).\n - padding (int): Padding size (default is 0).\n\n Returns:\n - numpy.ndarray: 2D column data.\n\n Raises:\n - AssertionError: If the parameters lead to invalid calculations.\n\n The im2col algorithm reshapes image data to column data for efficient matrix multiplication.\n It considers the filter size, stride, and padding to create non-overlapping blocks of the input data.\n\n \"\"\"\n N, C, input_height, input_width = input_data.shape\n assert (input_height + 2 * pad - filter_height) % stride == 0, f'Invalid parameters for im2col: ' \\\n f'(H + 2 * pad - filter_h) % stride != 0, got ' \\\n f'H={input_height}, pad={pad}, filter_h={filter_height}, stride={stride}'\n assert (input_width + 2 * pad - filter_width) % stride == 0, f'Invalid parameters for im2col: ' \\\n f'(W + 2 * pad - filter_w) % stride != 0, got ' \\\n f'W={input_width}, pad={pad}, filter_w={filter_width}, stride={stride}'\n\n # Calculate the output dimensions\n output_height = (input_height + 2 * pad - filter_height) // stride + 1\n output_width = (input_width + 2 * pad - filter_width) // stride + 1\n\n # Apply padding to the input data\n padding_config = ((0, 0), (0, 0), (pad, pad), (pad, pad))\n input_data_padded = ember.pad(input_data, padding_config)\n\n # Initialize an empty array to store the column data\n col_data = ember.zeros(\n (N, C, filter_height, filter_width, output_height, output_width))\n\n # Iterate over the filter and input data to fill the column array\n for row in range(filter_height):\n row_max = row + stride * output_height\n for col in range(filter_width):\n col_max = col + stride * output_width\n col_data[:, :, row, col, :, :] = input_data_padded[:,\n :, row:row_max:stride, col:col_max:stride]\n\n # Transpose and reshape the column data to the final form\n col_data = col_data.transpose(0, 4, 5, 1, 2, 3).reshape(\n N * output_height * output_width, -1)\n\n return col_data" }, { "identifier": "col2im", "path": "ember/nn/modules/_utils.py", "snippet": "def col2im(col_data, input_shape, filter_height, filter_width, stride=1, pad=0):\n \"\"\"\n Convert column data back to image data using col2im algorithm.\n\n Args:\n - col_data (numpy.ndarray): 2D column data.\n - input_shape (tuple): Shape of the input image (N, C, H, W).\n - filter_height (int): Height of the filter.\n - filter_width (int): Width of the filter.\n - stride (int): Stride of the filter (default is 1).\n - padding (int): Padding size (default is 0).\n\n Returns:\n - numpy.ndarray: 4D image data.\n\n Raises:\n - AssertionError: If the parameters lead to invalid calculations.\n\n The col2im algorithm reconstructs image data from column data. 
It reverses the im2col operation\n by distributing the values of the columns back to their corresponding positions in the original image.\n\n \"\"\"\n N, C, input_height, input_width = input_shape\n\n assert (input_height + 2 * pad - filter_height) % stride == 0, f'Invalid parameters for col2im: ' \\\n f'(H + 2 * pad - filter_h) % stride != 0, got ' \\\n f'H={input_height}, pad={pad}, filter_h={filter_height}, stride={stride}'\n assert (input_width + 2 * pad - filter_width) % stride == 0, f'Invalid parameters for col2im: ' \\\n f'(W + 2 * pad - filter_w) % stride != 0, got ' \\\n f'W={input_width}, pad={pad}, filter_w={filter_width}, stride={stride}'\n\n # Calculate the output dimensions\n output_height = (input_height + 2 * pad - filter_height) // stride + 1\n output_width = (input_width + 2 * pad - filter_width) // stride + 1\n\n # Reshape the column data and transpose to the appropriate form\n col_data = col_data.reshape(N, output_height, output_width,\n C, filter_height, filter_width).transpose(0, 3, 4, 5, 1, 2)\n\n # Initialize an empty array to store the reconstructed image data\n output_data = np.zeros((N, C, input_height + 2 * pad +\n stride - 1, input_width + 2 * pad + stride - 1))\n\n # Iterate over the filter and column data to reconstruct the image\n for row in range(filter_height):\n row_max = row + stride * output_height\n for col in range(filter_width):\n col_max = col + stride * output_width\n output_data[:, :, row:row_max:stride,\n col:col_max:stride] += col_data[:, :, row, col, :, :]\n\n # Return the final image data with appropriate cropping\n return output_data[:, :, pad:input_height + pad, pad:input_width + pad]" } ]
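The im2col / col2im helpers in the context above do the heavy lifting for ember's convolution and pooling layers. As a rough, NumPy-only sketch of the same column layout (ember's pad/zeros wrappers are swapped for their NumPy equivalents, and the tensor sizes are made up for illustration):

```python
# Illustrative NumPy stand-in for ember's im2col: each row of the result is one
# flattened receptive field, so a conv/pool reduces to a per-row matrix operation.
import numpy as np

def im2col_np(x, fh, fw, stride=1, pad=0):
    N, C, H, W = x.shape
    out_h = (H + 2 * pad - fh) // stride + 1
    out_w = (W + 2 * pad - fw) // stride + 1
    x = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)))
    col = np.zeros((N, C, fh, fw, out_h, out_w), dtype=x.dtype)
    for r in range(fh):
        r_max = r + stride * out_h
        for c in range(fw):
            c_max = c + stride * out_w
            col[:, :, r, c, :, :] = x[:, :, r:r_max:stride, c:c_max:stride]
    # (N, out_h, out_w, C, fh, fw) -> one row per output position
    return col.transpose(0, 4, 5, 1, 2, 3).reshape(N * out_h * out_w, -1)

x = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
col = im2col_np(x, 2, 2, stride=2, pad=0)
print(col.shape)  # (N * out_h * out_w, C * fh * fw) == (8, 12)
```

col2im performs the inverse bookkeeping, accumulating overlapping contributions back into the padded image before cropping.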
import numpy as np import ember from .module import Module from ._utils import im2col, col2im
4336
class MaxPool2d(Module): """ 2D Max Pooling Layer for Convolutional Neural Networks. This layer downsamples the spatial dimensions of the input tensor by taking the maximum value within a defined pool size, with optional stride and padding. Args: - kernel_size (int or tuple of int): Size of the pooling window. If int, the window is square. - stride (int): Step size to slide the pooling window. - pad (int): Zero-padding added to the input. Methods: - forward(input_data): Performs the forward pass for max pooling. - backward(dout): Computes the backward pass for max pooling. """ def __init__(self, kernel_size, stride=1, pad=0): """ Constructor for MaxPool2d. Args: - kernel_size (int or tuple of int): Size of the pooling window. If int, the window is square. - stride (int): Step size to slide the pooling window. - pad (int): Zero-padding added to the input. """ super().__init__() # If pool_size is an int, convert it to a tuple (square window) if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) self.kernel_size = kernel_size self.stride = stride self.pad = pad def forward(self, input_data): """ Performs the forward pass for max pooling. Args: - input_data: The input tensor. Returns: - out: The output tensor after max pooling. """ # Extract dimensions N, C, input_height, input_width = input_data.shape # Calculate output dimensions out_h = int(1 + (input_height - self.kernel_size[0]) / self.stride) out_w = int(1 + (input_width - self.kernel_size[1]) / self.stride) # Apply im2col to input_data col = im2col(input_data, *self.kernel_size, self.stride, self.pad) col = col.reshape(-1, np.product(self.kernel_size)) # Find the indices of the maximum values and the maximum values argmax = ember.argmax(col, axis=1) out = ember.max(col, axis=1) out = out.reshape(N, out_h + 2 * self.pad, out_w + 2 * self.pad, C).transpose(0, 3, 1, 2) # Cache input_data and argmax for backward pass self._cache['x'] = input_data self._cache['argmax'] = argmax return out def backward(self, dout): """ Computes the backward pass for max pooling. Args: - dout: The gradient of the output. Returns: - dx: The gradient with respect to the input. """ # Transpose dout for easier manipulation dout = dout.transpose(0, 2, 3, 1) # Calculate pool size pool_size = np.product(self.kernel_size) # Create a matrix with zeros and assign gradients to max positions dmax = ember.zeros((dout.size, pool_size)) x = self._cache['x'] argmax = self._cache['argmax'] dmax[ember.arange(argmax.size), argmax.flatten()] = dout.flatten() dmax = dmax.reshape(dout.shape + (pool_size,)) # Reshape dmax for col2im dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1) # Apply col2im to get the gradient with respect to the input
class MaxPool2d(Module): """ 2D Max Pooling Layer for Convolutional Neural Networks. This layer downsamples the spatial dimensions of the input tensor by taking the maximum value within a defined pool size, with optional stride and padding. Args: - kernel_size (int or tuple of int): Size of the pooling window. If int, the window is square. - stride (int): Step size to slide the pooling window. - pad (int): Zero-padding added to the input. Methods: - forward(input_data): Performs the forward pass for max pooling. - backward(dout): Computes the backward pass for max pooling. """ def __init__(self, kernel_size, stride=1, pad=0): """ Constructor for MaxPool2d. Args: - kernel_size (int or tuple of int): Size of the pooling window. If int, the window is square. - stride (int): Step size to slide the pooling window. - pad (int): Zero-padding added to the input. """ super().__init__() # If pool_size is an int, convert it to a tuple (square window) if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) self.kernel_size = kernel_size self.stride = stride self.pad = pad def forward(self, input_data): """ Performs the forward pass for max pooling. Args: - input_data: The input tensor. Returns: - out: The output tensor after max pooling. """ # Extract dimensions N, C, input_height, input_width = input_data.shape # Calculate output dimensions out_h = int(1 + (input_height - self.kernel_size[0]) / self.stride) out_w = int(1 + (input_width - self.kernel_size[1]) / self.stride) # Apply im2col to input_data col = im2col(input_data, *self.kernel_size, self.stride, self.pad) col = col.reshape(-1, np.product(self.kernel_size)) # Find the indices of the maximum values and the maximum values argmax = ember.argmax(col, axis=1) out = ember.max(col, axis=1) out = out.reshape(N, out_h + 2 * self.pad, out_w + 2 * self.pad, C).transpose(0, 3, 1, 2) # Cache input_data and argmax for backward pass self._cache['x'] = input_data self._cache['argmax'] = argmax return out def backward(self, dout): """ Computes the backward pass for max pooling. Args: - dout: The gradient of the output. Returns: - dx: The gradient with respect to the input. """ # Transpose dout for easier manipulation dout = dout.transpose(0, 2, 3, 1) # Calculate pool size pool_size = np.product(self.kernel_size) # Create a matrix with zeros and assign gradients to max positions dmax = ember.zeros((dout.size, pool_size)) x = self._cache['x'] argmax = self._cache['argmax'] dmax[ember.arange(argmax.size), argmax.flatten()] = dout.flatten() dmax = dmax.reshape(dout.shape + (pool_size,)) # Reshape dmax for col2im dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1) # Apply col2im to get the gradient with respect to the input
dx = col2im(dcol, x.shape, *
2
2023-12-23 23:11:58+00:00
8k
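The subtle part of the MaxPool2d record above is the backward pass: the cached argmax indices route the upstream gradient only to the element that won each pooling window. A minimal NumPy sketch of that scatter with hand-made numbers (the real layer obtains `col` from im2col and `argmax` from `self._cache`):

```python
# Gradient scatter used by max-pool backward: zeros everywhere except the
# position of each window's maximum, which receives the upstream gradient.
import numpy as np

pool_size = 4                      # e.g. a 2x2 window, flattened
col = np.array([[1., 3., 2., 0.],  # one row per pooling window
                [5., 4., 9., 7.]])
argmax = col.argmax(axis=1)        # cached during the forward pass
dout = np.array([0.5, -1.0])       # upstream gradient, one value per window

dmax = np.zeros((dout.size, pool_size))
dmax[np.arange(argmax.size), argmax] = dout
print(dmax)
# [[ 0.   0.5  0.   0. ]
#  [ 0.   0.  -1.   0. ]]
```

Reshaping `dmax` and feeding it through col2im then yields the input gradient, which is what the truncated next_line above begins to do.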
Hassi34/iot-device-identification
src/stage_04_training_and_eval.py
[ { "identifier": "read_yaml", "path": "src/utils/common.py", "snippet": "def read_yaml(path_to_yaml: str) -> dict:\n with open(path_to_yaml) as yaml_file:\n content = yaml.safe_load(yaml_file)\n return content" }, { "identifier": "get_logger", "path": "src/utils/sys_logging.py", "snippet": "def get_logger(logs_filepath: str):\n logger.add(\n logs_filepath,\n format=\"{time} | {level} | {name}.{module}:{line} | {message}\",\n level=\"DEBUG\",\n rotation=\"10 KB\",\n retention=\"10 days\",\n compression=\"zip\",\n colorize=True,\n enqueue=True,\n catch=True,\n encoding=\"utf-8\",\n )\n return logger" }, { "identifier": "MLFlowManager", "path": "src/utils/mlflow_ops.py", "snippet": "class MLFlowManager:\n def __init__(self):\n if mlflow.tracking.is_tracking_uri_set():\n self.client = MlflowClient()\n else:\n raise Exception(\"Tracking URI not set\")\n\n def get_or_create_an_experiment(self, experiment_name):\n exp = mlflow.get_experiment_by_name(experiment_name)\n if exp is None:\n exp_id = mlflow.create_experiment(experiment_name)\n return exp_id\n return exp.experiment_id\n\n def latest_model_version(self, model_name) -> int:\n return self.client.get_latest_versions(model_name)[0].version\n\n @property\n def get_latest_version_model_uri(self, model_name) -> str:\n model_uri = f\"models:/{model_name}/{self.latest_model_version(model_name)}\"\n return model_uri\n\n def load_latest_model_version(self, model_name):\n return load_model(self.get_latest_version_model_uri(model_name))\n\n def get_best_run_id_and_model_uri(\n self, experiment_id: str, metric_name: str = \"metrics.mae\", ascending=True\n ):\n runs = mlflow.search_runs(f\"{experiment_id}\")\n runs = runs.dropna(subset=[\"tags.mlflow.log-model.history\"])\n runs.sort_values(by=[metric_name], ascending=ascending, inplace=True)\n runs.to_csv(\"mlflow.csv\", index=False)\n runs.reset_index(inplace=True, drop=True)\n best_run_id = runs[\"run_id\"][0]\n\n best_run = runs[runs[\"run_id\"] == best_run_id]\n artifact_uri = best_run[\"artifact_uri\"][0]\n\n logged_model_dir = best_run[\"tags.mlflow.log-model.history\"][0].split(\",\")[1:2]\n logged_model_dir = (\n logged_model_dir[0].strip().split(\":\")[1].replace('\"', \"\").strip()\n )\n\n model_uri = str(artifact_uri) + \"/\" + str(logged_model_dir)\n\n return best_run_id, model_uri\n\n def print_registered_model(self, model_name):\n for model in self.client.search_registered_models(\n filter_string=f\"name LIKE {model_name}\"\n ):\n for model_version in model.latest_versions:\n print(\n f\"name : {model_version.name} run_id : {model_version.run_id} version : {model_version.version} stage : {model_version.current_stage}\"\n )\n\n def rename_a_registered_model(self, current_name, new_name):\n self.client.rename_registered_model(\n name=current_name,\n new_name=new_name,\n )\n\n def transition_model_version_stage(self, model_name, model_version, stage):\n self.client.transition_model_version_stage(\n name=model_name, version=model_version, stage=stage\n )\n\n def log_artifact(self, artifact_path=str):\n mlflow.log_artifact(artifact_path)" }, { "identifier": "load_np_arr_from_gz", "path": "src/utils/data_ops.py", "snippet": "def load_np_arr_from_gz(filepath: str) -> np.ndarray:\n with gzip.GzipFile(filepath, \"r\") as f:\n return np.load(f)" }, { "identifier": "NN", "path": "src/lightning_pckg/model.py", "snippet": "class NN(pl.LightningModule):\n def __init__(self, input_size, learning_rate, num_classes):\n super().__init__()\n\n self.FC1 = nn.Linear(input_size, 500)\n self.FC2 = 
nn.Linear(500, 200)\n self.FC3 = nn.Linear(200, 50)\n self.FC4 = nn.Linear(50, num_classes)\n self.bn1 = nn.BatchNorm1d(500)\n self.bn2 = nn.BatchNorm1d(200)\n self.bn3 = nn.BatchNorm1d(50)\n self.drops = nn.Dropout(0.3)\n\n self.lr = learning_rate\n self.loss_fn = nn.CrossEntropyLoss()\n self.accuracy = torchmetrics.Accuracy(\n task=\"multiclass\", num_classes=num_classes\n )\n self.f1_score = torchmetrics.F1Score(task=\"multiclass\", num_classes=num_classes)\n\n def forward(self, x):\n x = F.relu(self.FC1(x))\n x = self.bn1(x)\n x = F.relu(self.FC2(x))\n x = self.drops(x)\n x = self.bn2(x)\n x = F.relu(self.FC3(x))\n x = self.bn3(x)\n x = self.FC4(x)\n return x\n\n def training_step(self, batch):\n loss, scores, y = self._common_step(batch)\n self.log_dict(\n {\n \"train_loss\": loss,\n },\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n )\n return {\"loss\": loss, \"scores\": scores, \"y\": y}\n\n def training_epoch_end(self, outputs):\n scores = torch.cat([x[\"scores\"] for x in outputs])\n y = torch.cat([x[\"y\"] for x in outputs])\n self.log_dict(\n {\n \"train_acc\": self.accuracy(scores, y),\n \"train_f1\": self.f1_score(scores, y),\n },\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n )\n\n def validation_step(self, batch, batch_idx):\n loss, scores, y = self._common_step(batch)\n self.log(\"val_loss\", loss)\n return loss\n\n def test_step(self, batch, batch_idx):\n loss, scores, y = self._common_step(batch)\n self.log(\"test_loss\", loss)\n return loss\n\n def _common_step(self, batch):\n x, y = batch\n x = x.reshape(x.size(0), -1)\n scores = self.forward(x)\n loss = self.loss_fn(scores, y)\n return loss, scores, y\n\n def predict_step(self, batch):\n x, y = batch\n x = x.reshape(x.size(0), -1)\n scores = self.forward(x)\n preds = torch.argmax(scores, dim=1)\n return preds\n\n def configure_optimizers(self):\n return optim.Adam(self.parameters(), lr=self.lr)" }, { "identifier": "plot_confusion_matrix", "path": "src/utils/ml.py", "snippet": "def plot_confusion_matrix(\n self,\n img_path: str,\n column_label: Union[list[int], list[str]],\n index_label: Union[list[int], list[str]],\n) -> None:\n confusion_matrix_df = pd.DataFrame(\n confusion_matrix(self.y_indices, self.predicted_indices, normalize=\"true\")\n )\n confusion_matrix_df.columns = column_label\n confusion_matrix_df.index = index_label\n ax = sns.heatmap(confusion_matrix_df, annot=False)\n Path(img_path).parent.absolute().mkdir(parents=True, exist_ok=True)\n ax.figure.savefig(img_path)\n plt.clf()" }, { "identifier": "PrintingCallback", "path": "src/lightning_pckg/training_callbacks.py", "snippet": "class PrintingCallback(Callback):\n def __init__(self):\n super().__init__()\n\n def on_train_start(self, trainer, pl_module):\n print(\"Starting to train...\")\n\n def on_train_end(self, trainer, pl_module):\n print(\"Training is done!\")" }, { "identifier": "IoT_Dataset", "path": "src/utils/ml.py", "snippet": "class IoT_Dataset(Dataset):\n def __init__(self, X, y):\n self.X = X\n self.y = y\n\n def __len__(self):\n return len(self.y)\n\n def __getitem__(self, idx):\n return self.X[idx], self.y[idx]" }, { "identifier": "get_default_device", "path": "src/utils/ml.py", "snippet": "def get_default_device():\n \"\"\"Pick GPU if available, else CPU\"\"\"\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n else:\n return torch.device(\"cpu\")" }, { "identifier": "to_device", "path": "src/utils/ml.py", "snippet": "def to_device(data, device):\n \"\"\"Move tensor(s) to chosen device\"\"\"\n if isinstance(data, 
(list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True)" }, { "identifier": "DeviceDataLoader", "path": "src/utils/ml.py", "snippet": "class DeviceDataLoader:\n \"\"\"Wrap a dataloader to move data to a device\"\"\"\n\n def __init__(self, dl, device):\n self.dl = dl\n self.device = device\n\n def __iter__(self):\n \"\"\"Yield a batch of data after moving it to device\"\"\"\n for b in self.dl:\n yield to_device(b, self.device)\n\n def __len__(self):\n \"\"\"Number of batches\"\"\"\n return len(self.dl)" } ]
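The to_device / DeviceDataLoader pair in the context above is only defined, never exercised. A minimal usage sketch with toy tensors and the CPU device (the two helpers are restated here in simplified form so the snippet runs on its own):

```python
# Wrap a DataLoader so every batch is moved to the target device as it is yielded.
import torch
from torch.utils.data import DataLoader, TensorDataset

def to_device(data, device):
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)

class DeviceDataLoader:
    def __init__(self, dl, device):
        self.dl, self.device = dl, device
    def __iter__(self):
        for batch in self.dl:
            yield to_device(batch, self.device)
    def __len__(self):
        return len(self.dl)

ds = TensorDataset(torch.randn(8, 4), torch.randint(0, 2, (8,)))
dl = DeviceDataLoader(DataLoader(ds, batch_size=4), torch.device("cpu"))
for x, y in dl:
    print(x.device, x.shape, y.shape)  # cpu torch.Size([4, 4]) torch.Size([4])
```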
import argparse import time import torch.nn.functional as F import io import pytorch_lightning as pl import mlflow import onnxruntime as rt import numpy as np import torch import sys from src.utils.common import read_yaml from src.utils.sys_logging import get_logger from src.utils import MLFlowManager from src.utils.data_ops import load_np_arr_from_gz from sklearn.metrics import classification_report from src.lightning_pckg.model import NN from src.utils.ml import plot_confusion_matrix from src.lightning_pckg.training_callbacks import PrintingCallback from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import TensorBoardLogger from pytorch_lightning.callbacks import EarlyStopping from pathlib import Path from pytorch_lightning.accelerators import CPUAccelerator from src.utils.ml import ( IoT_Dataset, get_default_device, to_device, DeviceDataLoader, ) from torch.utils.data import DataLoader from datetime import datetime
4775
Path(ONNX_TRAINED_MODEL_FILE_PATH).parent.absolute().mkdir( parents=True, exist_ok=True ) model.to_onnx( file_path=ONNX_TRAINED_MODEL_FILE_PATH, input_sample=sample_input, input_names=["input"], verbose=True, ) logger.info(f'ONNX model has been exported to "{ONNX_TRAINED_MODEL_FILE_PATH}"') logger.info("Starting model validation...") onnx_sess = rt.InferenceSession( ONNX_TRAINED_MODEL_FILE_PATH, providers=["CUDAExecutionProvider", "CPUExecutionProvider"], ) input_name = onnx_sess.get_inputs()[0].name input_data = {input_name: sample_input.cpu().numpy()} output = onnx_sess.run(None, input_data) if isinstance(output[0], np.ndarray) and len(output[0][0]) == num_classes: logger.info("Model validation passed") else: logger.critical("Model validation failed!") sys.exit(1) logger.info("Exporting ONNX model for buffering...") buffer = io.BytesIO() torch.onnx.export(model.cpu(), sample_input.cpu(), f=buffer) buffer.seek(0) onnx_model = buffer.read() logger.info("Loaded bytes string from buffer which holds the ONNX model") logger.info("Started logging the ONNX model to MLFlow model repository...") mlflow.onnx.log_model( onnx_model=onnx_model, artifact_path=ONNX_LOGGED_MODEL_DIR, pip_requirements=mlflow.onnx.get_default_pip_requirements(), onnx_execution_providers=["CUDAExecutionProvider", "CPUExecutionProvider"], code_paths=["src/stage_04_training_and_eval.py"], registered_model_name=ONNX_MODEL_NAME, ) logger.info("ONNX has been saved in MLFlow models repo...") latest_model_version = mlflow_service.latest_model_version( model_name=ONNX_MODEL_NAME ) versions = mlflow_service.client.search_model_versions(f"name='{ONNX_MODEL_NAME}'") for version in versions: if version.current_stage == "Staging": mlflow_service.transition_model_version_stage( model_name=ONNX_MODEL_NAME, model_version=version.version, stage="Archived", ) logger.info( f"Model previous version # {version.version} has been transitioned from Staging to Archive" ) mlflow_service.transition_model_version_stage( model_name=ONNX_MODEL_NAME, model_version=latest_model_version, stage="Staging" ) logger.info( f"Model latest version # {latest_model_version} has been transitioned to MLFlow Staging" ) mlflow.log_artifact( f"{CONFUSION_MATRIX_PLOT_FILE_PATH}", artifact_path=ARTIFACT_DIR ) logger.info("Logged the confusion metrics artifact to MLflow artifacts repo") mlflow.log_artifact(f"{RAW_DATA_FILE_PATH}", artifact_path=ARTIFACT_DIR) logger.info( f"Logged the raw data file from {RAW_DATA_FILE_PATH} to MLflow artifacts repo" ) if __name__ == "__main__": args = argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/system.yaml") args.add_argument("--params", "-p", default="configs/params.yaml") parsed_args = args.parse_args() config = read_yaml(parsed_args.config) params = read_yaml(parsed_args.params) LOGS_FILE_PATH = config["logs"]["RUNNING_LOGS_FILE_PATH"] X_TRAIN_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][0] Y_TRAIN_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][1] X_TEST_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][2] Y_TEST_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][3] EXPERIMENT_NAME = config["mlflow"]["EXPERIMENT_NAME"] PYTORCH_MODEL_NAME = config["mlflow"]["PYTORCH_MODEL_NAME"] ONNX_MODEL_NAME = config["mlflow"]["ONNX_MODEL_NAME"] ONNX_LOGGED_MODEL_DIR = config["mlflow"]["ONNX_LOGGED_MODEL_DIR"] ARTIFACT_DIR = config["mlflow"]["ARTIFACT_DIR"] BATCH_SIZE_FOR_DATA_LOADER = params["data_preprocessing"][ "BATCH_SIZE_FOR_DATA_LOADER" ] NUMBER_OF_EPOCHS = 
params["ml"]["MAX_NUMBER_OF_EPOCHS"] MODEL_LOSS_PLOT_FILE_PATH = config["artifacts"]["MODEL_LOSS_PLOT_FILE_PATH"] MODEL_ACCURACY_PLOT_FILE_PATH = config["artifacts"]["MODEL_ACCURACY_PLOT_FILE_PATH"] CONFUSION_MATRIX_PLOT_FILE_PATH = config["artifacts"][ "CONFUSION_MATRIX_PLOT_FILE_PATH" ] TRAINED_MODEL_FILE_PATH = config["artifacts"]["TRAINED_MODEL_FILE_PATH"] ONNX_TRAINED_MODEL_FILE_PATH = config["artifacts"]["ONNX_TRAINED_MODEL_FILE_PATH"] TENSORBOARD_LOGS_DIR = config["logs"]["TENSORBOARD_LOGS_DIR"] ACCELERATOR = params["ml"]["ACCELERATOR"] DEVICES = params["ml"]["DEVICES"] MAX_NUMBER_OF_EPOCHS = params["ml"]["MAX_NUMBER_OF_EPOCHS"] EARLY_STOPPING_PATIENCE = params["ml"]["EARLY_STOPPING_PATIENCE"] METRICS_TO_TRACK = params["ml"]["METRICS_TO_TRACK"] MIN_NUMBER_OF_EPOCHS = params["ml"]["MIN_NUMBER_OF_EPOCHS"] PRECISION = params["ml"]["PRECISION"] LEARNING_RATE = params["ml"]["LEARNING_RATE"] CKPT_DIR = config["ckpt"]["CKPT_DIR"] TOP_K_CKPT_TO_BE_SAVED = params["ckpt"]["TOP_K_CKPT_TO_BE_SAVED"] OPTIMIZATION_MODE = params["ckpt"]["OPTIMIZATION_MODE"] CKPT_FILE_PATH_FOR_TRAINING = config["ckpt"]["CKPT_FILE_PATH_FOR_TRAINING"] RAW_DATA_FILE_PATH = config["data"]["RAW_DATA_FILE_PATH"][0]
# from pytorch_lightning.loggers import MLFlowLogger STAGE = "Training and Evaluation" def train_eval(): torch.set_float32_matmul_precision("medium") X_train = load_np_arr_from_gz(X_TRAIN_FILE_PATH) y_train = load_np_arr_from_gz(Y_TRAIN_FILE_PATH) X_test = load_np_arr_from_gz(X_TEST_FILE_PATH) y_test = load_np_arr_from_gz(Y_TEST_FILE_PATH) logger.info( f"Numpy Arrays have been loaded having the shapes : {X_train.shape, y_train.shape, X_test.shape, y_test.shape}" ) num_classes = len(np.unique(y_train)) X_train = torch.tensor(X_train, dtype=torch.float32) y_train = torch.tensor(y_train).type(torch.LongTensor) X_test = torch.tensor(X_test, dtype=torch.float32) y_test = torch.tensor(y_test).type(torch.LongTensor) logger.info("Convertd numpy arrays to the pytorch tensors") mlflow_service = MLFlowManager() experiment_id = mlflow_service.get_or_create_an_experiment(EXPERIMENT_NAME) timestamp = datetime.now().strftime("%Y%m%d%H%M%S") run_name = config["mlflow"]["RUN_ID_PREFIX"] + "-" + timestamp mlflow.pytorch.autolog( silent=False, log_models=False, registered_model_name=PYTORCH_MODEL_NAME ) train_ds = IoT_Dataset(X_train, y_train) test_ds = IoT_Dataset(X_test, y_test) logger.info("Train and Test Datasets created ") train_dl = DataLoader(train_ds, batch_size=BATCH_SIZE_FOR_DATA_LOADER, shuffle=True) test_dl = DataLoader(test_ds, batch_size=BATCH_SIZE_FOR_DATA_LOADER, shuffle=False) logger.info("Train and Test DataLoaders created ") device = get_default_device() device = 'cpu' logger.info(f'The default device is "{device}"') for x, y in train_dl: input_tensor_length = x.shape[1] sample_input = x[0].unsqueeze(0) break logger.info( f"Input and output tensor length is going to be {input_tensor_length} and {num_classes} respectively" ) model = NN( input_size=input_tensor_length, learning_rate=LEARNING_RATE, num_classes=num_classes, ) to_device(model, device) train_dl = DeviceDataLoader(train_dl, device) test_dl = DeviceDataLoader(test_dl, device) if ACCELERATOR == "cpu": accelerator = CPUAccelerator() elif ACCELERATOR == "gpu": accelerator = 'gpu' trainer = pl.Trainer( logger=TensorBoardLogger(TENSORBOARD_LOGS_DIR, name=EXPERIMENT_NAME), # logger=MLFlowLogger(experiment_name="my_experiment", save_dir="my_logs"), # profiler= 'simple', #find the bottleneck then commitout accelerator=accelerator, devices=DEVICES, min_epochs=MIN_NUMBER_OF_EPOCHS, max_epochs=MAX_NUMBER_OF_EPOCHS, resume_from_checkpoint=CKPT_FILE_PATH_FOR_TRAINING, precision=PRECISION, callbacks=[ PrintingCallback(), EarlyStopping(monitor=METRICS_TO_TRACK, patience=EARLY_STOPPING_PATIENCE), ModelCheckpoint( dirpath=CKPT_DIR, save_top_k=TOP_K_CKPT_TO_BE_SAVED, mode=OPTIMIZATION_MODE, monitor=METRICS_TO_TRACK, filename=f"{time.strftime('%Y%m%d%H%M%S')}-" + "{epoch}-{val_loss:.2f}", verbose=True, ), ], ) logger.info("Starting mlflow experiment...") valid_dl = test_dl # This is just for the demo purpose, you should always specify the different data for test and validation mlflow.start_run(experiment_id=experiment_id, run_name=run_name) trainer.fit(model, train_dl, valid_dl) trainer.validate(model, valid_dl) trainer.test(model, test_dl) # trainer.save_checkpoint y_actual = [] y_predicted_list = [] for x, y in valid_dl: to_device(model, device) out = model(x) probabilities = F.softmax(out, dim=1) y_predicted = torch.max(probabilities, 1)[1] y_predicted_list.extend(y_predicted.tolist()) y_actual.extend(y.tolist()) labels = list(params["labels_mapping"].values()) if len(y_actual) > 0 and len(y_predicted_list) > 0: plot_confusion_matrix( 
CONFUSION_MATRIX_PLOT_FILE_PATH, y=y_actual, predicted_y=y_predicted_list, label_names=labels, ) logger.info("Model has been trained successfully") logger.info(classification_report(y_actual, y_predicted_list, target_names=labels)) logger.info("Converting model to onnx format") model.eval() Path(ONNX_TRAINED_MODEL_FILE_PATH).parent.absolute().mkdir( parents=True, exist_ok=True ) model.to_onnx( file_path=ONNX_TRAINED_MODEL_FILE_PATH, input_sample=sample_input, input_names=["input"], verbose=True, ) logger.info(f'ONNX model has been exported to "{ONNX_TRAINED_MODEL_FILE_PATH}"') logger.info("Starting model validation...") onnx_sess = rt.InferenceSession( ONNX_TRAINED_MODEL_FILE_PATH, providers=["CUDAExecutionProvider", "CPUExecutionProvider"], ) input_name = onnx_sess.get_inputs()[0].name input_data = {input_name: sample_input.cpu().numpy()} output = onnx_sess.run(None, input_data) if isinstance(output[0], np.ndarray) and len(output[0][0]) == num_classes: logger.info("Model validation passed") else: logger.critical("Model validation failed!") sys.exit(1) logger.info("Exporting ONNX model for buffering...") buffer = io.BytesIO() torch.onnx.export(model.cpu(), sample_input.cpu(), f=buffer) buffer.seek(0) onnx_model = buffer.read() logger.info("Loaded bytes string from buffer which holds the ONNX model") logger.info("Started logging the ONNX model to MLFlow model repository...") mlflow.onnx.log_model( onnx_model=onnx_model, artifact_path=ONNX_LOGGED_MODEL_DIR, pip_requirements=mlflow.onnx.get_default_pip_requirements(), onnx_execution_providers=["CUDAExecutionProvider", "CPUExecutionProvider"], code_paths=["src/stage_04_training_and_eval.py"], registered_model_name=ONNX_MODEL_NAME, ) logger.info("ONNX has been saved in MLFlow models repo...") latest_model_version = mlflow_service.latest_model_version( model_name=ONNX_MODEL_NAME ) versions = mlflow_service.client.search_model_versions(f"name='{ONNX_MODEL_NAME}'") for version in versions: if version.current_stage == "Staging": mlflow_service.transition_model_version_stage( model_name=ONNX_MODEL_NAME, model_version=version.version, stage="Archived", ) logger.info( f"Model previous version # {version.version} has been transitioned from Staging to Archive" ) mlflow_service.transition_model_version_stage( model_name=ONNX_MODEL_NAME, model_version=latest_model_version, stage="Staging" ) logger.info( f"Model latest version # {latest_model_version} has been transitioned to MLFlow Staging" ) mlflow.log_artifact( f"{CONFUSION_MATRIX_PLOT_FILE_PATH}", artifact_path=ARTIFACT_DIR ) logger.info("Logged the confusion metrics artifact to MLflow artifacts repo") mlflow.log_artifact(f"{RAW_DATA_FILE_PATH}", artifact_path=ARTIFACT_DIR) logger.info( f"Logged the raw data file from {RAW_DATA_FILE_PATH} to MLflow artifacts repo" ) if __name__ == "__main__": args = argparse.ArgumentParser() args.add_argument("--config", "-c", default="configs/system.yaml") args.add_argument("--params", "-p", default="configs/params.yaml") parsed_args = args.parse_args() config = read_yaml(parsed_args.config) params = read_yaml(parsed_args.params) LOGS_FILE_PATH = config["logs"]["RUNNING_LOGS_FILE_PATH"] X_TRAIN_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][0] Y_TRAIN_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][1] X_TEST_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][2] Y_TEST_FILE_PATH = config["data"]["PREPROCESSED_DATA_FILE_PATHS"][3] EXPERIMENT_NAME = config["mlflow"]["EXPERIMENT_NAME"] PYTORCH_MODEL_NAME = 
config["mlflow"]["PYTORCH_MODEL_NAME"] ONNX_MODEL_NAME = config["mlflow"]["ONNX_MODEL_NAME"] ONNX_LOGGED_MODEL_DIR = config["mlflow"]["ONNX_LOGGED_MODEL_DIR"] ARTIFACT_DIR = config["mlflow"]["ARTIFACT_DIR"] BATCH_SIZE_FOR_DATA_LOADER = params["data_preprocessing"][ "BATCH_SIZE_FOR_DATA_LOADER" ] NUMBER_OF_EPOCHS = params["ml"]["MAX_NUMBER_OF_EPOCHS"] MODEL_LOSS_PLOT_FILE_PATH = config["artifacts"]["MODEL_LOSS_PLOT_FILE_PATH"] MODEL_ACCURACY_PLOT_FILE_PATH = config["artifacts"]["MODEL_ACCURACY_PLOT_FILE_PATH"] CONFUSION_MATRIX_PLOT_FILE_PATH = config["artifacts"][ "CONFUSION_MATRIX_PLOT_FILE_PATH" ] TRAINED_MODEL_FILE_PATH = config["artifacts"]["TRAINED_MODEL_FILE_PATH"] ONNX_TRAINED_MODEL_FILE_PATH = config["artifacts"]["ONNX_TRAINED_MODEL_FILE_PATH"] TENSORBOARD_LOGS_DIR = config["logs"]["TENSORBOARD_LOGS_DIR"] ACCELERATOR = params["ml"]["ACCELERATOR"] DEVICES = params["ml"]["DEVICES"] MAX_NUMBER_OF_EPOCHS = params["ml"]["MAX_NUMBER_OF_EPOCHS"] EARLY_STOPPING_PATIENCE = params["ml"]["EARLY_STOPPING_PATIENCE"] METRICS_TO_TRACK = params["ml"]["METRICS_TO_TRACK"] MIN_NUMBER_OF_EPOCHS = params["ml"]["MIN_NUMBER_OF_EPOCHS"] PRECISION = params["ml"]["PRECISION"] LEARNING_RATE = params["ml"]["LEARNING_RATE"] CKPT_DIR = config["ckpt"]["CKPT_DIR"] TOP_K_CKPT_TO_BE_SAVED = params["ckpt"]["TOP_K_CKPT_TO_BE_SAVED"] OPTIMIZATION_MODE = params["ckpt"]["OPTIMIZATION_MODE"] CKPT_FILE_PATH_FOR_TRAINING = config["ckpt"]["CKPT_FILE_PATH_FOR_TRAINING"] RAW_DATA_FILE_PATH = config["data"]["RAW_DATA_FILE_PATH"][0]
logger = get_logger(LOGS_FILE_PATH)
1
2023-12-25 10:40:19+00:00
8k
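Before registering anything, the same script exports the trained network to ONNX and sanity-checks the exported graph with onnxruntime. A self-contained sketch of that round trip, with a toy torch module standing in for the LightningModule (layer sizes and the 3-class output are invented):

```python
# Export a model to ONNX in memory, reload it with onnxruntime, and check that
# the output has the expected class dimension -- the same validation idea as above.
import io
import numpy as np
import torch
import torch.nn as nn
import onnxruntime as rt

model = nn.Sequential(nn.Linear(16, 8), nn.ReLU(), nn.Linear(8, 3)).eval()
sample_input = torch.randn(1, 16)

buffer = io.BytesIO()
torch.onnx.export(model, sample_input, f=buffer, input_names=["input"])
buffer.seek(0)

sess = rt.InferenceSession(buffer.read(), providers=["CPUExecutionProvider"])
output = sess.run(None, {sess.get_inputs()[0].name: sample_input.numpy()})
assert isinstance(output[0], np.ndarray) and output[0].shape[-1] == 3
print("ONNX round-trip check passed:", output[0].shape)
```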
see2023/Bert-VITS2-ext
webui.py
[ { "identifier": "split_by_language", "path": "tools/sentence.py", "snippet": "def split_by_language(text: str, target_languages: list = None) -> list:\n pattern = (\n r\"[\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\>\\=\\?\\@\\[\\]\\{\\}\\\\\\\\\\^\\_\\`\"\n r\"\\!?\\。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」\"\n r\"『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘\\'\\‛\\“\\”\\„\\‟…‧﹏.]+\"\n )\n sentences = re.split(pattern, text)\n\n pre_lang = \"\"\n start = 0\n end = 0\n sentences_list = []\n\n if target_languages is not None:\n sorted_target_languages = sorted(target_languages)\n if sorted_target_languages in [[\"en\", \"zh\"], [\"en\", \"ja\"], [\"en\", \"ja\", \"zh\"]]:\n new_sentences = []\n for sentence in sentences:\n new_sentences.extend(split_alpha_nonalpha(sentence))\n sentences = new_sentences\n\n for sentence in sentences:\n if check_is_none(sentence):\n continue\n\n lang = classify_language(sentence, target_languages)\n\n end += text[end:].index(sentence)\n if pre_lang != \"\" and pre_lang != lang:\n sentences_list.append((text[start:end], pre_lang))\n start = end\n end += len(sentence)\n pre_lang = lang\n sentences_list.append((text[start:], pre_lang))\n\n return sentences_list" }, { "identifier": "infer", "path": "infer.py", "snippet": "def get_net_g(model_path: str, version: str, device: str, hps):\ndef get_text(text, language_str, hps, device, style_text=None, style_weight=0.7):\ndef infer(\n text,\n emotion,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n sid,\n language,\n hps,\n net_g,\n device,\n reference_audio=None,\n skip_start=False,\n skip_end=False,\n style_text=None,\n style_weight=0.7,\n):\ndef infer_multilang(\n text,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n sid,\n language,\n hps,\n net_g,\n device,\n reference_audio=None,\n emotion=None,\n skip_start=False,\n skip_end=False,\n):" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = 
nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # 初始化权重\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: 
int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "translate", "path": "tools/translate.py", "snippet": "def translate(Sentence: str, to_Language: str = \"jp\", from_Language: str = \"\"):\n \"\"\"\n :param Sentence: 待翻译语句\n :param from_Language: 待翻译语句语言\n :param to_Language: 目标语言\n :return: 翻译后语句 出错时返回None\n\n 常见语言代码:中文 zh 英语 en 日语 jp\n \"\"\"\n appid = config.translate_config.app_key\n key = config.translate_config.secret_key\n if appid == \"\" or key == \"\":\n return \"请开发者在config.yml中配置app_key与secret_key\"\n url = \"https://fanyi-api.baidu.com/api/trans/vip/translate\"\n texts = Sentence.splitlines()\n outTexts = []\n for t in texts:\n if t != \"\":\n # 签名计算 参考文档 https://api.fanyi.baidu.com/product/113\n salt = str(random.randint(1, 100000))\n signString = appid + t + salt + key\n hs = hashlib.md5()\n hs.update(signString.encode(\"utf-8\"))\n signString = hs.hexdigest()\n if from_Language == \"\":\n from_Language = \"auto\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n payload = {\n \"q\": t,\n \"from\": from_Language,\n \"to\": to_Language,\n \"appid\": appid,\n \"salt\": salt,\n \"sign\": signString,\n }\n # 发送请求\n try:\n response = requests.post(\n url=url, data=payload, headers=headers, timeout=3\n )\n response = response.json()\n if \"trans_result\" in response.keys():\n result = response[\"trans_result\"][0]\n if \"dst\" in result.keys():\n dst = result[\"dst\"]\n outTexts.append(dst)\n except Exception:\n return Sentence\n else:\n outTexts.append(t)\n return \"\\n\".join(outTexts)" } ]
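The translate helper in this record signs every Baidu fanyi request with MD5(appid + query + salt + secret_key), as its inline comments describe. A minimal sketch of just the signing step, with placeholder credentials and no actual HTTP request:

```python
# Build the signed payload expected by the Baidu translate API
# (sign = MD5 of appid + query + salt + secret_key). Credentials are dummies.
import hashlib
import random

appid, secret_key = "20240000000000000", "dummy_secret"
query = "こんにちは"
salt = str(random.randint(1, 100000))
sign = hashlib.md5((appid + query + salt + secret_key).encode("utf-8")).hexdigest()

payload = {"q": query, "from": "auto", "to": "zh",
           "appid": appid, "salt": salt, "sign": sign}
print(payload["sign"])  # 32-character hex digest sent alongside the query
```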
import os import logging import re_matching import torch import torchaudio import utils import gradio as gr import webbrowser import numpy as np import librosa from tools.sentence import split_by_language from infer import infer, latest_version, get_net_g, infer_multilang from models import VisemesNet from config import config from tools.translate import translate
3772
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logging.getLogger("markdown_it").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) logging.basicConfig( level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s" ) logger = logging.getLogger(__name__) net_g = None net_v = None device = config.webui_config.device if device == "mps": os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" def audio_to_visemes(audio_raw, z): global net_v visemes = net_v(z) visemes = visemes.squeeze(0) visemes = visemes.transpose(0, 1) visemes = visemes.data.cpu().float().numpy() print('visemes shape:', visemes.shape) # save as pcm_s16le wav torchaudio.save('tmp.wav', audio_raw, hps.data.sampling_rate, encoding='PCM_S', bits_per_sample=16) # save visemes to tmp.npy np.save('tmp.npy', visemes) # save z to tmp_z.npy like visemes np.save('tmp_z.npy', z.squeeze(0).transpose(0, 1).data.cpu().float().numpy()) print('tmp.wav, tmp.npy, tmp_z.npy saved') def generate_audio( slices, sdp_ratio, noise_scale, noise_scale_w, length_scale, speaker, language, reference_audio, emotion, style_text, style_weight, skip_start=False, skip_end=False, ): audio_list = [] audio_raw_list = [] z_list = [] # silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16) with torch.no_grad(): for idx, piece in enumerate(slices): skip_start = idx != 0 skip_end = idx != len(slices) - 1
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logging.getLogger("markdown_it").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.getLogger("matplotlib").setLevel(logging.WARNING) logging.basicConfig( level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s" ) logger = logging.getLogger(__name__) net_g = None net_v = None device = config.webui_config.device if device == "mps": os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" def audio_to_visemes(audio_raw, z): global net_v visemes = net_v(z) visemes = visemes.squeeze(0) visemes = visemes.transpose(0, 1) visemes = visemes.data.cpu().float().numpy() print('visemes shape:', visemes.shape) # save as pcm_s16le wav torchaudio.save('tmp.wav', audio_raw, hps.data.sampling_rate, encoding='PCM_S', bits_per_sample=16) # save visemes to tmp.npy np.save('tmp.npy', visemes) # save z to tmp_z.npy like visemes np.save('tmp_z.npy', z.squeeze(0).transpose(0, 1).data.cpu().float().numpy()) print('tmp.wav, tmp.npy, tmp_z.npy saved') def generate_audio( slices, sdp_ratio, noise_scale, noise_scale_w, length_scale, speaker, language, reference_audio, emotion, style_text, style_weight, skip_start=False, skip_end=False, ): audio_list = [] audio_raw_list = [] z_list = [] # silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16) with torch.no_grad(): for idx, piece in enumerate(slices): skip_start = idx != 0 skip_end = idx != len(slices) - 1
audio, audio_raw, z = infer(
1
2023-12-27 03:09:11+00:00
8k
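audio_to_visemes in this record mostly dumps debugging artifacts: a 16-bit PCM wav, the (frames x 61) viseme matrix, and the latent z. A hedged sketch with synthetic stand-ins (a sine tone for the audio, random matrices for visemes and z; the latent width of 192 is a guess, not taken from the model config):

```python
# Write the same three debug artifacts as audio_to_visemes, using fake data.
import numpy as np
import torch
import torchaudio

sampling_rate = 44100
t = torch.arange(sampling_rate) / sampling_rate
audio_raw = torch.sin(2 * torch.pi * 440.0 * t).unsqueeze(0)   # (channels=1, samples)
visemes = np.random.rand(120, 61).astype(np.float32)           # (frames, 61 blendshapes)
z = np.random.rand(120, 192).astype(np.float32)                # stand-in latent

torchaudio.save("tmp.wav", audio_raw, sampling_rate,
                encoding="PCM_S", bits_per_sample=16)
np.save("tmp.npy", visemes)
np.save("tmp_z.npy", z)
print("saved tmp.wav, tmp.npy, tmp_z.npy")
```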
chinhsuanwu/ifusion-threestudio
threestudio/models/renderers/nerf_volume_renderer.py
[ { "identifier": "BaseBackground", "path": "threestudio/models/background/base.py", "snippet": "class BaseBackground(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n def configure(self):\n pass\n\n def forward(self, dirs: Float[Tensor, \"B H W 3\"]) -> Float[Tensor, \"B H W Nc\"]:\n raise NotImplementedError" }, { "identifier": "ImportanceEstimator", "path": "threestudio/models/estimators.py", "snippet": "class ImportanceEstimator(AbstractEstimator):\n def __init__(\n self,\n ) -> None:\n super().__init__()\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\"uniform\", \"lindisp\"] = \"uniform\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. 
Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for level_fn, level_samples in zip(prop_sigma_fns, prop_samples):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[:, :1])], dim=-1)\n\n intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)\n t_vals_fine = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n\n t_vals = torch.cat([t_vals, t_vals_fine], dim=-1)\n t_vals, _ = torch.sort(t_vals, dim=-1)\n\n t_starts_ = t_vals[..., :-1]\n t_ends_ = t_vals[..., 1:]\n\n return t_starts_, t_ends_" }, { "identifier": "BaseImplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseImplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n isosurface: bool = True\n isosurface_method: str = \"mt\"\n isosurface_resolution: int = 128\n isosurface_threshold: Union[float, str] = 0.0\n isosurface_chunk: int = 0\n isosurface_coarse_to_fine: bool = True\n isosurface_deformable_grid: bool = False\n isosurface_remove_outliers: bool = True\n isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )\n self.isosurface_helper: Optional[IsosurfaceHelper] = None\n self.unbounded: bool = False\n\n def _initilize_isosurface_helper(self):\n if self.cfg.isosurface and self.isosurface_helper is None:\n if self.cfg.isosurface_method == \"mc-cpu\":\n self.isosurface_helper = MarchingCubeCPUHelper(\n self.cfg.isosurface_resolution\n ).to(self.device)\n elif self.cfg.isosurface_method == \"mt\":\n self.isosurface_helper = MarchingTetrahedraHelper(\n self.cfg.isosurface_resolution,\n f\"load/tets/{self.cfg.isosurface_resolution}_tets.npz\",\n ).to(self.device)\n else:\n raise AttributeError(\n \"Unknown isosurface method {self.cfg.isosurface_method}\"\n )\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n raise NotImplementedError\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n # return the value of the implicit field, could be density / signed distance\n # also return a deformation field if the grid vertices can be optimized\n raise NotImplementedError\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n # return the value of the implicit field, where the zero level set represents the surface\n raise NotImplementedError\n\n def 
_isosurface(self, bbox: Float[Tensor, \"2 3\"], fine_stage: bool = False) -> Mesh:\n def batch_func(x):\n # scale to bbox as the input vertices are in [0, 1]\n field, deformation = self.forward_field(\n scale_tensor(\n x.to(bbox.device), self.isosurface_helper.points_range, bbox\n ),\n )\n field = field.to(\n x.device\n ) # move to the same device as the input (could be CPU)\n if deformation is not None:\n deformation = deformation.to(x.device)\n return field, deformation\n\n assert self.isosurface_helper is not None\n\n field, deformation = chunk_batch(\n batch_func,\n self.cfg.isosurface_chunk,\n self.isosurface_helper.grid_vertices,\n )\n\n threshold: float\n\n if isinstance(self.cfg.isosurface_threshold, float):\n threshold = self.cfg.isosurface_threshold\n elif self.cfg.isosurface_threshold == \"auto\":\n eps = 1.0e-5\n threshold = field[field > eps].mean().item()\n threestudio.info(\n f\"Automatically determined isosurface threshold: {threshold}\"\n )\n else:\n raise TypeError(\n f\"Unknown isosurface_threshold {self.cfg.isosurface_threshold}\"\n )\n\n level = self.forward_level(field, threshold)\n mesh: Mesh = self.isosurface_helper(level, deformation=deformation)\n mesh.v_pos = scale_tensor(\n mesh.v_pos, self.isosurface_helper.points_range, bbox\n ) # scale to bbox as the grid vertices are in [0, 1]\n mesh.add_extra(\"bbox\", bbox)\n\n if self.cfg.isosurface_remove_outliers:\n # remove outliers components with small number of faces\n # only enabled when the mesh is not differentiable\n mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)\n\n return mesh\n\n def isosurface(self) -> Mesh:\n if not self.cfg.isosurface:\n raise NotImplementedError(\n \"Isosurface is not enabled in the current configuration\"\n )\n self._initilize_isosurface_helper()\n if self.cfg.isosurface_coarse_to_fine:\n threestudio.debug(\"First run isosurface to get a tight bounding box ...\")\n with torch.no_grad():\n mesh_coarse = self._isosurface(self.bbox)\n vmin, vmax = mesh_coarse.v_pos.amin(dim=0), mesh_coarse.v_pos.amax(dim=0)\n vmin_ = (vmin - (vmax - vmin) * 0.1).max(self.bbox[0])\n vmax_ = (vmax + (vmax - vmin) * 0.1).min(self.bbox[1])\n threestudio.debug(\"Run isosurface again with the tight bounding box ...\")\n mesh = self._isosurface(torch.stack([vmin_, vmax_], dim=0), fine_stage=True)\n else:\n mesh = self._isosurface(self.bbox)\n return mesh" }, { "identifier": "BaseMaterial", "path": "threestudio/models/materials/base.py", "snippet": "class BaseMaterial(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n requires_normal: bool = False\n requires_tangent: bool = False\n\n def configure(self):\n pass\n\n def forward(self, *args, **kwargs) -> Float[Tensor, \"*B 3\"]:\n raise NotImplementedError\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "create_network_with_input_encoding", "path": "threestudio/models/networks.py", "snippet": "def create_network_with_input_encoding(\n n_input_dims: int, n_output_dims: int, encoding_config, network_config\n) -> nn.Module:\n # input suppose to be range [0, 1]\n network_with_input_encoding: nn.Module\n if encoding_config.otype in [\n \"VanillaFrequency\",\n \"ProgressiveBandHashGrid\",\n ] or network_config.otype in [\"VanillaMLP\", \"SphereInitVanillaMLP\"]:\n encoding = get_encoding(n_input_dims, encoding_config)\n network = get_mlp(encoding.n_output_dims, n_output_dims, network_config)\n network_with_input_encoding = NetworkWithInputEncoding(encoding, network)\n 
else:\n network_with_input_encoding = TCNNNetworkWithInputEncoding(\n n_input_dims=n_input_dims,\n n_output_dims=n_output_dims,\n encoding_config=config_to_primitive(encoding_config),\n network_config=config_to_primitive(network_config),\n )\n return network_with_input_encoding" }, { "identifier": "VolumeRenderer", "path": "threestudio/models/renderers/base.py", "snippet": "class VolumeRenderer(Renderer):\n pass" }, { "identifier": "parse_optimizer", "path": "threestudio/systems/utils.py", "snippet": "def parse_optimizer(config, model):\n if hasattr(config, \"params\"):\n params = [\n {\"params\": get_parameters(model, name), \"name\": name, **args}\n for name, args in config.params.items()\n ]\n threestudio.debug(f\"Specify optimizer params: {config.params}\")\n else:\n params = model.parameters()\n if config.name in [\"FusedAdam\"]:\n import apex\n\n optim = getattr(apex.optimizers, config.name)(params, **config.args)\n elif config.name in [\"Adan\"]:\n from threestudio.systems import optimizers\n\n optim = getattr(optimizers, config.name)(params, **config.args)\n else:\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim" }, { "identifier": "parse_scheduler_to_instance", "path": "threestudio/systems/utils.py", "snippet": "def parse_scheduler_to_instance(config, optimizer):\n if config.name == \"ChainedScheduler\":\n schedulers = [\n parse_scheduler_to_instance(conf, optimizer) for conf in config.schedulers\n ]\n scheduler = lr_scheduler.ChainedScheduler(schedulers)\n elif config.name == \"Sequential\":\n schedulers = [\n parse_scheduler_to_instance(conf, optimizer) for conf in config.schedulers\n ]\n scheduler = lr_scheduler.SequentialLR(\n optimizer, schedulers, milestones=config.milestones\n )\n else:\n scheduler = getattr(lr_scheduler, config.name)(optimizer, **config.args)\n return scheduler" }, { "identifier": "chunk_batch", "path": "threestudio/utils/ops.py", "snippet": "def chunk_batch(func: Callable, chunk_size: int, *args, **kwargs) -> Any:\n if chunk_size <= 0:\n return func(*args, **kwargs)\n B = None\n for arg in list(args) + list(kwargs.values()):\n if isinstance(arg, torch.Tensor):\n B = arg.shape[0]\n break\n assert (\n B is not None\n ), \"No tensor found in args or kwargs, cannot determine batch size.\"\n out = defaultdict(list)\n out_type = None\n # max(1, B) to support B == 0\n for i in range(0, max(1, B), chunk_size):\n out_chunk = func(\n *[\n arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for arg in args\n ],\n **{\n k: arg[i : i + chunk_size] if isinstance(arg, torch.Tensor) else arg\n for k, arg in kwargs.items()\n },\n )\n if out_chunk is None:\n continue\n out_type = type(out_chunk)\n if isinstance(out_chunk, torch.Tensor):\n out_chunk = {0: out_chunk}\n elif isinstance(out_chunk, tuple) or isinstance(out_chunk, list):\n chunk_length = len(out_chunk)\n out_chunk = {i: chunk for i, chunk in enumerate(out_chunk)}\n elif isinstance(out_chunk, dict):\n pass\n else:\n print(\n f\"Return value of func must be in type [torch.Tensor, list, tuple, dict], get {type(out_chunk)}.\"\n )\n exit(1)\n for k, v in out_chunk.items():\n v = v if torch.is_grad_enabled() else v.detach()\n out[k].append(v)\n\n if out_type is None:\n return None\n\n out_merged: Dict[Any, Optional[torch.Tensor]] = {}\n for k, v in out.items():\n if all([vv is None for vv in v]):\n # allow None in return value\n out_merged[k] = None\n elif all([isinstance(vv, torch.Tensor) for vv in v]):\n out_merged[k] = torch.cat(v, dim=0)\n else:\n raise TypeError(\n 
f\"Unsupported types in return value of func: {[type(vv) for vv in v if not isinstance(vv, torch.Tensor)]}\"\n )\n\n if out_type is torch.Tensor:\n return out_merged[0]\n elif out_type in [tuple, list]:\n return out_type([out_merged[i] for i in range(chunk_length)])\n elif out_type is dict:\n return out_merged" }, { "identifier": "get_activation", "path": "threestudio/utils/ops.py", "snippet": "def get_activation(name) -> Callable:\n if name is None:\n return lambda x: x\n name = name.lower()\n if name == \"none\":\n return lambda x: x\n elif name == \"lin2srgb\":\n return lambda x: torch.where(\n x > 0.0031308,\n torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,\n 12.92 * x,\n ).clamp(0.0, 1.0)\n elif name == \"exp\":\n return lambda x: torch.exp(x)\n elif name == \"shifted_exp\":\n return lambda x: torch.exp(x - 1.0)\n elif name == \"trunc_exp\":\n return trunc_exp\n elif name == \"shifted_trunc_exp\":\n return lambda x: trunc_exp(x - 1.0)\n elif name == \"sigmoid\":\n return lambda x: torch.sigmoid(x)\n elif name == \"tanh\":\n return lambda x: torch.tanh(x)\n elif name == \"shifted_softplus\":\n return lambda x: F.softplus(x - 1.0)\n elif name == \"scale_-11_01\":\n return lambda x: x * 0.5 + 0.5\n else:\n try:\n return getattr(F, name)\n except AttributeError:\n raise ValueError(f\"Unknown activation function: {name}\")" }, { "identifier": "validate_empty_rays", "path": "threestudio/utils/ops.py", "snippet": "def validate_empty_rays(ray_indices, t_start, t_end):\n if ray_indices.nelement() == 0:\n threestudio.warn(\"Empty rays_indices!\")\n ray_indices = torch.LongTensor([0]).to(ray_indices)\n t_start = torch.Tensor([0]).to(ray_indices)\n t_end = torch.Tensor([0]).to(ray_indices)\n return ray_indices, t_start, t_end" } ]
from dataclasses import dataclass, field from functools import partial from threestudio.models.background.base import BaseBackground from threestudio.models.estimators import ImportanceEstimator from threestudio.models.geometry.base import BaseImplicitGeometry from threestudio.models.materials.base import BaseMaterial from threestudio.models.networks import create_network_with_input_encoding from threestudio.models.renderers.base import VolumeRenderer from threestudio.systems.utils import parse_optimizer, parse_scheduler_to_instance from threestudio.utils.ops import chunk_batch, get_activation, validate_empty_rays from threestudio.utils.typing import * import nerfacc import torch import torch.nn.functional as F import threestudio
5,569
proposal_network_config: Optional[dict] = None prop_optimizer_config: Optional[dict] = None prop_scheduler_config: Optional[dict] = None num_samples_per_ray_proposal: int = 64 # for importance num_samples_per_ray_importance: int = 64 cfg: Config def configure( self, geometry: BaseImplicitGeometry, material: BaseMaterial, background: BaseBackground, ) -> None: super().configure(geometry, material, background) if self.cfg.estimator == "occgrid": self.estimator = nerfacc.OccGridEstimator( roi_aabb=self.bbox.view(-1), resolution=32, levels=1 ) if not self.cfg.grid_prune: self.estimator.occs.fill_(True) self.estimator.binaries.fill_(True) self.render_step_size = ( 1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray ) self.randomized = self.cfg.randomized elif self.cfg.estimator == "importance": self.estimator = ImportanceEstimator() elif self.cfg.estimator == "proposal": self.prop_net = create_network_with_input_encoding( **self.cfg.proposal_network_config ) self.prop_optim = parse_optimizer( self.cfg.prop_optimizer_config, self.prop_net ) self.prop_scheduler = ( parse_scheduler_to_instance( self.cfg.prop_scheduler_config, self.prop_optim ) if self.cfg.prop_scheduler_config is not None else None ) self.estimator = nerfacc.PropNetEstimator( self.prop_optim, self.prop_scheduler ) def get_proposal_requires_grad_fn( target: float = 5.0, num_steps: int = 1000 ): schedule = lambda s: min(s / num_steps, 1.0) * target steps_since_last_grad = 0 def proposal_requires_grad_fn(step: int) -> bool: nonlocal steps_since_last_grad target_steps_since_last_grad = schedule(step) requires_grad = steps_since_last_grad > target_steps_since_last_grad if requires_grad: steps_since_last_grad = 0 steps_since_last_grad += 1 return requires_grad return proposal_requires_grad_fn self.proposal_requires_grad_fn = get_proposal_requires_grad_fn() self.randomized = self.cfg.randomized else: raise NotImplementedError( "Unknown estimator, should be one of ['occgrid', 'proposal', 'importance']." ) # for proposal self.vars_in_forward = {} def forward( self, rays_o: Float[Tensor, "B H W 3"], rays_d: Float[Tensor, "B H W 3"], light_positions: Float[Tensor, "B 3"], bg_color: Optional[Tensor] = None, **kwargs ) -> Dict[str, Float[Tensor, "..."]]: batch_size, height, width = rays_o.shape[:3] rays_o_flatten: Float[Tensor, "Nr 3"] = rays_o.reshape(-1, 3) rays_d_flatten: Float[Tensor, "Nr 3"] = rays_d.reshape(-1, 3) light_positions_flatten: Float[Tensor, "Nr 3"] = ( light_positions.reshape(-1, 1, 1, 3) .expand(-1, height, width, -1) .reshape(-1, 3) ) n_rays = rays_o_flatten.shape[0] if self.cfg.estimator == "occgrid": if not self.cfg.grid_prune: with torch.no_grad(): ray_indices, t_starts_, t_ends_ = self.estimator.sampling( rays_o_flatten, rays_d_flatten, sigma_fn=None, near_plane=self.cfg.near_plane, far_plane=self.cfg.far_plane, render_step_size=self.render_step_size, alpha_thre=0.0, stratified=self.randomized, cone_angle=0.0, early_stop_eps=0, ) else: def sigma_fn(t_starts, t_ends, ray_indices): t_starts, t_ends = t_starts[..., None], t_ends[..., None] t_origins = rays_o_flatten[ray_indices] t_positions = (t_starts + t_ends) / 2.0 t_dirs = rays_d_flatten[ray_indices] positions = t_origins + t_dirs * t_positions if self.training: sigma = self.geometry.forward_density(positions)[..., 0] else:
@threestudio.register("nerf-volume-renderer") class NeRFVolumeRenderer(VolumeRenderer): @dataclass class Config(VolumeRenderer.Config): num_samples_per_ray: int = 512 eval_chunk_size: int = 160000 randomized: bool = True near_plane: float = 0.0 far_plane: float = 1e10 return_comp_normal: bool = False return_normal_perturb: bool = False # in ["occgrid", "proposal", "importance"] estimator: str = "occgrid" # for occgrid grid_prune: bool = True prune_alpha_threshold: bool = True # for proposal proposal_network_config: Optional[dict] = None prop_optimizer_config: Optional[dict] = None prop_scheduler_config: Optional[dict] = None num_samples_per_ray_proposal: int = 64 # for importance num_samples_per_ray_importance: int = 64 cfg: Config def configure( self, geometry: BaseImplicitGeometry, material: BaseMaterial, background: BaseBackground, ) -> None: super().configure(geometry, material, background) if self.cfg.estimator == "occgrid": self.estimator = nerfacc.OccGridEstimator( roi_aabb=self.bbox.view(-1), resolution=32, levels=1 ) if not self.cfg.grid_prune: self.estimator.occs.fill_(True) self.estimator.binaries.fill_(True) self.render_step_size = ( 1.732 * 2 * self.cfg.radius / self.cfg.num_samples_per_ray ) self.randomized = self.cfg.randomized elif self.cfg.estimator == "importance": self.estimator = ImportanceEstimator() elif self.cfg.estimator == "proposal": self.prop_net = create_network_with_input_encoding( **self.cfg.proposal_network_config ) self.prop_optim = parse_optimizer( self.cfg.prop_optimizer_config, self.prop_net ) self.prop_scheduler = ( parse_scheduler_to_instance( self.cfg.prop_scheduler_config, self.prop_optim ) if self.cfg.prop_scheduler_config is not None else None ) self.estimator = nerfacc.PropNetEstimator( self.prop_optim, self.prop_scheduler ) def get_proposal_requires_grad_fn( target: float = 5.0, num_steps: int = 1000 ): schedule = lambda s: min(s / num_steps, 1.0) * target steps_since_last_grad = 0 def proposal_requires_grad_fn(step: int) -> bool: nonlocal steps_since_last_grad target_steps_since_last_grad = schedule(step) requires_grad = steps_since_last_grad > target_steps_since_last_grad if requires_grad: steps_since_last_grad = 0 steps_since_last_grad += 1 return requires_grad return proposal_requires_grad_fn self.proposal_requires_grad_fn = get_proposal_requires_grad_fn() self.randomized = self.cfg.randomized else: raise NotImplementedError( "Unknown estimator, should be one of ['occgrid', 'proposal', 'importance']." 
) # for proposal self.vars_in_forward = {} def forward( self, rays_o: Float[Tensor, "B H W 3"], rays_d: Float[Tensor, "B H W 3"], light_positions: Float[Tensor, "B 3"], bg_color: Optional[Tensor] = None, **kwargs ) -> Dict[str, Float[Tensor, "..."]]: batch_size, height, width = rays_o.shape[:3] rays_o_flatten: Float[Tensor, "Nr 3"] = rays_o.reshape(-1, 3) rays_d_flatten: Float[Tensor, "Nr 3"] = rays_d.reshape(-1, 3) light_positions_flatten: Float[Tensor, "Nr 3"] = ( light_positions.reshape(-1, 1, 1, 3) .expand(-1, height, width, -1) .reshape(-1, 3) ) n_rays = rays_o_flatten.shape[0] if self.cfg.estimator == "occgrid": if not self.cfg.grid_prune: with torch.no_grad(): ray_indices, t_starts_, t_ends_ = self.estimator.sampling( rays_o_flatten, rays_d_flatten, sigma_fn=None, near_plane=self.cfg.near_plane, far_plane=self.cfg.far_plane, render_step_size=self.render_step_size, alpha_thre=0.0, stratified=self.randomized, cone_angle=0.0, early_stop_eps=0, ) else: def sigma_fn(t_starts, t_ends, ray_indices): t_starts, t_ends = t_starts[..., None], t_ends[..., None] t_origins = rays_o_flatten[ray_indices] t_positions = (t_starts + t_ends) / 2.0 t_dirs = rays_d_flatten[ray_indices] positions = t_origins + t_dirs * t_positions if self.training: sigma = self.geometry.forward_density(positions)[..., 0] else:
sigma = chunk_batch(
8
2023-12-27 20:30:33+00:00
8k
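The threestudio renderer entry above keeps GPU memory bounded by evaluating networks over huge ray batches in slices (the `eval_chunk_size` config and the `chunk_batch` helper). Below is a minimal, self-contained sketch of that chunking idea only; the function name `eval_in_chunks` and the toy density function are illustrative and are not part of the threestudio codebase.

import torch

def eval_in_chunks(fn, inputs: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Apply `fn` to `inputs` in slices of `chunk_size` rows and concatenate the results,
    # mirroring the memory-bounding purpose of chunk_batch / eval_chunk_size above.
    if chunk_size <= 0:  # a non-positive chunk size means "do not chunk"
        return fn(inputs)
    outputs = []
    for start in range(0, inputs.shape[0], chunk_size):
        outputs.append(fn(inputs[start:start + chunk_size]))
    return torch.cat(outputs, dim=0)

if __name__ == "__main__":
    # Toy usage: evaluate a stand-in "density" function over 10_000 points, 2_048 at a time.
    density = lambda x: x.norm(dim=-1, keepdim=True)
    points = torch.randn(10_000, 3)
    sigma = eval_in_chunks(density, points, chunk_size=2_048)
    assert sigma.shape == (10_000, 1)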
jasursadikov/mud
mud.py
[ { "identifier": "TEXT", "path": "utils.py", "snippet": "TEXT = {\n 'white': '\\033[37m',\n 'gray': '\\033[90m',\n 'black': '\\033[30m',\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'bright_white': '\\033[97m',\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n}" }, { "identifier": "RESET", "path": "utils.py", "snippet": "RESET = '\\033[0m'" }, { "identifier": "STYLES", "path": "utils.py", "snippet": "STYLES = {\n 'bold': '\\033[1m',\n 'dim': '\\033[2m',\n 'italic': '\\033[3m',\n 'underline': '\\033[4m',\n 'blink': '\\033[5m',\n}" }, { "identifier": "Config", "path": "config.py", "snippet": "class Config:\n def __init__(self):\n self.data = {}\n self.find()\n\n def save(self, file_path: str) -> None:\n root = ElementTree.Element(\"mud\")\n\n def _filter_labels(label: str):\n return bool(re.match(r'^\\w+$', label))\n\n for path, labels in self.data.items():\n dir_element = ElementTree.SubElement(root, \"dir\")\n dir_element.set(\"path\", path)\n\n valid_labels = [label for label in labels if _filter_labels(label)]\n if valid_labels:\n if len(valid_labels) == 1:\n formatted_labels = valid_labels[0]\n else:\n formatted_labels = ', '.join(valid_labels)\n dir_element.set(\"label\", formatted_labels)\n\n rough_string = ElementTree.tostring(root, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n pretty_xml = reparsed.toprettyxml(indent=\"\\t\")\n\n with open(file_path, 'w', encoding='utf-8') as file:\n file.write(pretty_xml)\n\n def find(self) -> None:\n if os.path.exists(utils.CONFIG_FILE_NAME):\n self.load(utils.CONFIG_FILE_NAME)\n return\n\n if utils.settings.mud_settings['config_path'] != '' and os.path.exists(\n utils.settings.mud_settings['config_path']):\n directory = os.path.dirname(utils.settings.mud_settings['config_path'])\n os.chdir(directory)\n os.environ['PWD'] = directory\n self.load(utils.CONFIG_FILE_NAME)\n return\n\n utils.print_error('.mudconfig file was not found. 
Type `mud add --all` to create configuration file.')\n\n def load(self, file_path: str) -> None:\n self.data = {}\n tree = ElementTree.parse(file_path)\n root = tree.getroot()\n for dir_element in root.findall('dir'):\n path = dir_element.get('path')\n labels = [label.strip() for label in dir_element.get('label', '').split(',') if label.strip()]\n self.data[path] = labels\n\n def all(self) -> Dict[str, List[str]]:\n return self.data\n\n def paths(self) -> List[str]:\n return list(self.data.keys())\n\n def with_label(self, label: str) -> Dict[str, List[str]]:\n if label == '':\n return self.all()\n result = {}\n for path, labels in self.data.items():\n if label in labels:\n result[path] = labels\n return result\n\n def add_label(self, path: str, label: str) -> None:\n if path not in self.data:\n self.data[path] = []\n if label not in self.data[path]:\n self.data[path].append(label)\n\n def remove_path(self, path: str) -> None:\n if path in self.data:\n del self.data[path]\n\n def remove_label(self, path: str, label: str) -> None:\n if path in self.data and label in self.data[path]:\n self.data[path].remove(label)\n if not self.data[path]:\n del self.data[path]" }, { "identifier": "Settings", "path": "settings.py", "snippet": "class Settings:\n def __init__(self, file_name: str) -> None:\n self.file_name = file_name\n self.mud_settings = None\n self.alias_settings = None\n self.config = configparser.ConfigParser()\n self.settings_file = os.path.join(os.path.expanduser('~'), self.file_name)\n self.defaults = {\n 'mud': {\n 'config_path': '',\n 'nerd_fonts': True,\n 'auto_fetch': False,\n 'run_async': True,\n 'run_table': True,\n 'simplify_branches': True\n },\n 'alias': {\n 'to': 'git checkout',\n 'fetch': 'git fetch',\n 'pull': 'git pull',\n 'push': 'git push'\n }\n }\n self.load_settings()\n\n def load_settings(self) -> None:\n if not os.path.exists(self.settings_file):\n self.config.read_dict(self.defaults)\n self.save()\n else:\n self.config.read(self.settings_file)\n\n self.mud_settings = {}\n for key in self.defaults['mud']:\n if isinstance(self.defaults['mud'][key], bool):\n self.mud_settings[key] = self.config.getboolean('mud', key, fallback=self.defaults['mud'][key])\n else:\n self.mud_settings[key] = self.config.get('mud', key, fallback=self.defaults['mud'][key])\n\n self.alias_settings = self.config['alias']\n\n def save(self) -> None:\n with open(self.settings_file, 'w') as configfile:\n self.config.write(configfile)" }, { "identifier": "Commands", "path": "commands.py", "snippet": "class Commands:\n def __init__(self, repos):\n self.repos = repos\n self.label_color_cache = {}\n self.current_color_index = 0\n\n # `mud status` command implementation\n def status(self, repos: Dict[str, List[str]]) -> None:\n table = self._get_table()\n for path, tags in repos.items():\n formatted_path = self._get_formatted_path(path)\n branch = self._get_branch_status(path)\n author = self._get_authors_name(path)\n commit = self._get_commit_message(path, 30)\n colored_labels = self._get_formatted_labels(tags)\n\n # Sync with origin status\n ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'],\n text=True, cwd=path, capture_output=True)\n stdout = ahead_behind_cmd.stdout.strip().split()\n if len(stdout) >= 2:\n ahead, behind = stdout[0], stdout[1]\n origin_sync = ''\n if ahead and ahead != '0':\n origin_sync += f'{TEXT[\"bright_green\"]}{glyph(\"ahead\")} {ahead}{RESET}'\n if behind and behind != '0':\n if origin_sync:\n origin_sync += ' '\n 
origin_sync += f'{TEXT[\"bright_blue\"]}{glyph(\"behind\")} {behind}{RESET}'\n else:\n origin_sync = ''\n # Git status\n status_cmd = subprocess.run(['git', 'status', '-s'], text=True, cwd=path, capture_output=True)\n files = [line.lstrip() for line in status_cmd.stdout.strip().splitlines()]\n\n modified, added, removed, moved = 0, 0, 0, 0\n\n for file in files:\n if file.startswith('M'):\n modified += 1\n elif file.startswith('A') or file.startswith('??'):\n added += 1\n elif file.startswith('D'):\n removed += 1\n elif file.startswith('R'):\n moved += 1\n status = ''\n if added:\n status += f'{TEXT[\"bright_green\"]}{added} {glyph(\"added\")}{RESET} '\n if modified:\n status += f'{TEXT[\"yellow\"]}{modified} {glyph(\"modified\")}{RESET} '\n if moved:\n status += f'{TEXT[\"blue\"]}{moved} {glyph(\"moved\")}{RESET} '\n if removed:\n status += f'{TEXT[\"red\"]}{removed} {glyph(\"removed\")}{RESET} '\n if not files:\n status = f'{TEXT[\"green\"]}{glyph(\"clear\")}{RESET}'\n\n table.add_row([formatted_path, branch, origin_sync, status, author, commit, colored_labels])\n\n self._print_table(table)\n\n # `mud log` command implementation\n def log(self, repos: Dict[str, List[str]]) -> None:\n table = self._get_table()\n for path, labels in repos.items():\n formatted_path = self._get_formatted_path(path)\n branch = self._get_branch_status(path)\n author = self._get_authors_name(path)\n commit = self._get_commit_message(path, 35)\n colored_labels = self._get_formatted_labels(labels)\n\n # Commit time\n commit_time_cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%cd', '--date=relative'], text=True,\n cwd=path, capture_output=True)\n commit_time = commit_time_cmd.stdout.strip()\n\n table.add_row([formatted_path, branch, author, commit_time, commit, colored_labels])\n\n self._print_table(table)\n\n # `mud branch` command implementation\n def branches(self, repos: Dict[str, List[str]]) -> None:\n table = self._get_table()\n all_branches = {}\n for path in repos.keys():\n raw_branches = [line.strip() for line in\n subprocess.check_output(['git', 'branch'], text=True, cwd=path).split('\\n') if line.strip()]\n for branch in raw_branches:\n branch = branch.replace(' ', '').replace('*', '')\n if branch not in all_branches:\n all_branches[branch] = 0\n all_branches[branch] += 1\n branch_counter = Counter(all_branches)\n\n for path, labels in repos.items():\n formatted_path = self._get_formatted_path(path)\n branches = subprocess.check_output(['git', 'branch'], text=True, cwd=path).splitlines()\n current_branch = next((branch.lstrip('* ') for branch in branches if branch.startswith('*')), None)\n branches = [branch.lstrip('* ') for branch in branches]\n sorted_branches = sorted(branches, key=lambda x: branch_counter.get(x, 0), reverse=True)\n\n if current_branch and current_branch in sorted_branches:\n sorted_branches.remove(current_branch)\n sorted_branches.insert(0, current_branch)\n\n formatted_branches = self._get_formatted_branches(sorted_branches, current_branch)\n\n colored_labels = self._get_formatted_labels(labels)\n table.add_row([formatted_path, formatted_branches, colored_labels])\n\n self._print_table(table)\n\n # `mud <COMMAND>` when run_async = 0 and run_table = 0\n def run_ordered(self, repos: List[str], command: [str]) -> None:\n for path in repos:\n print(f'{self._get_formatted_path(path)}{RESET} {command}{RESET}')\n result = subprocess.run(command, shell=True, cwd=path, capture_output=True, text=True)\n if result.stderr:\n print(result.stderr)\n if result.stdout and not 
result.stdout.isspace():\n print(result.stdout)\n\n # `mud <COMMAND>` when run_async = 1 and run_table = 0\n async def run_async(self, repos: List[str], command: str) -> None:\n sem = asyncio.Semaphore(len(repos))\n\n async def run_process(path: str) -> None:\n async with sem:\n process = await asyncio.create_subprocess_shell(command, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = await process.communicate()\n print(f'{self._get_formatted_path(path)}>{RESET} {command}')\n if stderr:\n print(stderr.decode())\n if stdout and not stdout.isspace():\n print(stdout.decode())\n\n await asyncio.gather(*(run_process(path) for path in repos))\n\n # `mud <COMMAND>` when run_async = 1 and run_table = 1\n async def run_async_table_view(self, repos: List[str], command: str) -> None:\n sem = asyncio.Semaphore(len(repos))\n table = {repo: ['', ''] for repo in repos}\n\n async def task(repo: str) -> None:\n async with sem:\n await self._run_process(repo, table, command)\n\n tasks = [asyncio.create_task(task(repo)) for repo in repos]\n await asyncio.gather(*tasks)\n\n async def _run_process(self, repo_path: str, table: Dict[str, List[str]], command: str) -> None:\n process = await asyncio.create_subprocess_shell(command, cwd=repo_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n while True:\n line = await process.stdout.readline()\n if not line:\n break\n line = line.decode().strip()\n line = table[repo_path][0] if not line.strip() else line\n table[repo_path] = [line, f'{TEXT[\"yellow\"]}{glyph(\"running\")}']\n self._print_process(table)\n\n return_code = await process.wait()\n if return_code == 0:\n status = f'{TEXT[\"green\"]}{glyph(\"finished\")}'\n else:\n status = f'{TEXT[\"red\"]}{glyph(\"failed\")} Code: {return_code}'\n\n table[repo_path] = [table[repo_path][0], status]\n self._print_process(table)\n\n def _print_process(self, info: Dict[str, List[str]]) -> None:\n table = self._get_table()\n\n for path, (line, status) in info.items():\n formatted_path = self._get_formatted_path(path)\n table.add_row([formatted_path, line, status])\n\n print(f'\\x1bc{self._table_to_str(table)}\\n', end='')\n\n def _print_table(self, table: PrettyTable):\n table = self._table_to_str(table)\n if len(table) != 0:\n print(table)\n\n @staticmethod\n def _table_to_str(table: PrettyTable) -> str:\n table = table.get_string()\n table = '\\n'.join(line.lstrip() for line in table.splitlines())\n return table\n\n @staticmethod\n def _get_table() -> PrettyTable:\n return PrettyTable(border=False, header=False, style=PLAIN_COLUMNS, align='l')\n\n # Prettified repository path\n @staticmethod\n def _get_formatted_path(path: str) -> str:\n return f'{STYLES[\"dim\"]}{TEXT[\"gray\"]}../{RESET}{STYLES[\"dim\"]}{path}{RESET}'\n\n # Displaying current branch\n @staticmethod\n def _get_branch_status(path: str) -> str:\n branch_cmd = subprocess.run('git rev-parse --abbrev-ref HEAD', shell=True, text=True, cwd=path,\n capture_output=True)\n branch_stdout = branch_cmd.stdout.strip()\n if branch_stdout == 'master' or branch_stdout == 'main':\n branch = f'{TEXT[\"yellow\"]}{glyph(\"master\")}{RESET} {branch_stdout}'\n elif branch_stdout == 'develop':\n branch = f'{TEXT[\"green\"]}{glyph(\"feature\")}{RESET} {branch_stdout}'\n elif '/' in branch_stdout:\n branch_path = branch_stdout.split('/')\n icon = branch_path[0]\n icon = f'{TEXT[\"red\"]}{glyph(\"bugfix\")}{RESET}' if icon in ['bugfix', 'bug', 'hotfix'] else \\\n f'{TEXT[\"blue\"]}{glyph(\"release\")}{RESET}' if icon == 'release' else \\\n 
f'{TEXT[\"green\"]}{glyph(\"feature\")}{RESET}' if icon in ['feature', 'feat', 'develop'] else \\\n f'{TEXT[\"green\"]}{glyph(\"branch\")}{RESET}'\n branch = f'{icon} {STYLES[\"bold\"]}{branch_path[0]}{RESET}/{STYLES[\"bold\"]}{(\"/\".join(branch_path[1:]))}'\n else:\n branch = f'{TEXT[\"cyan\"]}{glyph(\"branch\")}{RESET} {branch_stdout}'\n return branch\n\n # Last author's name\n @staticmethod\n def _get_authors_name(path: str) -> str:\n cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%an'], text=True, cwd=path, capture_output=True)\n git_config_user_cmd = subprocess.run(['git', 'config', 'user.name'], text=True, capture_output=True)\n committer_color = '' if cmd.stdout.strip() == git_config_user_cmd.stdout.strip() else STYLES[\"dim\"]\n author = cmd.stdout.strip()\n author = author[:20] + '...' if len(author) > 20 else author\n author = f'{committer_color}{author}{RESET}'\n return author\n\n # Last commit message\n @staticmethod\n def _get_commit_message(path: str, max_chars: int) -> str:\n cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%s'], text=True, cwd=path, capture_output=True)\n log = cmd.stdout.strip()\n log = log[:max_chars] + '...' if len(log) > max_chars else log\n return log\n\n def _get_formatted_labels(self, labels: List[str]) -> str:\n if len(labels) == 0:\n return ''\n\n colored_label = ''\n for label in labels:\n color_index = self._get_color_index(label) % len(TEXT)\n colored_label += f'{TEXT[list(TEXT.keys())[color_index + 3]]}{glyph(\"label\")}{RESET} {label} '\n\n return colored_label\n\n @staticmethod\n def _get_formatted_branches(branches: List[str], current_branch: str) -> str:\n if len(branches) == 0:\n return ''\n\n simplify_branches = utils.settings.config['mud'].getboolean('simplify_branches') == True\n output = ''\n for branch in branches:\n is_origin = branch.startswith('origin/')\n branch = branch.replace('origin/', '') if is_origin else branch\n current_prefix = f'{STYLES[\"italic\"]}{STYLES[\"bold\"]}' if current_branch == branch else ''\n current_prefix = current_prefix + STYLES['dim'] if is_origin else current_prefix\n origin_prefix = f'{TEXT[\"magenta\"]}{STYLES[\"dim\"]}o/' if is_origin else ''\n color = 'white'\n icon = glyph('branch')\n if branch == 'master' or branch == 'main':\n color = 'yellow'\n icon = f'{glyph(\"master\")}'\n elif branch == 'develop':\n color = 'green'\n icon = f'{glyph(\"feature\")}'\n elif '/' in branch:\n parts = branch.split('/')\n end_dim = '' if is_origin else END_STYLES[\"dim\"]\n branch = '/'.join([p[0] for p in parts[:-1]] + [end_dim + (\n parts[-1][:10] + '..' if len(parts[-1]) > 10 else parts[-1])]) if simplify_branches else '/'.join(\n [p for p in parts[:-1]] + [end_dim + (parts[-1][:10] + '..' 
if len(parts[-1]) > 10 else parts[-1])])\n branch = f'{STYLES[\"dim\"]}{branch}'\n icon = parts[0]\n color = 'red' if icon in ['bugfix', 'bug', 'hotfix'] else \\\n 'blue' if icon == 'release' else \\\n 'green' if icon in ['feature', 'feat', 'develop'] else \\\n 'green'\n icon = f'{glyph(\"bugfix\")}' if icon in ['bugfix', 'bug', 'hotfix'] else \\\n f'{glyph(\"release\")}' if icon == 'release' else \\\n f'{glyph(\"feature\")}' if icon in ['feature', 'feat', 'develop'] else \\\n f'{glyph(\"branch\")}'\n output += f'{current_prefix}{TEXT[color]}{icon} {origin_prefix}{TEXT[color]}{branch}{RESET} '\n return output\n\n def _get_color_index(self, label: str) -> (str, str):\n if label not in self.label_color_cache:\n self.label_color_cache[label] = self.current_color_index\n self.current_color_index = (self.current_color_index + 1) % len(BACK.keys())\n return self.label_color_cache[label]" } ]
import os import sys import asyncio import argparse import subprocess import utils from argparse import ArgumentParser from utils import TEXT, RESET, STYLES from config import Config from settings import Settings from commands import Commands
6,002
MODIFIED_ATTR = '-m', '--modified' DIVERGED_ATTR = '-d', '--diverged' # Commands COMMANDS = { 'help': ['help', '--help', '-h'], 'version': ['--version'], 'set-global': ['--set-global'], 'init': ['init'], 'add': ['add', 'a'], 'remove': ['remove', 'rm'], 'branches': ['branch', 'branches', 'br'], 'status': ['status', 'st'], 'log': ['log', 'l'], } class MudCLI: def __init__(self): self.cmd_runner = None self.config = None self.parser = self._create_parser() @staticmethod def _create_parser() -> ArgumentParser: parser = argparse.ArgumentParser(description=f'mud allows you to run commands in multiple directories.') subparsers = parser.add_subparsers(dest='command') subparsers.add_parser(COMMANDS['init'][0], aliases=COMMANDS['init'][1:], help='Initializing .mudconfig, adds all repositories in this directory to .mudconfig') subparsers.add_parser(COMMANDS['status'][0], aliases=COMMANDS['status'][1:], help='Displays git status in a table view') subparsers.add_parser(COMMANDS['branches'][0], aliases=COMMANDS['branches'][1:], help='Displays all branches in a table view') subparsers.add_parser(COMMANDS['log'][0], aliases=COMMANDS['log'][1:], help='Displays log of last commit for all repos in a table view') add_parser = subparsers.add_parser(COMMANDS['add'][0], aliases=COMMANDS['add'][1:], help='Register directory') add_parser.add_argument('label', help='The label to add (optional)', nargs='?', default='', type=str) add_parser.add_argument('path', help='Directory to add (optional)', nargs='?', type=str) remove_parser = subparsers.add_parser(COMMANDS['remove'][0], aliases=COMMANDS['remove'][1:], help='Remove label from directory or directory in .mudconfig') remove_parser.add_argument('label', help='Label to remove from directory (optional)', nargs='?', default='', type=str) remove_parser.add_argument('path', help='Directory to remove (optional)', nargs='?', type=str) parser.add_argument(*LABEL_PREFIX, metavar='LABEL', nargs='?', default='', type=str, help='Filter repos with provided label') parser.add_argument(*BRANCH_PREFIX, metavar='BRANCH', nargs='?', default='', type=str, help='Filter repos with provided branch') parser.add_argument(*MODIFIED_ATTR, action='store_true', help='Filter modified repos') parser.add_argument(*DIVERGED_ATTR, action='store_true', help='Filter diverged repos') parser.add_argument(COMMANDS['set-global'][0], help='Sets \'.mudconfig\' in current directory as your global \'.mudconfig\' so you can ' 'use it anywhere', action='store_true') parser.add_argument(COMMANDS['version'][0], help='Displays current version of mud', action='store_true') parser.add_argument('catch_all', nargs='*', help='Type any commands to execute among repositories.') return parser def run(self) -> None: # Displays default help message if len(sys.argv) == 1 or sys.argv[1] in COMMANDS['help']: self.parser.print_help() return # Sets global repository in .mudsettings if sys.argv[1] in COMMANDS['set-global']: config_path = os.path.join(os.getcwd(), utils.CONFIG_FILE_NAME) if os.path.exists(config_path): utils.settings.config.set('mud', 'config_path', config_path) utils.settings.save() print('Current .mudconfig set as a global configuration') return # Prints version if sys.argv[1] in COMMANDS['version']: utils.print_version() return current_directory = os.getcwd() self.config = Config() self._filter_repos() if len(self.repos) == 0: utils.print_error('No repositories are matching this filter') return
#!/usr/bin/env python3 # Filters LABEL_PREFIX = '-l=', '--label=' BRANCH_PREFIX = '-b=', '--branch=' MODIFIED_ATTR = '-m', '--modified' DIVERGED_ATTR = '-d', '--diverged' # Commands COMMANDS = { 'help': ['help', '--help', '-h'], 'version': ['--version'], 'set-global': ['--set-global'], 'init': ['init'], 'add': ['add', 'a'], 'remove': ['remove', 'rm'], 'branches': ['branch', 'branches', 'br'], 'status': ['status', 'st'], 'log': ['log', 'l'], } class MudCLI: def __init__(self): self.cmd_runner = None self.config = None self.parser = self._create_parser() @staticmethod def _create_parser() -> ArgumentParser: parser = argparse.ArgumentParser(description=f'mud allows you to run commands in multiple directories.') subparsers = parser.add_subparsers(dest='command') subparsers.add_parser(COMMANDS['init'][0], aliases=COMMANDS['init'][1:], help='Initializing .mudconfig, adds all repositories in this directory to .mudconfig') subparsers.add_parser(COMMANDS['status'][0], aliases=COMMANDS['status'][1:], help='Displays git status in a table view') subparsers.add_parser(COMMANDS['branches'][0], aliases=COMMANDS['branches'][1:], help='Displays all branches in a table view') subparsers.add_parser(COMMANDS['log'][0], aliases=COMMANDS['log'][1:], help='Displays log of last commit for all repos in a table view') add_parser = subparsers.add_parser(COMMANDS['add'][0], aliases=COMMANDS['add'][1:], help='Register directory') add_parser.add_argument('label', help='The label to add (optional)', nargs='?', default='', type=str) add_parser.add_argument('path', help='Directory to add (optional)', nargs='?', type=str) remove_parser = subparsers.add_parser(COMMANDS['remove'][0], aliases=COMMANDS['remove'][1:], help='Remove label from directory or directory in .mudconfig') remove_parser.add_argument('label', help='Label to remove from directory (optional)', nargs='?', default='', type=str) remove_parser.add_argument('path', help='Directory to remove (optional)', nargs='?', type=str) parser.add_argument(*LABEL_PREFIX, metavar='LABEL', nargs='?', default='', type=str, help='Filter repos with provided label') parser.add_argument(*BRANCH_PREFIX, metavar='BRANCH', nargs='?', default='', type=str, help='Filter repos with provided branch') parser.add_argument(*MODIFIED_ATTR, action='store_true', help='Filter modified repos') parser.add_argument(*DIVERGED_ATTR, action='store_true', help='Filter diverged repos') parser.add_argument(COMMANDS['set-global'][0], help='Sets \'.mudconfig\' in current directory as your global \'.mudconfig\' so you can ' 'use it anywhere', action='store_true') parser.add_argument(COMMANDS['version'][0], help='Displays current version of mud', action='store_true') parser.add_argument('catch_all', nargs='*', help='Type any commands to execute among repositories.') return parser def run(self) -> None: # Displays default help message if len(sys.argv) == 1 or sys.argv[1] in COMMANDS['help']: self.parser.print_help() return # Sets global repository in .mudsettings if sys.argv[1] in COMMANDS['set-global']: config_path = os.path.join(os.getcwd(), utils.CONFIG_FILE_NAME) if os.path.exists(config_path): utils.settings.config.set('mud', 'config_path', config_path) utils.settings.save() print('Current .mudconfig set as a global configuration') return # Prints version if sys.argv[1] in COMMANDS['version']: utils.print_version() return current_directory = os.getcwd() self.config = Config() self._filter_repos() if len(self.repos) == 0: utils.print_error('No repositories are matching this filter') return
self.cmd_runner = Commands(self.config)
5
2023-12-28 13:09:31+00:00
8k
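The mud entry above fans a git command out across many repositories concurrently, using `asyncio.create_subprocess_shell` guarded by a semaphore (see `Commands.run_async`). The sketch below shows that pattern in isolation, assuming the repository paths and the command are passed in directly; the helper name `run_in_repos` and the concurrency limit are illustrative and not part of the mud tool.

import asyncio

async def run_in_repos(repos, command: str, limit: int = 4) -> None:
    # Cap the number of subprocesses that run at the same time.
    sem = asyncio.Semaphore(limit)

    async def run_one(path: str) -> None:
        async with sem:
            proc = await asyncio.create_subprocess_shell(
                command,
                cwd=path,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, stderr = await proc.communicate()
            print(f"[{path}] exit code {proc.returncode}")
            if stdout:
                print(stdout.decode(errors="replace"))
            if stderr:
                print(stderr.decode(errors="replace"))

    await asyncio.gather(*(run_one(p) for p in repos))

if __name__ == "__main__":
    # Toy usage: run a read-only git command in the current directory only.
    asyncio.run(run_in_repos(["."], "git status -s"))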
RaceCrewAI/gt-telem
gt_telem/turismo_client.py
[ { "identifier": "PlayStationNotFoundError", "path": "gt_telem/errors/playstation_errors.py", "snippet": "class PlayStationNotFoundError(Exception):\n def __init__(self, message=\"Playstation not found on this network.\"):\n super().__init__(message)" }, { "identifier": "PlayStatonOnStandbyError", "path": "gt_telem/errors/playstation_errors.py", "snippet": "class PlayStatonOnStandbyError(Exception):\n def __init__(self, playstation_ip):\n message = f\"Playstation {'at '+playstation_ip+' ' if playstation_ip else ''}is on standby.\"\n super().__init__(message)" }, { "identifier": "SpanReader", "path": "gt_telem/models/helpers.py", "snippet": "class SpanReader:\n \"\"\"\n Utility class for reading binary data in a structured manner.\n \"\"\"\n\n def __init__(self, data, byte_order=\"little\"):\n \"\"\"\n Initialize the SpanReader.\n\n Parameters:\n - data: Binary data to read.\n - byte_order (str): Byte order for interpreting binary data, 'little' or 'big'.\n \"\"\"\n self.view = memoryview(data)\n self.byte_order = \"<\" if byte_order == \"little\" else \">\"\n self.position = 0\n\n def read_int32(self):\n \"\"\"\n Read a 32-bit signed integer from the binary data.\n\n Returns:\n int: The read integer value.\n \"\"\"\n format_string = f\"{self.byte_order}i\"\n value = struct.unpack_from(format_string, self.view, self.position)[0]\n self.position += 4\n return value\n\n def read_int16(self):\n \"\"\"\n Read a 16-bit signed integer from the binary data.\n\n Returns:\n int: The read integer value.\n \"\"\"\n format_string = f\"{self.byte_order}h\"\n value = struct.unpack_from(format_string, self.view, self.position)[0]\n self.position += 2\n return value\n\n def read_single(self):\n \"\"\"\n Read a 32-bit floating-point number from the binary data.\n\n Returns:\n float: The read floating-point value.\n \"\"\"\n format_string = f\"{self.byte_order}f\"\n value = struct.unpack_from(format_string, self.view, self.position)[0]\n self.position += 4\n return value\n\n def read_byte(self):\n \"\"\"\n Read a byte from the binary data.\n\n Returns:\n int: The read byte value.\n \"\"\"\n value = struct.unpack_from(\"B\", self.view, self.position)[0]\n self.position += 1\n return value\n\n def read_bytes(self, length):\n \"\"\"\n Read a specified number of bytes from the binary data.\n\n Parameters:\n - length (int): Number of bytes to read.\n\n Returns:\n bytes: The read bytes.\n \"\"\"\n value = self.view[self.position : self.position + length].tobytes()\n self.position += length\n return value" }, { "identifier": "Telemetry", "path": "gt_telem/models/telemetry.py", "snippet": "class Telemetry(TelemetryPacket):\n \"\"\"\n Telemetry data from Gran Turismo\n\n Attributes:\n - position_x: float - X-coordinate of the position.\n - position_y: float - Y-coordinate of the position.\n - position_z: float - Z-coordinate of the position.\n - velocity_x: float - X-component of velocity.\n - velocity_y: float - Y-component of velocity.\n - velocity_z: float - Z-component of velocity.\n - rotation_x: float - X-component of rotation.\n - rotation_y: float - Y-component of rotation.\n - rotation_z: float - Z-component of rotation.\n - orientation: float - Orientation.\n - ang_vel_x: float - X-component of angular velocity.\n - ang_vel_y: float - Y-component of angular velocity.\n - ang_vel_z: float - Z-component of angular velocity.\n - body_height: float - Height of the body.\n - engine_rpm: float - Engine RPM.\n - iv: float - IV, used for encryption.\n - fuel_level: float - Fuel level.\n - fuel_capacity: float - 
Fuel capacity.\n - speed_mps: float - Speed in meters per second.\n - boost_pressure: float - Boost pressure.\n - oil_pressure: float - Oil pressure.\n - water_temp: float - Water temperature.\n - oil_temp: float - Oil temperature.\n - tire_fl_temp: float - Front-left tire temperature.\n - tire_fr_temp: float - Front-right tire temperature.\n - tire_rl_temp: float - Rear-left tire temperature.\n - tire_rr_temp: float - Rear-right tire temperature.\n - packet_id: int - Packet ID.\n - current_lap: int - Current lap.\n - total_laps: int - Total laps.\n - best_lap_time_ms: int - Best lap time in milliseconds.\n - last_lap_time_ms: int - Last lap time in milliseconds.\n - time_of_day_ms: int - Time of day in milliseconds.\n - race_start_pos: int - Race start position.\n - total_cars: int - Total number of cars.\n - min_alert_rpm: int - Minimum alert RPM.\n - max_alert_rpm: int - Maximum alert RPM.\n - calc_max_speed: int - Calculated maximum speed.\n - flags: int - byte that contains current/suggested gear.\n - bits: int - Collection of booleans - see properties.\n - throttle: int - Throttle.\n - brake: int - Brake.\n - empty: int - Unused.\n - road_plane_x: float - X-coordinate of the road plane.\n - road_plane_y: float - Y-coordinate of the road plane.\n - road_plane_z: float - Z-coordinate of the road plane.\n - road_plane_dist: float - Distance of the road plane. Not sure what this is.\n - wheel_fl_rps: float - Front-left wheel revolutions per second.\n - wheel_fr_rps: float - Front-right wheel revolutions per second.\n - wheel_rl_rps: float - Rear-left wheel revolutions per second.\n - wheel_rr_rps: float - Rear-right wheel revolutions per second.\n - tire_fl_radius: float - Front-left tire radius.\n - tire_fr_radius: float - Front-right tire radius.\n - tire_rl_radius: float - Rear-left tire radius.\n - tire_rr_radius: float - Rear-right tire radius.\n - tire_fl_sus_height: float - Front-left tire suspension height.\n - tire_fr_sus_height: float - Front-right tire suspension height.\n - tire_rl_sus_height: float - Rear-left tire suspension height.\n - tire_rr_sus_height: float - Rear-right tire suspension height.\n - unused1: int - Unused variable 1.\n - unused2: int - Unused variable 2.\n - unused3: int - Unused variable 3.\n - unused4: int - Unused variable 4.\n - unused5: int - Unused variable 5.\n - unused6: int - Unused variable 6.\n - unused7: int - Unused variable 7.\n - unused8: int - Unused variable 8.\n - clutch_pedal: float - Clutch pedal position.\n - clutch_engagement: float - Clutch engagement.\n - trans_rpm: float - Transmission RPM.\n - trans_top_speed: float - Transmission top speed.\n - gear1: float - Gear 1.\n - gear2: float - Gear 2.\n - gear3: float - Gear 3.\n - gear4: float - Gear 4.\n - gear5: float - Gear 5.\n - gear6: float - Gear 6.\n - gear7: float - Gear 7.\n - gear8: float - Gear 8.\n - car_code: int - Car code - on vehicles with more than 8 gears, this is corrupted.\n\n Properties:\n - position: Get the position as a Vector3D.\n - velocity: Get the velocity as a Vector3D.\n - rotation: Get the rotation as a Vector3D.\n - angular_velocity: Get the angular velocity as a Vector3D.\n - road_plane: Get the road plane coordinates as a Vector3D.\n - tire_temp: Get tire temperatures as a WheelMetric.\n - wheel_rps: Get wheel revolutions per second as a WheelMetric.\n - tire_radius: Get tire radii as a WheelMetric.\n - suspension_height: Get suspension heights as a WheelMetric.\n - current_gear: Get the current gear.\n - suggested_gear: Get the suggested gear.\n - 
speed_kph: Get the speed in kilometers per hour.\n - speed_mph: Get the speed in miles per hour.\n - cars_on_track: Check if there are cars on the track.\n - is_paused: Check if the simulation is paused.\n - is_loading: Check if the simulation is loading.\n - in_gear: Check if the vehicle is in gear.\n - has_turbo: Check if the vehicle has a turbo.\n - rev_limit: Check if the vehicle is at the rev limit.\n - hand_brake_active: Check if the hand brake is active.\n - lights_active: Check if the lights are active.\n - high_beams: Check if the high beams are active.\n - low_beams: Check if the low beams are active.\n - asm_active: Check if the ASM (Active Stability Management) is active.\n - tcs_active: Check if the TCS (Traction Control System) is active.\n - unknown_bool_1: Purpose unknown.\n - unknown_bool_2: Purpose unknown.\n - unknown_bool_3: Purpose unknown.\n - unknown_bool_4: Purpose unknown.\n - best_lap_time: Get the formatted best lap time.\n - last_lap_time: Get the formatted last lap time.\n - time_of_day: Get the formatted time of day.\n\n Methods\n - as_dict: Get the state of the object in a dictionary format.\n \"\"\"\n\n def __post_init__(self):\n self.time = datetime.now()\n\n @property\n def position(self) -> Vector3D:\n \"\"\"\n Get the position as a Vector3D.\n \"\"\"\n return Vector3D(self.position_x, self.position_y, self.position_z)\n\n @property\n def velocity(self) -> Vector3D:\n \"\"\"\n Get the velocity as a Vector3D.\n \"\"\"\n return Vector3D(self.velocity_x, self.velocity_y, self.velocity_z)\n\n @property\n def rotation(self) -> Vector3D:\n \"\"\"\n Get the rotation as a Vector3D.\n \"\"\"\n return Vector3D(self.rotation_x, self.rotation_y, self.rotation_z)\n\n @property\n def angular_velocity(self) -> Vector3D:\n \"\"\"\n Get the angular velocity as a Vector3D.\n \"\"\"\n return Vector3D(self.ang_vel_x, self.ang_vel_y, self.ang_vel_z)\n\n @property\n def road_plane(self) -> Vector3D:\n \"\"\"\n Get the road plane coordinates as a Vector3D.\n \"\"\"\n return Vector3D(self.road_plane_x, self.road_plane_y, self.road_plane_z)\n\n @property\n def tire_temp(self) -> WheelMetric:\n \"\"\"\n Get tire temperatures as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.tire_fl_temp, self.tire_fr_temp, self.tire_rl_temp, self.tire_rr_temp\n )\n\n @property\n def wheel_rps(self) -> WheelMetric:\n \"\"\"\n Get wheel revolutions per second as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.wheel_fl_rps, self.wheel_fr_rps, self.wheel_rl_rps, self.wheel_rr_rps\n )\n\n @property\n def tire_radius(self) -> WheelMetric:\n \"\"\"\n Get tire radii as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.tire_fl_radius,\n self.tire_fr_radius,\n self.tire_rl_radius,\n self.tire_rr_radius,\n )\n\n @property\n def suspension_height(self) -> WheelMetric:\n \"\"\"\n Get suspension heights as a WheelMetric.\n \"\"\"\n return WheelMetric(\n self.tire_fl_sus_height,\n self.tire_fr_sus_height,\n self.tire_rl_sus_height,\n self.tire_rr_sus_height,\n )\n\n @property\n def current_gear(self) -> int:\n \"\"\"\n Get the current gear.\n \"\"\"\n return self.bits & 0b1111\n\n @property\n def suggested_gear(self) -> int:\n \"\"\"\n Get the suggested gear.\n \"\"\"\n return self.bits >> 4\n\n @property\n def speed_kph(self) -> float:\n \"\"\"\n Get the speed in kilometers per hour.\n \"\"\"\n return self.speed_mps * 3.6\n\n @property\n def speed_mph(self) -> float:\n \"\"\"\n Get the speed in miles per hour.\n \"\"\"\n return self.speed_mps * 2.23694\n\n @property\n def cars_on_track(self) -> 
bool:\n \"\"\"\n Check if there are cars on the track.\n \"\"\"\n return bool(1<<0 & self.flags)\n\n @property\n def is_paused(self) -> bool:\n \"\"\"\n Check if the simulation is paused.\n \"\"\"\n return bool(1<<1 & self.flags)\n\n @property\n def is_loading(self) -> bool:\n \"\"\"\n Check if the simulation is loading.\n \"\"\"\n return bool(1<<2 & self.flags)\n\n @property\n def in_gear(self) -> bool:\n \"\"\"\n Check if the vehicle is in gear.\n \"\"\"\n return bool(1<<3 & self.flags)\n\n @property\n def has_turbo(self) -> bool:\n \"\"\"\n Check if the vehicle has a turbo.\n \"\"\"\n return bool(1<<4 & self.flags)\n\n @property\n def rev_limit(self) -> bool:\n \"\"\"\n Check if the vehicle is at the rev limit.\n \"\"\"\n return bool(1<<5 & self.flags)\n\n @property\n def hand_brake_active(self) -> bool:\n \"\"\"\n Check if the hand brake is active.\n \"\"\"\n return bool(1<<6 & self.flags)\n\n @property\n def lights_active(self) -> bool:\n \"\"\"\n Check if the lights are active.\n \"\"\"\n return bool(1<<7 & self.flags)\n\n @property\n def high_beams(self) -> bool:\n \"\"\"\n Check if the high beams are active.\n \"\"\"\n return bool(1<<8 & self.flags)\n\n @property\n def low_beams(self) -> bool:\n \"\"\"\n Check if the low beams are active.\n \"\"\"\n return bool(1<<9 & self.flags)\n\n @property\n def asm_active(self) -> bool:\n \"\"\"\n Check if the ASM (Active Stability Management) is active.\n \"\"\"\n return bool(1<<10 & self.flags)\n\n @property\n def tcs_active(self) -> bool:\n \"\"\"\n Check if the TCS (Traction Control System) is active.\n \"\"\"\n return bool(1<<11 & self.flags)\n\n @property\n def unknown_bool_1(self) -> bool:\n \"\"\"\n Get the value of an unknown boolean flag.\n \"\"\"\n return bool(1<<12 & self.flags)\n\n @property\n def unknown_bool_2(self) -> bool:\n \"\"\"\n Not sure\n \"\"\"\n return bool(1<<13 & self.flags)\n\n @property\n def unknown_bool_3(self) -> bool:\n \"\"\"\n Get the value of another unknown boolean flag.\n \"\"\"\n return bool(1<<14 & self.flags)\n\n @property\n def unknown_bool_4(self) -> bool:\n \"\"\"\n Get the value of another unknown boolean flag.\n \"\"\"\n return bool(1<<15 & self.flags)\n\n @property\n def best_lap_time(self) -> str:\n \"\"\"\n Get the formatted best lap time.\n \"\"\"\n if self.best_lap_time_ms == -1:\n return None\n return format_time(self.best_lap_time_ms)\n\n @property\n def last_lap_time(self) -> str:\n \"\"\"\n Get the formatted last lap time.\n \"\"\"\n if self.last_lap_time_ms == -1:\n return None\n return format_time(self.last_lap_time_ms)\n\n @property\n def time_of_day(self) -> str:\n \"\"\"\n Get the formatted time of day.\n \"\"\"\n if self.time_of_day_ms == -1:\n return None\n return format_time_of_day(self.time_of_day_ms)\n\n @property\n def as_dict(self):\n \"\"\"\n Returns a dictionary containing the state of the object.\n \"\"\"\n remove_keys = [\n x\n for x in self.__dict__.keys()\n if any(\n ignore in x\n for ignore in [\n \"_x\",\n \"_y\",\n \"_z\",\n \"flags\",\n \"bits\",\n \"empty\",\n \"unused\",\n \"_fl\",\n \"_fr\",\n \"_rl\",\n \"_rr\",\n ]\n )\n ]\n\n added = {\n \"position\": self.position,\n \"velocity\": self.velocity,\n \"rotation\": self.rotation,\n \"angular_velocity\": self.angular_velocity,\n \"road_plane\": self.road_plane,\n \"tire_temp\": self.tire_temp,\n \"wheel_rps\": self.wheel_rps,\n \"tire_radius\": self.tire_radius,\n \"suspension_height\": self.suspension_height,\n \"current_gear\": self.current_gear,\n \"suggested_gear\": self.suggested_gear,\n \"speed_kph\": 
self.speed_kph,\n \"speed_mph\": self.speed_mph,\n \"cars_on_track\": self.cars_on_track,\n \"is_paused\": self.is_paused,\n \"is_loading\": self.is_loading,\n \"in_gear\": self.in_gear,\n \"has_turbo\": self.has_turbo,\n \"rev_limit\": self.rev_limit,\n \"hand_brake_active\": self.hand_brake_active,\n \"lights_active\": self.lights_active,\n \"high_beams\": self.high_beams,\n \"low_beams\": self.low_beams,\n \"asm_active\": self.asm_active,\n \"tcs_active\": self.tcs_active,\n \"unknown_bool_1\": self.unknown_bool_1,\n \"unknown_bool_2\": self.unknown_bool_2,\n \"unknown_bool_3\": self.unknown_bool_3,\n \"unknown_bool_4\": self.unknown_bool_4,\n \"best_lap_time\": self.best_lap_time,\n \"last_lap_time\": self.last_lap_time,\n \"time_of_day\": self.time_of_day,\n }\n\n result = dict(self.__dict__, **added)\n for remove_key in remove_keys:\n result.pop(remove_key, None)\n\n return result\n\n @staticmethod\n def from_dict(d):\n \"\"\"\n Get telemetry instance from the as_dict property\n Useful for replays\n \"\"\"\n\n # pop the vector3s\n for vec3 in [\"position\", \"velocity\", \"rotation\", \"angular_velocity\", \"road_plane\"]:\n prop = d.pop(vec3)\n if vec3 == \"angular_velocity\":\n vec3 = \"ang_vel\"\n d[f\"{vec3}_x\"] = prop[0]\n d[f\"{vec3}_y\"] = prop[1]\n d[f\"{vec3}_z\"] = prop[2]\n # pop the corners\n for whmet, attr in {\n \"tire_temp\": \"tire_{0}_temp\",\n \"wheel_rps\": \"wheel_{0}_rps\",\n \"tire_radius\": \"tire_{0}_radius\",\n \"suspension_height\": \"tire_{0}_sus_height\"\n }.items():\n prop = d.pop(whmet)\n for i, k in {\n 0: \"fl\",\n 1: \"fr\",\n 2: \"rl\",\n 3: \"rr\"\n }.items():\n d[attr.format(k)] = prop[i]\n # rebuild the bits attr\n sg = d.pop(\"suggested_gear\") & 0xF\n cg = d.pop(\"current_gear\") & 0xF\n d[\"bits\"] = (sg << 4) | cg\n\n # just remove these:\n for prop in [\"speed_kph\", \"speed_mph\", \"best_lap_time\", \"last_lap_time\", \"time_of_day\"]:\n d.pop(prop)\n\n # Add back ones removed:\n d[\"empty\"] = 0\n for i in range(8):\n d[f\"unused{i+1}\"] = 0\n\n # rebuild flags\n d[\"flags\"] = (\n (1<<0 if d.pop(\"cars_on_track\") else 0) |\n (1<<1 if d.pop(\"is_paused\") else 0) |\n (1<<2 if d.pop(\"is_loading\") else 0) |\n (1<<3 if d.pop(\"in_gear\") else 0) |\n (1<<4 if d.pop(\"has_turbo\") else 0) |\n (1<<5 if d.pop(\"rev_limit\") else 0) |\n (1<<6 if d.pop(\"hand_brake_active\") else 0) |\n (1<<7 if d.pop(\"lights_active\") else 0) |\n (1<<8 if d.pop(\"high_beams\") else 0) |\n (1<<9 if d.pop(\"low_beams\") else 0) |\n (1<<10 if d.pop(\"asm_active\") else 0) |\n (1<<11 if d.pop(\"tcs_active\") else 0) |\n (1<<12 if d.pop(\"unknown_bool_1\", False) else 0) |\n (1<<13 if d.pop(\"clutch_out\", False) else 0) |\n (1<<13 if d.pop(\"unknown_bool_2\", False) else 0) |\n (1<<14 if d.pop(\"unknown_bool_3\", False) else 0) |\n (1<<15 if d.pop(\"unknown_bool_4\", False) else 0)\n )\n\n return Telemetry(**d)" }, { "identifier": "PDEncyption", "path": "gt_telem/net/crypto.py", "snippet": "class PDEncyption:\n \"\"\"\n PDEncyption class provides methods for decrypting ciphertext using Salsa20 stream cipher.\n Credit to https://github.com/Nenkai/PDTools\n \"\"\"\n\n _DEFAULT_KEY = b\"Simulator Interface Packet ver 0.0\"\n _GT7_KEY = b\"Simulator Interface Packet GT7 ver 0.0\"\n _IV_MASK: int = 0xDEADBEAF\n\n def __init__(self, is_gt7):\n self.is_gt7 = is_gt7\n\n def decrypt(self, ciphertext: bytes) -> bytes:\n \"\"\"\n Decrypts the provided ciphertext using Salsa20 stream cipher.\n\n Parameters:\n - ciphertext (bytes): The encrypted data to be decrypted.\n - 
is_gt7 (bool): Flag indicating whether to use the GT7 key. Default is True.\n\n Returns:\n bytes: The decrypted plaintext.\n \"\"\"\n seed = struct.unpack(\"<I\", ciphertext[0x40:0x44])[0]\n iv = seed ^ self._IV_MASK\n iv = struct.pack(\"<II\", iv, seed)\n return Salsa20_xor(\n ciphertext, iv, self._GT7_KEY[:32] if self.is_gt7 else self._DEFAULT_KEY[:32]\n )" }, { "identifier": "get_ps_ip_type", "path": "gt_telem/net/device_discover.py", "snippet": "def get_ps_ip_type() -> tuple[str | None, str | None]:\n \"\"\"\n Discovers the PlayStation IP address and host type using device discovery protocol.\n\n Returns:\n Tuple[Optional[str], Optional[str]]: A tuple containing the PlayStation IP address and host type.\n \"\"\"\n skt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n skt.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n skt.settimeout(1)\n\n query = b\"SRCH * HTTP/1.1\\ndevice-discovery-protocol-version:00030010\"\n\n skt.sendto(query, (\"<broadcast>\", 9302))\n try:\n packet, addr = skt.recvfrom(1024)\n except socket.timeout:\n return None, None\n\n ps_type = _get_host_type(packet.decode(\"utf-8\"))\n host_ip = addr[0]\n\n return host_ip, ps_type" } ]
import asyncio import copy import logging import socket import threading from collections import deque from time import sleep from gt_telem.errors.playstation_errors import (PlayStationNotFoundError, PlayStatonOnStandbyError) from gt_telem.models.helpers import SpanReader from gt_telem.models.telemetry import Telemetry from gt_telem.net.crypto import PDEncyption from gt_telem.net.device_discover import get_ps_ip_type
6,215
class TurismoClient: RECEIVE_PORT = 33339 BIND_PORT = 33340 def __init__(self, is_gt7: bool=True, ps_ip: str=None): """ Initialize TurismoClient. Parameters: - is_gt7 (bool): Flag indicating whether it's Gran Turismo 7. Default is True. - ps_ip (str): PlayStation IP address. If None, it will be discovered. """ self._cancellation_token = None ip, ps = get_ps_ip_type() ip = ip or ps_ip if not ip: raise PlayStationNotFoundError() if ps and "STANDBY" in ps:
class TurismoClient: RECEIVE_PORT = 33339 BIND_PORT = 33340 def __init__(self, is_gt7: bool=True, ps_ip: str=None): """ Initialize TurismoClient. Parameters: - is_gt7 (bool): Flag indicating whether it's Gran Turismo 7. Default is True. - ps_ip (str): PlayStation IP address. If None, it will be discovered. """ self._cancellation_token = None ip, ps = get_ps_ip_type() ip = ip or ps_ip if not ip: raise PlayStationNotFoundError() if ps and "STANDBY" in ps:
raise PlayStatonOnStandbyError(ip)
1
2023-12-23 03:37:54+00:00
8k
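The gt-telem entry above decodes Gran Turismo telemetry from bit-packed fields: the low nibble of `bits` holds the current gear, the high nibble the suggested gear, and each boolean (`cars_on_track`, `is_paused`, `in_gear`, `has_turbo`, ...) is a single bit of `flags`. A minimal sketch of that decoding follows, with illustrative helper names; the bit positions are taken from the properties shown in the entry.

def decode_gears(bits: int) -> tuple:
    # Low nibble: current gear; high nibble: suggested gear (as in the Telemetry properties above).
    current_gear = bits & 0b1111
    suggested_gear = bits >> 4
    return current_gear, suggested_gear

def flag_is_set(flags: int, position: int) -> bool:
    # Each boolean (cars_on_track=0, is_paused=1, is_loading=2, in_gear=3, ...) is one bit of `flags`.
    return bool((1 << position) & flags)

if __name__ == "__main__":
    assert decode_gears(0x43) == (3, 4)    # current gear 3, suggested gear 4
    assert flag_is_set(0b0010, 1) is True  # bit 1 corresponds to is_paused in the entry above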
DidacticFishstick/ultrastar-wingman
main.py
[ { "identifier": "Song", "path": "song.py", "snippet": "class Song:\n songs = {}\n usdb_ids = set()\n php_session_id = None\n\n @staticmethod\n def create_valid_dir_name(s):\n # Remove invalid characters\n s = re.sub(r'[<>:\"/\\\\|?*]', '', s)\n\n # Replace spaces with underscores\n # s = s.replace(' ', '_')\n\n # Truncate to a reasonable length to avoid exceeding max path lengths\n s = s[:255]\n\n # Ensure the name isn't a reserved name in Windows\n reserved_names = [\"CON\", \"PRN\", \"AUX\", \"NUL\", \"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\", \"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\"]\n if s.upper() in reserved_names:\n s = \"_\" + s\n\n return s\n\n @classmethod\n def load_songs(cls):\n for subdir in os.listdir(config.usdx_songs_dir):\n subdir_path = os.path.join(config.usdx_songs_dir, subdir)\n\n if not os.path.isdir(subdir_path):\n continue\n\n try:\n usdb_id = None\n if os.path.isfile(os.path.join(subdir_path, \"usdb_data.json\")):\n with open(os.path.join(subdir_path, \"usdb_data.json\")) as file:\n usdb_data = json.loads(file.read())\n usdb_id = usdb_data.get(\"id\")\n\n txt_files = [f for f in os.listdir(subdir_path) if f.endswith('.txt')]\n\n if not txt_files:\n continue\n\n try:\n txt_path = os.path.join(subdir_path, txt_files[0])\n\n with open(txt_path, 'rb') as file:\n encoding = chardet.detect(file.read())['encoding']\n\n if encoding != 'utf-8':\n logging.warning(f\"Wrong encoding. Is {encoding} instead of utf-8 for '{os.path.join(subdir_path, txt_files[0])}'\")\n\n with open(txt_path, 'r', encoding=encoding) as file:\n txt = file.read()\n\n match = re.search(r'#TITLE:(.*)\\n', txt)\n if match:\n title = match.group(1)\n else:\n logging.warning(f\"No title for {subdir_path}\")\n continue\n\n match = re.search(r'#ARTIST:(.*)\\n', txt)\n if match:\n artist = match.group(1)\n else:\n logging.warning(f\"No artist for {subdir_path}\")\n continue\n\n match = re.search(r'#COVER:(.*)\\n', txt)\n cover = None\n if match:\n cover = match.group(1)\n\n match = re.search(r'#MP3:(.*)\\n', txt)\n mp3 = None\n if match:\n mp3 = match.group(1)\n\n cls(subdir_path, title, artist, usdb_id, cover, mp3)\n except:\n logging.exception(f\"Could not process song in '{subdir_path}'\")\n except:\n logging.exception(f\"Could not process song in '{subdir_path}'\")\n\n @classmethod\n async def download(cls, id):\n response = usdb.session.post(f\"https://usdb.animux.de/index.php?link=gettxt&id={id}\", headers={\"Cookie\": cls.php_session_id}, data={\"wd\": \"1\"})\n response.raise_for_status()\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Extract the value of the input element with the name \"txt\"\n input_element = soup.find('input', {'name': 'txt'})\n\n if input_element:\n txt = input_element['value'].replace(\"\\r\\n\", \"\\n\")\n else:\n raise DownloadException(f\"txt for {id} not found on usdb.animux.de. 
Are you logged in?\")\n\n # TODO: get only the id, load everything here\n match = re.search(r'#TITLE:(.*)\\n', txt)\n if match:\n title = match.group(1)\n else:\n raise DownloadException(\"missing name\")\n\n match = re.search(r'#ARTIST:(.*)\\n', txt)\n if match:\n artist = match.group(1)\n else:\n raise DownloadException(\"missing artist\")\n\n match = re.search(r'#VIDEO:(.*)\\n', txt)\n if match:\n video = match.group(1)\n else:\n raise DownloadException(\"missing video\")\n\n if id is None:\n sanitized_name = cls.create_valid_dir_name(f\"{artist} - {title}\")\n else:\n sanitized_name = cls.create_valid_dir_name(f\"{artist} - {title} ({id})\")\n\n directory = os.path.join(config.usdx_songs_dir, sanitized_name)\n\n if os.path.exists(directory):\n raise DownloadException(f\"directory '{directory}' exists\")\n\n logging.info(f\"Saving {artist} - {title} ({id}) to {directory}\")\n\n with tempfile.TemporaryDirectory() as tempdir:\n with open(os.path.join(tempdir, f\"usdb_data.json\"), \"w+\") as file:\n file.write(json.dumps({\n \"id\": id\n }))\n\n with open(os.path.join(tempdir, f\"{sanitized_name}.txt\"), \"w+\") as file:\n file.writelines(\"#VIDEO:video.mp4\\n\")\n file.writelines(\"#MP3:song.mp3\\n\")\n file.writelines(\"#COVER:cover.jpg\\n\")\n # TODO: Background\n # file.writelines(\"#BACKGROUND:background.jpg\\n\")\n for line in txt.split(\"\\n\"):\n if not any(line.startswith(s) for s in [\"#VIDEO\", \"#MP3\", \"#COVER\", \"#BACKGROUND\"]):\n file.writelines(line + \"\\n\")\n\n match = re.search(r'[va]=([a-zA-Z0-9_-]+)', video)\n if match:\n url = f\"https://www.youtube.com/watch?v={match.group(1)}\"\n else:\n raise DownloadException(f\"no video url found in txt\")\n\n process = await asyncio.create_subprocess_exec(\n \"curl\", \"-o\", \"cover.jpg\", f\"https://usdb.animux.de/data/cover/{id}.jpg\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=tempdir\n )\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0:\n raise DownloadException(f\"cover download failed with code {process.returncode}, stdout: {stdout.decode()}, stderr: {stderr.decode()}\")\n\n # try:\n # subprocess.run([\"curl\", \"-o\", \"cover.jpg\", f\"https://usdb.animux.de/data/cover/{id}.jpg\"], cwd=tempdir, check=True)\n # except Exception as e:\n # raise DownloadException(f\"cover download failed: {e}\")\n\n process = await asyncio.create_subprocess_exec(\n config.youtube_dl, \"-o\", \"video.mp4\", \"--format\", \"mp4\", url,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=tempdir\n )\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0:\n raise DownloadException(f\"youtube-dl failed with code {process.returncode}, stdout: {stdout.decode()}, stderr: {stderr.decode()}\")\n\n # try:\n # subprocess.run([config.youtube_dl, \"-o\", \"video.mp4\", \"--format\", \"mp4\", url], cwd=tempdir, check=True)\n # except Exception as e:\n # raise DownloadException(f\"youtube-dl failed: {e}\")\n\n process = await asyncio.create_subprocess_exec(\n config.ffmpeg, \"-i\", \"video.mp4\", \"-vn\", \"-acodec\", \"libmp3lame\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-ar\", \"48000\", \"song.mp3\",\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=tempdir\n )\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0:\n raise DownloadException(f\"ffmpeg failed with code {process.returncode}, stdout: {stdout.decode()}, stderr: {stderr.decode()}\")\n\n # try:\n # subprocess.run([config.ffmpeg, \"-i\", 
\"video.mp4\", \"-vn\", \"-acodec\", \"libmp3lame\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-ar\", \"48000\", \"song.mp3\"], cwd=tempdir, check=True)\n # except Exception as e:\n # raise DownloadException(f\"ffmpeg failed: {e}\")\n\n os.makedirs(directory)\n\n for file_name in os.listdir(tempdir):\n source = os.path.join(tempdir, file_name)\n destination = os.path.join(directory, file_name)\n shutil.move(source, destination)\n\n return cls(directory, title, artist, id, \"cover.jpg\", \"song.mp3\")\n\n @classmethod\n def song_list(cls) -> List[dict]:\n return [s.to_json() for s in cls.songs.values()]\n\n @classmethod\n def get_song_by_id(cls, id) -> 'Song':\n return cls.songs.get(str(id))\n\n @staticmethod\n def get_mp3_length(filename):\n audiofile = eyed3.load(filename)\n duration = audiofile.info.time_secs\n return duration\n\n def __init__(self, directory: str, title: str, artist: str, usdb_id: Optional[str] = None, cover: Optional[str] = None, mp3: Optional[str] = None):\n \"\"\"\n Creates a new song from the information found in the directory\n\n :param directory: The directory to the song directory\n :param title: The song title\n :param artist: The artist\n :param usdb_id: An optional ID of the song on usdb.animux.de/\n \"\"\"\n\n self.directory = directory\n self.title = title\n self.artist = artist\n self.usdb_id = usdb_id\n self.cover = cover\n self.mp3 = mp3\n self.duration = self.get_mp3_length(os.path.join(directory, mp3))\n\n if cover:\n self.cover_path = os.path.join(directory, cover)\n else:\n self.cover_path = None\n\n self.id = usdb_id or uuid.uuid4().hex\n\n self.songs[str(self.id)] = self\n\n if usdb_id is not None:\n self.usdb_ids.add(usdb_id)\n\n def __str__(self):\n if self.usdb_id is not None:\n return f\"{self.title} - {self.artist} ({self.usdb_id})\"\n return f\"{self.title} - {self.artist}\"\n\n def __repr__(self):\n if self.usdb_id is not None:\n return f\"[Song '{self.title} - {self.artist}' ({self.usdb_id})]\"\n return f\"[Song '{self.title} - {self.artist}']\"\n\n def to_json(self):\n return {\n \"directory\": self.directory,\n \"title\": self.title,\n \"artist\": self.artist,\n \"usdb_id\": self.usdb_id,\n \"id\": self.id,\n \"duration\": self.duration\n }" }, { "identifier": "WebSocketServer", "path": "websocket_server.py", "snippet": "class WebSocketServer:\n def __init__(self, download_queue: asyncio.Queue):\n async def register(self, websocket):\n async def unregister(self, websocket):\n async def send_to_clients(self, message):\n async def handler(self, websocket, path):\n async def message_queue_consumer(self):\n async def download_queue_consumer(self, i):" } ]
import getpass
import os
import asyncio
import json
import logging
import os.path
import platform
import signal
import subprocess
import threading
import websockets
import config
import usdb
import usdx
from flask import render_template, Flask, request, send_file
from song import Song
from websocket_server import WebSocketServer, messages
4,580
return res.content, res.status_code # headers = {k: v for k, v in request.headers if k.lower() != 'host'} # # # let everybody use the same php session key # global php_session_id # if php_session_id is None: # php_session_id = headers.get("Cookie") # Song.php_session_id = php_session_id # # if php_session_id is not None: # headers["Cookie"] = php_session_id # # res = requests.request( # method=request.method, # url=request.url.replace(f"{request.host_url}usdb/", "https://usdb.animux.de/"), # headers=headers, # data=request.get_data(), # cookies=request.cookies, # allow_redirects=False, # ) # # excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection'] # headers = [ # (k, v) for k, v in res.raw.headers.items() # if k.lower() not in excluded_headers # ] # # response = Response(res.content, res.status_code, headers) # return response @app.route('/api/usdb/get_songs', methods=['GET']) def get_songs_route(): args: dict = request.args.to_dict() for key in ["golden", "songcheck"]: if key in args: args[key] = args[key] == "true" songs = usdb.get_songs(**args) for song in songs["songs"]: song["downloaded"] = song["id"] in Song.usdb_ids return songs @app.route('/api/players', methods=['GET', 'POST']) def names(): if request.method == 'POST': name = request.form['name'] logging.info(f"Adding player '{name}'") with open(config.players_file, 'a') as file: file.write(name + '\n') return {"success": True} else: try: with open(config.players_file, 'r') as file: names = file.read().splitlines() return sorted(set(names)) except FileNotFoundError: return [] @app.route('/api/players/delete', methods=['POST']) def delete_name(): name_to_delete = request.form['name'] logging.info(f"Deleting player '{name_to_delete}'") with open(config.players_file, 'r') as file: names = file.read().splitlines() with open(config.players_file, 'w') as file: for name in names: if name != name_to_delete: file.write(name + '\n') return {"success": True} @app.route('/api/players/submit', methods=['POST']) def submit_names(): usdx.enter_names(json.loads(request.form['names'])) return {"success": True} def main(): username = config.usdb_user password = config.usdb_pass if username == "<username>" or password == "<password>": username = None password = None while True: if username is None or password is None: print(f"To download songs, you need an account on https://usdb.animux.de. Create an account and enter the credentials below. You can always change these settings in the config file '{config.file_name}'.") new_username = input("Username: ") new_password = getpass.getpass("Password: ") # Windows doing windows things... while new_password == "\x16": print("The windows cmd does not allow pasting into password fields using ctrl+V. Instead you can right click in the terminal to paste your password") new_password = getpass.getpass("Password: ") if new_username != username or new_password != password: config.save_usdb_credentials(new_username, new_password) username, password = new_username, new_password if usdb.login(username, password): print("Login on https://usdb.animux.de successful") break else: print("Invalid credentials. Please try again.") username = None password = None usdx.change_config(config.setup_colors) restart_usdx() threading.Thread(target=app.run, kwargs={"host": "0.0.0.0", "port": 8080}).start() Song.load_songs()
SCRIPT_BASE_PATH = os.path.abspath(os.path.dirname(__file__)) app = Flask(__name__, static_folder=os.path.join(SCRIPT_BASE_PATH, "static"), template_folder=os.path.join(SCRIPT_BASE_PATH, "templates")) usdx_process = None download_queue = asyncio.Queue() event_loop = asyncio.get_event_loop() php_session_id = None def restart_usdx(): global usdx_process if usdx_process is not None: logging.info("Stopping USDX") if platform.system() == "Windows": subprocess.call(['taskkill', '/F', '/T', '/PID', str(usdx_process.pid)]) else: os.kill(usdx_process.pid, signal.SIGKILL) logging.info("Starting USDX") usdx_process = subprocess.Popen(str(config.usdx_path)) @app.route('/') def index(): return render_template('index.html', messages=messages) @app.route('/songs') def songs(): return render_template('songs.html', songs=Song.song_list(), messages=messages) @app.route('/song/<song_id>/cover', methods=['GET']) def cover(song_id): song = Song.get_song_by_id(song_id) if song.cover_path: return send_file(song.cover_path, mimetype=f'image/{song.cover_path.rsplit(".", 1)[-1].lower()}') else: # TODO: default cover return "", 404 @app.route('/avatars/<avatar>', methods=['GET']) def avatar(avatar): try: return send_file(os.path.join(SCRIPT_BASE_PATH, "avatars", f"cat_{avatar}"), mimetype=f'image/jpg') except FileNotFoundError: return send_file(os.path.join(SCRIPT_BASE_PATH, "avatars", "cat_rainbow.jpg"), mimetype=f'image/jpg') @app.route('/download') def download(): if request.args.get("view", "list") == "usdb": return render_template('download.html', messages=messages) else: return render_template('download_list.html', messages=messages) @app.route('/scores') def scores(): return render_template('scores.html', messages=messages) @app.route('/players') def players(): return render_template('players.html', messages=messages, colors=config.setup_colors) @app.route('/api/restart', methods=['POST']) def api_restart(): restart_usdx() return {"success": True}, 200 @app.route('/api/usdb_ids', methods=['GET']) def api_ausdb_ids(): return list(Song.usdb_ids), 200 @app.route('/api/download', methods=['POST']) def api_download_post(): id = request.json.get('id', '') if not id: return {"success": False, "error": "missing id"}, 400 id = int(id) asyncio.run_coroutine_threadsafe(download_queue.put(id), event_loop) return {"success": True}, 200 @app.route('/usdb/', defaults={'path': ''}, methods=['GET', 'POST', 'PUT', 'DELETE']) @app.route('/usdb/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE']) def proxy(path): # TODO: data does not work correctly res = usdb.session.request( method=request.method, url=request.url.replace(f"{request.host_url}usdb/", "https://usdb.animux.de/"), data=request.get_data(), ) return res.content, res.status_code # headers = {k: v for k, v in request.headers if k.lower() != 'host'} # # # let everybody use the same php session key # global php_session_id # if php_session_id is None: # php_session_id = headers.get("Cookie") # Song.php_session_id = php_session_id # # if php_session_id is not None: # headers["Cookie"] = php_session_id # # res = requests.request( # method=request.method, # url=request.url.replace(f"{request.host_url}usdb/", "https://usdb.animux.de/"), # headers=headers, # data=request.get_data(), # cookies=request.cookies, # allow_redirects=False, # ) # # excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection'] # headers = [ # (k, v) for k, v in res.raw.headers.items() # if k.lower() not in excluded_headers # ] # # response = Response(res.content, 
res.status_code, headers) # return response @app.route('/api/usdb/get_songs', methods=['GET']) def get_songs_route(): args: dict = request.args.to_dict() for key in ["golden", "songcheck"]: if key in args: args[key] = args[key] == "true" songs = usdb.get_songs(**args) for song in songs["songs"]: song["downloaded"] = song["id"] in Song.usdb_ids return songs @app.route('/api/players', methods=['GET', 'POST']) def names(): if request.method == 'POST': name = request.form['name'] logging.info(f"Adding player '{name}'") with open(config.players_file, 'a') as file: file.write(name + '\n') return {"success": True} else: try: with open(config.players_file, 'r') as file: names = file.read().splitlines() return sorted(set(names)) except FileNotFoundError: return [] @app.route('/api/players/delete', methods=['POST']) def delete_name(): name_to_delete = request.form['name'] logging.info(f"Deleting player '{name_to_delete}'") with open(config.players_file, 'r') as file: names = file.read().splitlines() with open(config.players_file, 'w') as file: for name in names: if name != name_to_delete: file.write(name + '\n') return {"success": True} @app.route('/api/players/submit', methods=['POST']) def submit_names(): usdx.enter_names(json.loads(request.form['names'])) return {"success": True} def main(): username = config.usdb_user password = config.usdb_pass if username == "<username>" or password == "<password>": username = None password = None while True: if username is None or password is None: print(f"To download songs, you need an account on https://usdb.animux.de. Create an account and enter the credentials below. You can always change these settings in the config file '{config.file_name}'.") new_username = input("Username: ") new_password = getpass.getpass("Password: ") # Windows doing windows things... while new_password == "\x16": print("The windows cmd does not allow pasting into password fields using ctrl+V. Instead you can right click in the terminal to paste your password") new_password = getpass.getpass("Password: ") if new_username != username or new_password != password: config.save_usdb_credentials(new_username, new_password) username, password = new_username, new_password if usdb.login(username, password): print("Login on https://usdb.animux.de successful") break else: print("Invalid credentials. Please try again.") username = None password = None usdx.change_config(config.setup_colors) restart_usdx() threading.Thread(target=app.run, kwargs={"host": "0.0.0.0", "port": 8080}).start() Song.load_songs()
server = WebSocketServer(download_queue)
1
2023-12-23 15:29:44+00:00
8k
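The row above ends with its next_line, gold_snippet_index, created_at and level values. As a reading aid, here is a minimal sketch of how one such row could be loaded and split into a completion prompt and its target; the file name records.jsonl and the one-JSON-object-per-line layout are assumptions for illustration, while the field names (context, import_statement, cropped_code, next_line, gold_snippet_index) are taken from the columns shown in this preview.

import json

# Hypothetical loader for rows like the ones shown in this preview.
# Assumption: the preview corresponds to a JSON-lines file named "records.jsonl".
def load_rows(path="records.jsonl"):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

for row in load_rows():
    # In-repo snippets the model may draw on; each entry has identifier/path/snippet.
    snippets = [entry["snippet"] for entry in row["context"]]
    # The completion prompt is the file's imports plus its truncated body...
    prompt = row["import_statement"] + "\n" + row["cropped_code"]
    # ...and the target is the single line that should follow it.
    target = row["next_line"]
    print(len(snippets), "context snippets; target:", repr(target))
    break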
Q-MM/PureMM
model/language_model/PureMM_llama.py
[ { "identifier": "PureMMMetaModel", "path": "model/PureMM_arch.py", "snippet": "class PureMMMetaModel:\n\n def __init__(self, config):\n super(PureMMMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n # self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n self.mm_projector = build_vision_projector(config)\n self.vision_tower.requires_grad_(False)\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None, rank=0):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n self.vision_tower.requires_grad_(False)\n\n self.config.use_mm_proj = True\n self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')\n # self.config.mm_hidden_size = vision_tower.hidden_size\n if 'eva' in self.config.mm_vision_tower:\n self.config.mm_hidden_size = 1408\n else:\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = build_vision_projector(self.config)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n rank0_print(rank, 'load mm_projector weight')\n mm_projector_variables = get_w(mm_projector_weights, 'mm_projector')\n rank0_print(rank, f'mm_projector_variables.keys(): {mm_projector_variables.keys()}')\n self.mm_projector.load_state_dict(mm_projector_variables)\n\n # mfm 参数载入\n if model_args.feature_select_strategy == 'mfm':\n mfm_variables = get_w(mm_projector_weights, 'mlm_proj')\n rank0_print(rank, f'mfm_variables: {mfm_variables}')\n self.vision_tower.mlm_proj.load_state_dict(mfm_variables)" }, { "identifier": "PureMMMetaForCausalLM", "path": "model/PureMM_arch.py", "snippet": "class PureMMMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images, is_dummy=False):\n vision_tower = self.get_model().get_vision_tower()\n\n # print(f'images dtype: {images.dtype}')\n # image_features = vision_tower(images.to(dtype=torch.bfloat16))\n if not is_dummy:\n image_features = vision_tower(images)\n else:\n image_features = images\n # print(f'image_features shape: {image_features.shape}')\n # image_features = image_features.to(dtype=self.get_model().mm_projector.weight.dtype)\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is 
not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n # print('enter 多图处理逻辑!!!!')\n # todo:不把多张图flatten到一起,视频类型可能需要修改\n # split_sizes = [image.shape[0] for image in images]\n # image_features = torch.split(image_features, split_sizes, dim=0)\n # image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n # total_image_token_num = 0\n # for batch_idx, cur_input_ids in enumerate(input_ids):\n # total_image_token_num += (cur_input_ids == IMAGE_TOKEN_INDEX).sum()\n # print(f'image_features.shape: {image_features.shape}\\ntotal_image_token_num: {total_image_token_num}')\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n # print('@@@@@@@@@@@@@@')\n # FIXME: this is a hacky fix, for deepspeed zero3 to work\n half_len = cur_input_ids.shape[0] // 2\n cur_image_features = image_features[cur_image_idx]\n cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])\n cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])\n cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if 
getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n 
self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False" } ]
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
    LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..PureMM_arch import PureMMMetaModel, PureMMMetaForCausalLM
import torch
import torch.nn as nn
3,717
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class PureMMConfig(LlamaConfig):
    model_type = "PureMM"


class PureMMLlamaModel(PureMMMetaModel, LlamaModel):
    config_class = PureMMConfig

    def __init__(self, config: LlamaConfig):
        super(PureMMLlamaModel, self).__init__(config)
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class PureMMConfig(LlamaConfig):
    model_type = "PureMM"


class PureMMLlamaModel(PureMMMetaModel, LlamaModel):
    config_class = PureMMConfig

    def __init__(self, config: LlamaConfig):
        super(PureMMLlamaModel, self).__init__(config)
class PureMMLlamaForCausalLM(LlamaForCausalLM, PureMMMetaForCausalLM):
1
2023-12-27 09:54:09+00:00
8k
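In the rows shown here, gold_snippet_index appears to point at the entry in context whose identifier the target line uses: the row above has gold_snippet_index 1 and its next_line subclasses PureMMMetaForCausalLM, which is exactly the identifier of context[1]. A small, hedged sketch of that check, reusing the assumed records.jsonl layout from the earlier sketch:

import json

# Sketch: for each row, test whether the identifier of the gold context snippet
# actually appears in next_line. Field names come from this preview; the
# records.jsonl path is an assumption carried over from the sketch above.
def gold_identifier_in_target(row):
    gold_entry = row["context"][row["gold_snippet_index"]]
    return gold_entry["identifier"] in row["next_line"]

with open("records.jsonl", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh if line.strip()]

matches = sum(gold_identifier_in_target(row) for row in rows)
print(f"{matches} of {len(rows)} rows use the gold snippet's identifier in next_line")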
gardenifi/server
tests/api/discover_wifi_test.py
[ { "identifier": "discover_wifi", "path": "app/main_app.py", "snippet": "@app.get(\"/api/discover_wifi\")\nasync def discover_wifi(chunked: int = None, page: int = None):\n \"\"\"WIFI discovery API call.\"\"\"\n try:\n if chunked is not None:\n if page is None:\n return JSONResponse(status_code=200, content=services.discover_wifi_networks(chunked))\n return JSONResponse(status_code=200, content=services.discover_wifi_networks(chunked, page))\n return JSONResponse(status_code=200, content=services.discover_wifi_networks())\n except Exception as ex:\n raise HTTPException(status_code=500, detail=str(ex)) from ex" }, { "identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services:\n \"\"\"\n The `Services` class provides various methods for managing and controlling\n services related to a Raspberry Pi device, such as turning on/off valves,\n storing and deleting program cycles, loading program cycles, discovering\n WiFi networks, and saving WiFi network configurations.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self._scheduler = BackgroundScheduler()\n self._scheduler_started = False\n\n @property\n def scheduler_started(self):\n \"\"\"getter\"\"\"\n return self._scheduler_started\n\n @scheduler_started.setter\n def scheduler_started(self, value):\n \"\"\"setter\"\"\"\n self._scheduler_started = value\n\n @property\n def scheduler(self):\n \"\"\"getter\"\"\"\n return self._scheduler\n\n @scheduler.setter\n def scheduler(self, value):\n \"\"\"setter\"\"\"\n self._scheduler = value\n\n def turn_on_from_program(self, valve):\n \"\"\"\n Turn on a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(2, \"out\" + str(valve))\n\n def turn_off_from_program(self, valve):\n \"\"\"\n Turn off a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(0, \"out\" + str(valve))\n\n def convert_to_utc(self, start_hour, tz_offset):\n \"\"\"\n Converts a given start hour in a specific time zone to Coordinated Universal Time (UTC).\n\n Args:\n start_hour (int): The starting hour in the local time zone.\n tz_offset (int): The time zone offset in hours. 
Positive values for time zones ahead of UTC,\n negative values for time zones behind UTC.\n\n Returns:\n Tuple[int, int]: A tuple containing the adjusted hour in UTC and the number of days passed.\n The adjusted hour is in the range [0, 23], and the days_passed is -1, 0, or 1\n indicating whether the adjusted hour falls before, within, or after the current day.\n\n Example:\n For a local start_hour of 10 and tz_offset of -5 (Eastern Standard Time),\n convert_to_utc(10, -5) may return (5, 0), indicating that the adjusted UTC hour is 5 with no days passed.\n\n Note:\n The method assumes a 24-hour clock format.\n \"\"\"\n logger.info(f\"Checking whether start_hour should change: {start_hour}, tz_offset: {tz_offset}\")\n # Calculate the adjusted hour\n adjusted_hour = start_hour - tz_offset\n if adjusted_hour <= 0:\n days_passed = -1\n elif adjusted_hour >= 24:\n days_passed = 1\n else:\n days_passed = 0\n adjusted_hour = adjusted_hour % 24\n return adjusted_hour, days_passed\n\n def get_previous_day(self, current_day):\n \"\"\"\n Returns the name of the previous day based on the given current day.\n\n Parameters:\n - current_day (str): The name of the current day (e.g., 'mon').\n\n Returns:\n str: The name of the previous day.\n \"\"\"\n # Find the index of the current day\n current_index = DAYS.index(current_day)\n # Calculate the index of the previous day\n previous_index = (current_index - 1) % len(DAYS)\n # Get the name of the previous day\n previous_day = DAYS[previous_index]\n return previous_day\n\n def get_next_day(self, current_day):\n \"\"\"\n Returns the name of the next day based on the given current day.\n\n Parameters:\n - current_day (str): The name of the current day (e.g., 'mon').\n\n Returns:\n str: The name of the next day.\n \"\"\"\n # Find the index of the current day\n current_index = DAYS.index(current_day)\n # Calculate the index of the next day\n next_index = (current_index + 1) % len(DAYS)\n # Get the name of the next day\n next_day = DAYS[next_index]\n return next_day\n\n def get_start_day_hour(self, day, start_hour, tz_offset):\n \"\"\"\n Checks if the start day or hour should be adjusted based on the provided conditions.\n\n Parameters:\n - day (str): The name of the current day (e.g., 'Monday').\n - start_hour (int): The original start hour (0 to 23).\n - tz_offset (int): The timezone offset in hours (-12 to +14).\n\n Returns:\n tuple: A tuple containing the adjusted day and start hour based on the provided conditions.\n \"\"\"\n logger.info(f\"Checking whether start_day should change: {day}\")\n # Convert start_hour to UTC (e.g. 
start_hour=0, tz_offset=2, start_hour=22)\n start_hour, days_passed = self.convert_to_utc(start_hour, tz_offset)\n if days_passed == 1:\n day = self.get_next_day(day)\n elif days_passed == -1:\n day = self.get_previous_day(day)\n logger.info(f\"new start_day: {day}\")\n logger.info(f\"new start_hour: {start_hour}\")\n return day, start_hour\n\n def get_stop_datetime(self, day, start_hour, start_min, period):\n \"\"\"\n Calculate the stop time for a program cycle.\n\n Parameters:\n - day (str): The day of the week.\n - start_hour (int): The starting hour.\n - start_min (int): The starting minute.\n - period (int): The duration of the cycle in minutes.\n\n Returns:\n tuple: A tuple containing the stop day, stop hour, and stop minute.\n \"\"\"\n logger.debug(f\"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}\")\n stop_day_index = DAYS.index(day)\n logger.debug(f\"stop_day_index {stop_day_index}\")\n\n stop_min = (start_min + period) % 60\n logger.debug(f\"stop_min {stop_min}\")\n\n if stop_min < start_min:\n # should go to the next hour\n stop_hour = (start_hour + 1) % 24\n # should go to the next day\n if stop_hour < start_hour:\n stop_day_index = (stop_day_index + 1) % 7\n else:\n stop_hour = start_hour\n\n logger.debug(f\"stop_hour {stop_hour}\")\n\n stop_day = DAYS[stop_day_index]\n logger.debug(f\"stop_day: {stop_day}\")\n\n return stop_day, stop_hour, stop_min\n\n def store_program_cycles(self, json_data, store=False) -> None:\n \"\"\"\n Store program cycles and schedule them using the scheduler.\n\n Parameters:\n - json_data (dict): JSON data containing program information.\n - store (bool, optional): Whether to store the program information. Default is False.\n\n Returns:\n None\n \"\"\"\n try:\n triggers_to_start = []\n triggers_to_stop = []\n for day in json_data[\"days\"].split(\",\"):\n if day not in DAYS:\n raise DayValueException(f\"{day} is not correct! 
Accepted values: {DAYS}\")\n tz_offset = json_data[\"tz_offset\"]\n if not isinstance(tz_offset, int):\n raise TypeError(\"The variable tz_offset is not an integer: {tz_offset}\")\n\n # keeping day sent by user to use on every iteration of cycles\n user_day = day\n for cycle in json_data[\"cycles\"]:\n logger.info(f\"Cycle: {cycle}\")\n if int(cycle[\"min\"]) <= 0:\n logger.info(\"This cycle should not be considered to be in the program due to min <=0.\")\n continue\n start_hour = cycle[\"start\"].split(\":\")[0]\n start_min = cycle[\"start\"].split(\":\")[1]\n\n day, start_hour = self.get_start_day_hour(user_day, int(start_hour), tz_offset)\n\n logger.info(f\"Start: {day} at {start_hour}:{start_min}\")\n triggers_to_start.append(CronTrigger(day_of_week=day, hour=int(start_hour), minute=int(start_min)))\n\n stop_day, stop_hour, stop_min = self.get_stop_datetime(day, int(start_hour), int(start_min), int(cycle[\"min\"]))\n logger.info(f\"Stop: {stop_day} at {stop_hour}:{stop_min}\")\n triggers_to_stop.append(CronTrigger(day_of_week=stop_day, hour=stop_hour, minute=stop_min))\n\n logger.info(f\"FINAL Triggers To Start to be in the program:{triggers_to_start}\")\n logger.info(f\"FINAL Triggers To Stop to be in the program: {triggers_to_stop}\")\n\n self._scheduler.add_job(self.turn_on_from_program, OrTrigger(triggers_to_start), args=[json_data[\"out\"]])\n self._scheduler.add_job(self.turn_off_from_program, OrTrigger(triggers_to_stop), args=[json_data[\"out\"]])\n\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n\n if store is True:\n file_path = PROGRAM + str(json_data[\"out\"]) + PROGRAM_EXT\n with open(file_path, \"w\", encoding=\"utf-8\") as outfile:\n json.dump(json_data, outfile)\n outfile.close()\n\n except KeyError as kex:\n raise KeyError(f\"The {kex} field is missing in the JSON data.\") from kex\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def delete_program(self, valve) -> bool:\n \"\"\"\n Delete a stored program for a specific valve.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n bool: True if the program was deleted, False otherwise.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Looking for {file_path} to delete!\")\n if path.exists(file_path):\n logger.info(f\"{file_path} exists! 
Deleting it...\")\n remove(file_path)\n return True\n return False\n\n def load_program_cycles_if_exists(self, valve):\n \"\"\"\n Load program cycles for a valve if a stored program exists.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n dict or None: The loaded JSON data or None if no program exists.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Loading {file_path} if exists!\")\n json_data = None\n if path.exists(file_path):\n logger.info(f\"{file_path} exists!\")\n with open(file_path, encoding=\"utf-8\") as json_file:\n json_data = json.load(json_file)\n self.store_program_cycles(json_data)\n json_file.close()\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n return json_data\n\n def split_json_into_chunks(self, selected_page, ap_array):\n \"\"\"\n Split a JSON array into chunks and create a response JSON.\n\n Parameters:\n - selected_page (int): The requested page number.\n - ap_array (list): The array to be split.\n\n Returns:\n dict: The response JSON containing the specified page and network information.\n \"\"\"\n selected_page = int(selected_page)\n json_response = {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"page\": selected_page,\n \"nets\": {},\n \"pages\": 0,\n }\n json_response_to_send = json_response.copy()\n\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n logger.debug(f\"Initial JSON response headers size: {headers_size} bytes\")\n\n pages = 1\n current_chunk_size = headers_size\n json_array = []\n\n for item in ap_array:\n json_response[\"pages\"] = pages\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n item_size = len(json.dumps(item).encode(\"utf-8\"))\n logger.debug(\n \"JSON item size: \"\n + f\"{item_size} bytes, \"\n + \"current_chunk_size: \"\n + f\"{current_chunk_size} bytes, \"\n + \"total: \"\n + f\"{current_chunk_size + item_size} bytes\"\n )\n if current_chunk_size + item_size >= MAX_NUM_OF_BYTES_CHUNK - MAX_NUM_OF_BUFFER_TO_ADD:\n pages += 1\n json_response[\"pages\"] = pages\n json_array = [item]\n json_response[\"nets\"] = json_array\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n current_chunk_size = headers_size + item_size + len(\", \")\n logger.debug(\n f\"Found total >= {MAX_NUM_OF_BYTES_CHUNK}: \"\n f\"Creating a new page: {pages}. \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n else:\n json_array.append(item)\n current_chunk_size += item_size + len(\", \")\n if selected_page == pages:\n json_response_to_send[\"nets\"] = json_array\n\n json_response_to_send[\"pages\"] = pages\n logger.debug(f\"JSON response size: {headers_size}\")\n logger.debug(\n f\"Nets array for this page ({pages}): {json_array}. \"\n f\"Current nets array size: {len(json.dumps(json_array).encode('utf-8'))} bytes, \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n\n if not json_response[\"nets\"]:\n json_response_to_send[\"nets\"] = json_array\n\n logger.debug(f\"JSON total size: {len(json.dumps(json_response_to_send).encode('utf-8'))}\")\n return json_response_to_send\n\n def discover_wifi_networks(self, chunked=0, page=1, refresh_networks_file=False):\n \"\"\"\n Discover available WiFi networks and return the information.\n\n Parameters:\n - chunked (int, optional): Whether to split the response into chunks. Default is 0.\n - page (int, optional): The requested page number. 
Default is 1.\n - refresh_networks_file (bool, optional): Whether to refresh the networks file. Default is False.\n\n Returns:\n str or dict: The JSON response containing WiFi network information.\n \"\"\"\n try:\n if page > 1:\n refresh_networks_file = False\n json_response = {}\n ap_array = []\n retries = 0\n while retries < 30:\n retries = retries + 1\n ap_array = Helpers().scan_rpi_wifi_networks(refresh_networks_file)\n if len(ap_array) != 0:\n break\n\n json_response = json.dumps(\n {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"ap_array\": ap_array,\n }\n )\n\n logger.info(f\"json_response: {json_response}\")\n if chunked == 0:\n return json_response\n logger.info(f\"Split array into chunks of {MAX_NUM_OF_BYTES_CHUNK} bytes...\")\n json_response = self.split_json_into_chunks(page, ap_array)\n return json_response\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network(self, ssid, wifi_key):\n \"\"\"\n Save WiFi network information.\n\n Parameters:\n - request_data (dict): The request data containing WiFi network information.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n if ssid and wifi_key:\n Helpers().store_wpa_ssid_key(ssid, wifi_key)\n return \"OK\"\n raise ValueError(\"Error: You need to provide ssid and wifi_keys in POST data\")\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network_with_wpa(self, wpa_enabled, wpa_key):\n \"\"\"\n Save WiFi network information with WPA settings.\n\n Parameters:\n - request_params (dict): The request parameters containing WPA settings.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n logger.info(f\"wpa_enabled: {wpa_enabled}, wpa_key: {wpa_key}\")\n if str(wpa_enabled) == \"1\":\n Helpers().update_wpa_supplicant(1, wpa_key)\n else:\n Helpers().update_wpa_supplicant(0, wpa_key)\n\n thread = Thread(target=Helpers().sleep_and_reboot_for_wpa)\n thread.start()\n return \"OK\"\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise" } ]
import json
import pytest
from app.main_app import discover_wifi
from app.raspi.services import Services
4,669
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ services = Services() class TestDiscoverWifi: """ Test class for the discover_wifi function. """ @pytest.mark.asyncio async def test_no_parameters(self): """ Test case for discover_wifi with no parameters. """
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ services = Services() class TestDiscoverWifi: """ Test class for the discover_wifi function. """ @pytest.mark.asyncio async def test_no_parameters(self): """ Test case for discover_wifi with no parameters. """
response = await discover_wifi()
0
2023-12-22 08:06:09+00:00
8k
bclavie/RAGatouille
ragatouille/RAGTrainer.py
[ { "identifier": "LateInteractionModel", "path": "ragatouille/models/base.py", "snippet": "class LateInteractionModel(ABC):\n @abstractmethod\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu,\n ):\n ...\n\n @abstractmethod\n def train():\n ...\n\n @abstractmethod\n def index(self, name: str, collection: list[str]):\n ...\n\n @abstractmethod\n def add_to_index(self):\n ...\n\n @abstractmethod\n def search(self, name: str, query: Union[str, list[str]]):\n ...\n\n @abstractmethod\n def _search(self, name: str, query: str):\n ...\n\n @abstractmethod\n def _batch_search(self, name: str, queries: list[str]):\n ..." }, { "identifier": "ColBERT", "path": "ragatouille/models/colbert.py", "snippet": "class ColBERT(LateInteractionModel):\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu: int = -1,\n index_name: Optional[str] = None,\n verbose: int = 1,\n load_from_index: bool = False,\n **kwargs,\n ):\n self.verbose = verbose\n self.collection = None\n if n_gpu == -1:\n n_gpu = 1 if torch.cuda.device_count() == 0 else torch.cuda.device_count()\n\n if load_from_index:\n ckpt_config = ColBERTConfig.load_from_index(\n str(pretrained_model_name_or_path)\n )\n self.config = ckpt_config\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=self.config.experiment, root=self.config.root\n )\n self.checkpoint = self.config.checkpoint\n self.index_name = self.config.index_name\n self.collection = self._get_collection_from_file(\n str(pretrained_model_name_or_path / \"collection.json\")\n )\n else:\n ckpt_config = ColBERTConfig.load_from_checkpoint(\n str(pretrained_model_name_or_path)\n )\n self.run_config = RunConfig(\n nranks=n_gpu, experiment=\"colbert\", root=\".ragatouille/\"\n )\n local_config = ColBERTConfig(**kwargs)\n self.config = ColBERTConfig.from_existing(\n ckpt_config,\n local_config,\n )\n self.checkpoint = pretrained_model_name_or_path\n self.index_name = index_name\n\n self.run_context = Run().context(self.run_config)\n self.run_context.__enter__() # Manually enter the context\n self.searcher = None\n\n def _update_index(self, new_documents: list[str], searcher: Searcher):\n updater = IndexUpdater(\n config=self.config, searcher=searcher, checkpoint=self.checkpoint\n )\n updater.add(new_documents)\n updater.persist_to_disk()\n\n def _get_collection_from_file(self, collection_path: str):\n return srsly.read_json(collection_path)\n\n def _write_collection_to_file(self, collection, collection_path: str):\n srsly.write_json(collection_path, collection)\n\n def add_to_index(\n self,\n new_documents: list[str],\n index_name: Optional[str] = None,\n ):\n self.index_name = index_name if index_name is not None else self.index_name\n if self.index_name is None:\n print(\n \"Cannot add to index without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n\n print(\n \"WARNING: add_to_index support is currently experimental!\",\n \"add_to_index support will be more thorough in future versions\",\n )\n\n searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n verbose=self.verbose,\n )\n new_documents = list(set(new_documents))\n current_len = len(searcher.collection)\n new_doc_len = len(new_documents)\n\n if (\n current_len + new_doc_len < 5000\n or new_doc_len > current_len * 0.05\n or current_len + new_doc_len\n > 100 # Export bug handler -- TODO: Remove this requirement\n ):\n new_documents += [x for x in searcher.collection]\n self.index(\n new_documents,\n index_name=self.index_name,\n max_document_length=self.config.doc_maxlen,\n overwrite=\"force_silent_overwrite\",\n )\n else:\n self._update_index(new_documents, searcher)\n\n print(\n f\"Successfully updated index with {new_doc_len} new documents!\\n\",\n f\"New index size: {new_doc_len + current_len}\",\n )\n\n return str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n\n def index(\n self,\n collection: list[str],\n index_name: Optional[\"str\"] = None,\n max_document_length: int = 256,\n overwrite: Union[bool, str] = \"reuse\",\n ):\n self.config.doc_maxlen = max_document_length\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n f\"No index_name received!\",\n f\"Using default index_name ({self.checkpoint}_new_index)\",\n )\n self.index_name = self.checkpoint + \"new_index\"\n\n collection = list(set(collection))\n self.collection = collection\n\n nbits = 2\n if len(collection) < 5000:\n nbits = 8\n elif len(collection) < 10000:\n nbits = 4\n self.config = ColBERTConfig.from_existing(\n self.config, ColBERTConfig(nbits=nbits)\n )\n self.indexer = Indexer(\n checkpoint=self.checkpoint,\n config=self.config,\n verbose=self.verbose,\n )\n self.indexer.index(\n name=self.index_name, collection=collection, overwrite=overwrite\n )\n\n index_path = str(\n Path(self.run_config.root)\n / Path(self.run_config.experiment)\n / \"indexes\"\n / self.index_name\n )\n self._write_collection_to_file(collection, index_path + \"/collection.json\")\n print(\"Done indexing!\")\n\n def _load_searcher(\n self,\n index_name: Optional[str],\n force_fast: bool = False,\n ):\n if index_name is not None:\n if self.index_name is not None:\n print(\n f\"New index_name received!\",\n f\"Updating current index_name ({self.index_name}) to {index_name}\",\n )\n self.index_name = index_name\n else:\n if self.index_name is None:\n print(\n \"Cannot search without an index_name! 
Please provide one.\",\n \"Returning empty results.\",\n )\n return None\n print(\n f\"Loading searcher for index {self.index_name} for the first time...\",\n \"This may take a few seconds\",\n )\n self.searcher = Searcher(\n checkpoint=self.checkpoint,\n config=None,\n collection=self.collection,\n index=self.index_name,\n )\n\n if not force_fast:\n if len(self.searcher.collection) < 10000:\n self.searcher.configure(ncells=4)\n self.searcher.configure(centroid_score_threshold=0.4)\n self.searcher.configure(ndocs=512)\n elif len(self.searcher.collection) < 100000:\n self.searcher.configure(ncells=2)\n self.searcher.configure(centroid_score_threshold=0.45)\n self.searcher.configure(ndocs=1024)\n # Otherwise, use defaults for k\n else:\n # Use fast settingss\n self.searcher.configure(ncells=1)\n self.searcher.configure(centroid_score_threshold=0.5)\n self.searcher.configure(ndocs=256)\n\n print(\"Searcher loaded!\")\n\n def search(\n self,\n query: Union[str, list[str]],\n index_name: Optional[\"str\"] = None,\n k: int = 10,\n force_fast: bool = False,\n zero_index_ranks: bool = False,\n ):\n if self.searcher is None or (\n index_name is not None and self.index_name != index_name\n ):\n self._load_searcher(index_name=index_name, force_fast=force_fast)\n\n if isinstance(query, str):\n results = [self._search(query, k)]\n else:\n results = self._batch_search(query, k)\n\n to_return = []\n\n for result in results:\n result_for_query = []\n for id_, rank, score in zip(*result):\n result_for_query.append(\n {\n \"content\": self.searcher.collection[id_],\n \"score\": score,\n \"rank\": rank - 1 if zero_index_ranks else rank,\n }\n )\n to_return.append(result_for_query)\n\n if len(to_return) == 1:\n return to_return[0]\n return to_return\n\n def _search(self, query: str, k: int):\n return self.searcher.search(query, k=k)\n\n def _batch_search(self, query: list[str], k: int):\n queries = {i: x for i, x in enumerate(query)}\n results = self.searcher.search_all(queries, k=k)\n results = [\n [list(zip(*value))[i] for i in range(3)]\n for value in results.todict().values()\n ]\n return results\n\n def train(self, data_dir, training_config: ColBERTConfig):\n training_config = ColBERTConfig.from_existing(self.config, training_config)\n training_config.nway = 2\n with Run().context(self.run_config):\n trainer = Trainer(\n triples=str(data_dir / \"triples.train.colbert.jsonl\"),\n queries=str(data_dir / \"queries.train.colbert.tsv\"),\n collection=str(data_dir / \"corpus.train.colbert.tsv\"),\n config=training_config,\n )\n\n trainer.train(checkpoint=self.checkpoint)\n\n def __del__(self):\n # Clean up context\n self.run_context.__exit__(None, None, None)" }, { "identifier": "HardNegativeMiner", "path": "ragatouille/negative_miners/base.py", "snippet": "class HardNegativeMiner(ABC):\n @abstractmethod\n def export_index(self, path: Union[str, Path]) -> bool:\n ...\n\n @abstractmethod\n def mine_hard_negatives(\n self,\n queries: list[str],\n collection: list[str],\n neg_k: int,\n ):\n ...\n\n @abstractmethod\n def _mine(\n self,\n queries: list[str],\n k: int,\n ):\n ..." 
}, { "identifier": "SimpleMiner", "path": "ragatouille/negative_miners/simpleminer.py", "snippet": "class SimpleMiner(HardNegativeMiner):\n \"\"\"The simplest approach to hard negatives mining.\n Select the most appropriate, small-sized embedding model for the target language.\n And retrieve random negatives in the top 10-100 results.\n Strong baseline for quick, low-engineering hard negative mining.\"\"\"\n\n def __init__(\n self,\n language_code: str,\n model_size: Literal[\"small\", \"base\", \"large\"] = \"small\",\n ) -> None:\n self.n_gpu = torch.cuda.device_count()\n self.target_language = language_code\n self.model_size = model_size\n if language_code not in [\"en\", \"zh\"]:\n language_code = \"other\"\n self.model_name = f\"{language_code}_{model_size}\"\n hub_model = DenseModels[self.model_name].value\n print(f\"Loading Hard Negative SimpleMiner dense embedding model {hub_model}...\")\n self.model = SentenceTransformer(hub_model)\n self.has_index = False\n self.min_rank = 10\n\n def build_index(\n self,\n collection,\n batch_size: int = 128,\n save_index: bool = False,\n save_path: Union[str, Path] = None,\n force_fp32: bool = True,\n ):\n print(f\"Building hard negative index for {len(collection)} documents...\")\n if len(collection) > 1000:\n pool = self.model.start_multi_process_pool()\n embeds = self.model.encode_multi_process(\n collection, pool, batch_size=batch_size\n )\n self.model.stop_multi_process_pool(pool)\n else:\n embeds = self.model.encode(collection, batch_size=batch_size)\n\n print(\"All documents embedded, now adding to index...\")\n\n self.max_rank = min(110, int(len(collection) // 10))\n self.max_rank = min(self.max_rank, len(collection))\n\n storage_type = StorageDataType.Float32\n if len(collection) > 500000 and not force_fp32:\n storage_type = StorageDataType.E4M3\n\n self.voyager_index = Index(\n Space.Cosine,\n num_dimensions=self.model.get_sentence_embedding_dimension(),\n storage_data_type=storage_type,\n )\n\n self.corpus_map = {i: doc for i, doc in enumerate(collection)}\n id_to_vector = {}\n for i, emb in enumerate(embeds):\n id_to_vector[i] = emb\n self.corpus_map[i] = collection[i]\n del embeds\n\n self.voyager_index.add_items(\n vectors=[x for x in id_to_vector.values()],\n ids=[x for x in id_to_vector.keys()],\n num_threads=-1,\n )\n\n del id_to_vector\n\n if save_index:\n print(f\"Saving index to {save_path}...\")\n self.export_index(save_path)\n else:\n print(\"save_index set to False, skipping saving hard negative index\")\n print(\"Hard negative index generated\")\n self.has_index = True\n\n def query_index(self, query, top_k=110):\n results = self.voyager_index.query(\n query, k=min(top_k, self.voyager_index.__len__())\n )\n return results\n\n def mine_hard_negatives(\n self,\n queries: Union[list[str], str],\n collection: Optional[list[str]] = None,\n save_index: bool = False,\n save_path: Union[str, Path] = None,\n force_fp32: bool = True,\n ):\n if self.has_index is False and collection is not None:\n self.build_index(\n collection,\n save_index=save_index,\n save_path=save_path,\n force_fp32=force_fp32,\n )\n if isinstance(queries, str):\n print(\"mining\")\n return self._mine(queries)\n return self._batch_mine(queries)\n\n def _mine(\n self,\n query: str,\n ):\n q_emb = self.model.encode(query)\n query_results = self.query_index(q_emb, top_k=self.max_rank)\n if len(query_results) > self.min_rank:\n query_results = query_results[self.min_rank : self.max_rank]\n query_results = [self.corpus_map[x] for x in query_results[0]]\n return 
query_results\n\n def _batch_mine(\n self,\n queries: list[str],\n ):\n \"\"\"Separate function to parallelise later on\"\"\"\n print(f\"Retrieving hard negatives for {len(queries)} queries...\")\n results = []\n print(\"Embedding queries...\")\n query_embeddings = self.model.encode(queries, show_progress_bar=True)\n print(\"Retrieving hard negatives...\")\n for q_emb in tqdm(query_embeddings):\n query_results = self.query_index(q_emb, top_k=self.max_rank)\n query_results = query_results[self.min_rank : self.max_rank]\n query_results = [self.corpus_map[x.id] for x in query_results]\n results.append(query_results)\n print(f\"\"\"Done generating hard negatives.\"\"\")\n return results\n\n def export_index(self, path: Union[str, Path]) -> bool:\n self.voyager_index.save(path)\n return True" }, { "identifier": "seeded_shuffle", "path": "ragatouille/utils.py", "snippet": "def seeded_shuffle(collection: list, seed: int = 42):\n random.seed(seed)\n random.shuffle(collection)\n return collection" }, { "identifier": "TrainingDataProcessor", "path": "ragatouille/data/training_data_processor.py", "snippet": "class TrainingDataProcessor:\n def __init__(\n self,\n collection: list[str],\n queries: list[str],\n negative_miner=None,\n ):\n self.collection = collection\n self.queries = queries\n self.negative_miner = negative_miner\n self._make_data_map()\n self.training_triplets = []\n\n def process_raw_data(\n self,\n raw_data,\n data_type: Literal[\"pairs\", \"triplets\", \"labeled_pairs\"],\n data_dir: Union[str, Path],\n export: bool = True,\n mine_hard_negatives: bool = True,\n num_new_negatives: int = 10,\n positive_label: int = 1,\n negative_label: int = 0,\n hard_negative_minimum_rank: int = 10,\n ):\n self.negative_miner.min_rank = hard_negative_minimum_rank\n if self.negative_miner is None and mine_hard_negatives:\n raise ValueError(\n \"mine_hard_negatives is True but no negative miner was provided!\"\n )\n if data_type == \"pairs\":\n self._process_raw_pairs(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n )\n elif data_type == \"labeled_pairs\":\n self._process_raw_labeled_pairs(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n positive_label=positive_label,\n negative_label=negative_label,\n )\n elif data_type == \"triplets\":\n self._process_raw_triplets(\n raw_data=raw_data,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=num_new_negatives,\n )\n\n if export:\n self.export_training_data(data_dir)\n\n def _make_individual_triplets(self, query, positives, negatives):\n \"\"\"Create the training data in ColBERT(v1) format from raw lists of triplets\"\"\"\n triplets = []\n q = self.query_map[query]\n random.seed(42)\n if len(positives) > 1:\n all_pos_texts = [p for p in positives]\n max_triplets_per_query = 20\n negs_per_positive = max(1, max_triplets_per_query // len(all_pos_texts))\n initial_triplets_count = 0\n for pos in all_pos_texts:\n p = self.passage_map[pos]\n chosen_negs = random.sample(\n negatives, min(len(negatives), negs_per_positive)\n )\n for neg in chosen_negs:\n n = self.passage_map[neg]\n initial_triplets_count += 1\n triplets.append([q, p, n])\n\n extra_triplets_needed = max_triplets_per_query - initial_triplets_count\n while extra_triplets_needed > 0:\n p = self.passage_map[random.choice(all_pos_texts)]\n n = self.passage_map[random.choice(negatives)]\n triplets.append([q, p, n])\n extra_triplets_needed -= 1\n else:\n p = 
self.passage_map[positives[0]]\n for n in negatives:\n triplets.append([q, p, self.passage_map[n]])\n\n return triplets\n\n def _get_new_negatives(self, query, passages, mine_hard_negatives, n_new_negatives):\n \"\"\"Generate new negatives for each query, using either:\n - The assigned hard negative miner if mine_hard_negatives is True\n - Randomly sampling from the full collection otherwise\n \"\"\"\n if mine_hard_negatives:\n hard_negatives = self.negative_miner.mine_hard_negatives(\n query, n_new_negatives\n )\n candidates = [\n x\n for x in hard_negatives\n if x not in passages[\"positives\"] and x not in passages[\"negatives\"]\n ]\n new_negatives = random.sample(\n candidates,\n min(n_new_negatives, len(candidates)),\n )\n else:\n new_negatives = [\n x\n for x in random.sample(self.collection, n_new_negatives)\n if x not in passages[\"positives\"] and x not in passages[\"negatives\"]\n ]\n\n return new_negatives\n\n def _process_raw_pairs(self, raw_data, mine_hard_negatives, n_new_negatives):\n \"\"\"Convert unlabeled pairs into training triplets.\n It's assumed unlabeled pairs are always in the format (query, relevant_passage)\"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n\n for query, positive in raw_data:\n if isinstance(positive, str):\n positive = [positive]\n raw_grouped_triplets[query][\"positives\"] += positive\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _process_raw_labeled_pairs(\n self,\n raw_data,\n mine_hard_negatives,\n n_new_negatives,\n positive_label,\n negative_label,\n ):\n \"\"\"\n Convert labeled pairs intro training triplets.\n Labeled pairs are in the format (query, passage, label)\n \"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n\n for query, passage, label in raw_data:\n if isinstance(passage, str):\n passage = [passage]\n if label == positive_label:\n label = \"positives\"\n elif label == negative_label:\n label = \"negatives\"\n else:\n raise ValueError(\n f\"Label {label} must correspond to either positive_label or negative_label!\"\n )\n\n raw_grouped_triplets[query][label] += passage\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _process_raw_triplets(self, raw_data, mine_hard_negatives, n_new_negatives):\n \"\"\"\n Convert raw triplets\n (query, positives : str | list[str], negatives: str | list[str])\n into training triplets.\n \"\"\"\n training_triplets = []\n raw_grouped_triplets = defaultdict(lambda: defaultdict(list))\n for query, positive, negative in raw_data:\n if isinstance(positive, str):\n positive = [positive]\n if isinstance(negative, str):\n negative = [negative]\n\n raw_grouped_triplets[query][\"positives\"] += positive\n 
raw_grouped_triplets[query][\"negatives\"] += negative\n\n for query, passages in raw_grouped_triplets.items():\n if n_new_negatives > 0:\n passages[\"negatives\"] += self._get_new_negatives(\n query=query,\n passages=passages,\n mine_hard_negatives=mine_hard_negatives,\n n_new_negatives=n_new_negatives,\n )\n training_triplets += self._make_individual_triplets(\n query=query,\n positives=passages[\"positives\"],\n negatives=passages[\"negatives\"],\n )\n self.training_triplets = training_triplets\n\n def _make_data_map(self):\n \"\"\"\n Generate a query_text: query_id and passage_text: passage_id mapping\n To easily generate ColBERT-format training data.\n \"\"\"\n self.query_map = {}\n self.passage_map = {}\n\n for i, query in enumerate(self.queries):\n self.query_map[query] = i\n for i, passage in enumerate(list(self.collection)):\n self.passage_map[passage] = i\n\n def export_training_data(self, path: Union[str, Path]):\n \"\"\"\n Export training data for both training and versioning purposes.\n {path} should ideally be dvc versioned.\n \"\"\"\n\n path = Path(path)\n\n # Create the directory if it does not exist\n os.makedirs(path, exist_ok=True)\n\n with open(path / \"queries.train.colbert.tsv\", \"w\") as f:\n for query, idx in self.query_map.items():\n query = query.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n f.write(f\"{idx}\\t{query}\\n\")\n with open(path / \"corpus.train.colbert.tsv\", \"w\") as f:\n for document, idx in self.passage_map.items():\n document = document.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n f.write(f\"{idx}\\t{document}\\n\")\n\n srsly.write_jsonl(path / \"triples.train.colbert.jsonl\", self.training_triplets)" } ]
from pathlib import Path from typing import Union, Literal, Optional from colbert.infra import ColBERTConfig from ragatouille.models import LateInteractionModel, ColBERT from ragatouille.negative_miners import HardNegativeMiner, SimpleMiner from ragatouille.utils import seeded_shuffle from ragatouille.data import TrainingDataProcessor
6,723
class RAGTrainer: """Main trainer to fine-tune/train ColBERT models with a few lines.""" model: Union[LateInteractionModel, None] = None negative_miner: Union[HardNegativeMiner, None] = None collection: list[str] = [] queries: Union[list[str], None] = None raw_data: Union[list[tuple], list[list], None] = None training_triplets: list[list[int]] = list() def __init__( self, model_name: str, pretrained_model_name: str, language_code: str = "en", n_usable_gpus: int = -1, ): """ Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from. Parameters: model_name: str - Name of the model to train. This will be used to name the checkpoints and the index. pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name. language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index. n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. Returns: self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised. """ self.model_name = model_name self.pretrained_model_name = pretrained_model_name self.language_code = language_code self.model = ColBERT( pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus ) def add_documents(self, documents: list[str]): self.collection += documents
class RAGTrainer: """Main trainer to fine-tune/train ColBERT models with a few lines.""" model: Union[LateInteractionModel, None] = None negative_miner: Union[HardNegativeMiner, None] = None collection: list[str] = [] queries: Union[list[str], None] = None raw_data: Union[list[tuple], list[list], None] = None training_triplets: list[list[int]] = list() def __init__( self, model_name: str, pretrained_model_name: str, language_code: str = "en", n_usable_gpus: int = -1, ): """ Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from. Parameters: model_name: str - Name of the model to train. This will be used to name the checkpoints and the index. pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name. language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index. n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available. Returns: self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised. """ self.model_name = model_name self.pretrained_model_name = pretrained_model_name self.language_code = language_code self.model = ColBERT( pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus ) def add_documents(self, documents: list[str]): self.collection += documents
seeded_shuffle(self.collection)
4
2023-12-29 16:26:42+00:00
8k
Caipengzhou/BRAU-Netplusplus
networks/bra_unet_system.py
[ { "identifier": "Block", "path": "networks/bra_block.py", "snippet": "class Block(nn.Module):\n def __init__(self, dim, input_resolution, drop_path=0., layer_scale_init_value=-1,num_heads=8, n_win=7, qk_dim=None, qk_scale=None,\n kv_per_win=4, kv_downsample_ratio=4, kv_downsample_kernel=None, kv_downsample_mode='ada_avgpool',\n topk=4, param_attention=\"qkvo\", param_routing=False, diff_routing=False, soft_routing=False,\n mlp_ratio=4, mlp_dwconv=False, side_dwconv=5, before_attn_dwconv=3, pre_norm=True, auto_pad=False):\n super().__init__()\n qk_dim = qk_dim or dim\n self.input_resolution=input_resolution\n # modules\n if before_attn_dwconv > 0:\n self.pos_embed = nn.Conv2d(dim, dim, kernel_size=before_attn_dwconv, padding=1, groups=dim)\n else:\n self.pos_embed = lambda x: 0\n self.norm1 = nn.LayerNorm(dim, eps=1e-6) # important to avoid attention collapsing\n if topk > 0:\n self.attn = BiLevelRoutingAttention(dim=dim, num_heads=num_heads, n_win=n_win, qk_dim=qk_dim,\n qk_scale=qk_scale, kv_per_win=kv_per_win,\n kv_downsample_ratio=kv_downsample_ratio,\n kv_downsample_kernel=kv_downsample_kernel,\n kv_downsample_mode=kv_downsample_mode,\n topk=topk, param_attention=param_attention, param_routing=param_routing,\n diff_routing=diff_routing, soft_routing=soft_routing,\n side_dwconv=side_dwconv,\n auto_pad=auto_pad)\n elif topk == -1:\n self.attn = Attention(dim=dim)\n elif topk == -2:\n self.attn = AttentionLePE(dim=dim, side_dwconv=side_dwconv)\n elif topk == 0:\n self.attn = nn.Sequential(rearrange('n h w c -> n c h w'), # compatiability\n nn.Conv2d(dim, dim, 1), # pseudo qkv linear\n nn.Conv2d(dim, dim, 5, padding=2, groups=dim), # pseudo attention\n nn.Conv2d(dim, dim, 1), # pseudo out linear\n rearrange('n c h w -> n h w c')\n )\n self.norm2 = nn.LayerNorm(dim, eps=1e-6)\n\n self.mlp = nn.Sequential(nn.Linear(dim, int(mlp_ratio * dim)),\n DWConv(int(mlp_ratio * dim)) if mlp_dwconv else nn.Identity(),\n nn.GELU(),\n nn.Linear(int(mlp_ratio * dim), dim)\n )\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n\n\n # tricks: layer scale & pre_norm/post_norm\n if layer_scale_init_value > 0:\n self.use_layer_scale = True\n self.gamma1 = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)\n self.gamma2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)\n else:\n self.use_layer_scale = False\n self.pre_norm = pre_norm\n\n def forward(self, x):\n \"\"\"\n x: NCHW tensor\n \"\"\"\n H, W = self.input_resolution\n\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n x = x.permute(0, 3, 1, 2)\n # conv pos embedding\n x = x + self.pos_embed(x)\n # permute to NHWC tensor for attention & mlp\n x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)\n # print(\"x.shape\",x.shape)\n\n # attention & mlp\n if self.pre_norm:\n if self.use_layer_scale:\n x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) # (N, H, W, C)\n x = x + self.drop_path(self.gamma2 * self.WF(self.norm2(x))) # (N, H, W, C)\n else:\n x = x + self.drop_path(self.attn(self.norm1(x))) # (N, H, W, C)\n x = x + self.drop_path(self.mlp(self.norm2(x))) # (N, H, W, C)\n else: # https://kexue.fm/archives/9009\n if self.use_layer_scale:\n x = self.norm1(x + self.drop_path(self.gamma1 * self.attn(x))) # (N, H, W, C)\n x = self.norm2(x + self.drop_path(self.gamma2 * self.WF(x))) # (N, H, W, C)\n else:\n x = self.norm1(x + self.drop_path(self.attn(x))) # (N, H, W, C)\n x = self.norm2(x + self.drop_path(self.WF(x))) # (N, H, W, C)\n\n # permute back\n x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)\n x = x.flatten(2).transpose(1, 2)\n return x" }, { "identifier": "BasicLayer_up", "path": "networks/bra_decoder_expandx4.py", "snippet": "class BasicLayer_up(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, embed_dim, num_heads, drop_path_rate=0.,\n layer_scale_init_value=-1, topks=[8, 8, -1, -1], qk_dims=[96, 192, 384, 768], n_win=7,\n kv_per_wins=[2, 2, -1, -1], kv_downsample_kernels=[4, 2, 1, 1], kv_downsample_ratios=[4, 2, 1, 1],\n kv_downsample_mode='ada_avgpool', param_attention='qkvo', param_routing=False, diff_routing=False,\n soft_routing=False, pre_norm=True, mlp_ratios=[4, 4, 4, 4], mlp_dwconv=False, side_dwconv=5,\n qk_scale=None, before_attn_dwconv=3, auto_pad=False, norm_layer=nn.LayerNorm, upsample=None,\n use_checkpoint=False\n ):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n # stochastic depth 随机深度衰减规则\n dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum([depth]))]\n cur = 0\n # build blocks\n self.blocks = nn.ModuleList([\n Block(dim=embed_dim,\n input_resolution=input_resolution,\n drop_path=dp_rates[cur + i],\n layer_scale_init_value=layer_scale_init_value,\n num_heads=num_heads,\n n_win=n_win,\n qk_dim=qk_dims,\n qk_scale=qk_scale,\n kv_per_win=kv_per_wins,\n kv_downsample_ratio=kv_downsample_ratios,\n kv_downsample_kernel=kv_downsample_kernels,\n kv_downsample_mode=kv_downsample_mode,\n topk=topks,\n param_attention=param_attention,\n param_routing=param_routing,\n diff_routing=diff_routing,\n soft_routing=soft_routing,\n mlp_ratio=mlp_ratios,\n mlp_dwconv=mlp_dwconv,\n side_dwconv=side_dwconv,\n before_attn_dwconv=before_attn_dwconv,\n pre_norm=pre_norm,\n auto_pad=auto_pad)\n for i in range(depth)])\n\n # patch merging layer\n if upsample is not None:\n self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)\n else:\n self.upsample = None\n\n def forward(self, x):\n for blk in self.blocks:\n if self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x = blk(x)\n if self.upsample is not None:\n x = self.upsample(x)\n return x" } ]
import math import torch import torch.nn as nn from timm.models.layers import trunc_normal_ from networks.bra_block import Block from einops import rearrange from fairscale.nn.checkpoint import checkpoint_wrapper from networks.bra_decoder_expandx4 import BasicLayer_up
3,991
nn.BatchNorm2d(out_channels) ) def forward(self, x): b, c, h, w = x.shape x_permute = x.permute(0, 2, 3, 1).view(b, -1, c) x_att_permute = self.channel_attention(x_permute).view(b, h, w, c) x_channel_att = x_att_permute.permute(0, 3, 1, 2) x = x * x_channel_att x_spatial_att = self.spatial_attention(x).sigmoid() out = x * x_spatial_att return out class PatchExpand(nn.Module): def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.expand = nn.Linear(dim, 2 * dim, bias=False) if dim_scale == 2 else nn.Identity() self.norm = norm_layer(dim // dim_scale) def forward(self, x): """ x: B, H*W, C """ H, W = self.input_resolution x = x.permute(0,2,3,1) x = self.expand(x) B, H, W, C = x.shape x = x.view(B, H, W, C) x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C // 4) x = x.view(B, -1, C // 4) x = self.norm(x) return x class FinalPatchExpand_X4(nn.Module): def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.dim_scale = dim_scale self.expand = nn.Linear(dim, 16 * dim, bias=False) self.output_dim = dim self.norm = norm_layer(self.output_dim) def forward(self, x): H, W = self.input_resolution x = self.expand(x) B, L, C = x.shape assert L == H * W, "input feature has wrong size" x = x.view(B, H, W, C) x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C // (self.dim_scale ** 2)) x = x.view(B, -1, self.output_dim) x = self.norm(x) return x class BRAUnetSystem(nn.Module): def __init__(self, img_size=256,depth=[3, 4, 8, 3],depths_decoder=[2,2,2,2], in_chans=3, num_classes=1000, embed_dim=[64, 128, 320, 512], head_dim=64, qk_scale=None, representation_size=None, drop_path_rate=0., use_checkpoint_stages=[], norm_layer=nn.LayerNorm, ######## n_win=7, kv_downsample_mode='identity', kv_per_wins=[2, 2, -1, -1], topks=[8, 8, -1, -1], side_dwconv=5, layer_scale_init_value=-1, qk_dims=[None, None, None, None], param_routing=False, diff_routing=False, soft_routing=False, pre_norm=True, pe=None, pe_stages=[0], before_attn_dwconv=3, auto_pad=False, #----------------------- kv_downsample_kernels=[4, 2, 1, 1], kv_downsample_ratios=[4, 2, 1, 1], # -> kv_per_win = [2, 2, 2, 1] mlp_ratios=[4, 4, 4, 4], param_attention='qkvo', final_upsample = "expand_first", mlp_dwconv=False): super().__init__() self.num_classes = num_classes self.num_features = embed_dim[0] # num_features for consistency with other models patches_resolution = [img_size // 4, img_size // 4] self.num_layers = len(depth) self.patches_resolution = patches_resolution self.final_upsample = final_upsample self.sccsa1 = SCCSA(in_channels=embed_dim[1],out_channels=embed_dim[1]) self.sccsa2 = SCCSA(in_channels=embed_dim[2],out_channels=embed_dim[2]) self.sccsa3 = SCCSA(in_channels=embed_dim[3],out_channels=embed_dim[3]) ############ downsample layers (patch embeddings) ###################### self.downsample_layers = nn.ModuleList() # NOTE: uniformer uses two 3*3 conv, while in many other transformers this is one 7*7 conv stem = nn.Sequential( nn.Conv2d(in_chans, embed_dim[0] // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.BatchNorm2d(embed_dim[0] // 2), nn.GELU(), nn.Conv2d(embed_dim[0] // 2, embed_dim[0], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.BatchNorm2d(embed_dim[0]), ) self.downsample_layers.append(stem) for i in range(3): downsample_layer = 
nn.Sequential( nn.Conv2d(embed_dim[i], embed_dim[i+1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.BatchNorm2d(embed_dim[i+1]) ) self.downsample_layers.append(downsample_layer) ########################################################################## self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks nheads = [dim // head_dim for dim in qk_dims] dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))] cur = 0 for i in range(4): stage = nn.Sequential(
class SCCSA(nn.Module): def __init__(self, in_channels, out_channels, rate=4): super(SCCSA, self).__init__() self.channel_attention = nn.Sequential( nn.Linear(in_channels, int(in_channels / rate)), nn.ReLU(inplace=True), nn.Linear(int(in_channels / rate), in_channels) ) self.spatial_attention = nn.Sequential( nn.Conv2d(in_channels, int(in_channels / rate), kernel_size=7, padding=3), nn.BatchNorm2d(int(in_channels / rate)), nn.ReLU(inplace=True), nn.Conv2d(int(in_channels / rate), out_channels, kernel_size=7, padding=3), nn.BatchNorm2d(out_channels) ) def forward(self, x): b, c, h, w = x.shape x_permute = x.permute(0, 2, 3, 1).view(b, -1, c) x_att_permute = self.channel_attention(x_permute).view(b, h, w, c) x_channel_att = x_att_permute.permute(0, 3, 1, 2) x = x * x_channel_att x_spatial_att = self.spatial_attention(x).sigmoid() out = x * x_spatial_att return out class PatchExpand(nn.Module): def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.expand = nn.Linear(dim, 2 * dim, bias=False) if dim_scale == 2 else nn.Identity() self.norm = norm_layer(dim // dim_scale) def forward(self, x): """ x: B, H*W, C """ H, W = self.input_resolution x = x.permute(0,2,3,1) x = self.expand(x) B, H, W, C = x.shape x = x.view(B, H, W, C) x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C // 4) x = x.view(B, -1, C // 4) x = self.norm(x) return x class FinalPatchExpand_X4(nn.Module): def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.dim_scale = dim_scale self.expand = nn.Linear(dim, 16 * dim, bias=False) self.output_dim = dim self.norm = norm_layer(self.output_dim) def forward(self, x): H, W = self.input_resolution x = self.expand(x) B, L, C = x.shape assert L == H * W, "input feature has wrong size" x = x.view(B, H, W, C) x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C // (self.dim_scale ** 2)) x = x.view(B, -1, self.output_dim) x = self.norm(x) return x class BRAUnetSystem(nn.Module): def __init__(self, img_size=256,depth=[3, 4, 8, 3],depths_decoder=[2,2,2,2], in_chans=3, num_classes=1000, embed_dim=[64, 128, 320, 512], head_dim=64, qk_scale=None, representation_size=None, drop_path_rate=0., use_checkpoint_stages=[], norm_layer=nn.LayerNorm, ######## n_win=7, kv_downsample_mode='identity', kv_per_wins=[2, 2, -1, -1], topks=[8, 8, -1, -1], side_dwconv=5, layer_scale_init_value=-1, qk_dims=[None, None, None, None], param_routing=False, diff_routing=False, soft_routing=False, pre_norm=True, pe=None, pe_stages=[0], before_attn_dwconv=3, auto_pad=False, #----------------------- kv_downsample_kernels=[4, 2, 1, 1], kv_downsample_ratios=[4, 2, 1, 1], # -> kv_per_win = [2, 2, 2, 1] mlp_ratios=[4, 4, 4, 4], param_attention='qkvo', final_upsample = "expand_first", mlp_dwconv=False): super().__init__() self.num_classes = num_classes self.num_features = embed_dim[0] # num_features for consistency with other models patches_resolution = [img_size // 4, img_size // 4] self.num_layers = len(depth) self.patches_resolution = patches_resolution self.final_upsample = final_upsample self.sccsa1 = SCCSA(in_channels=embed_dim[1],out_channels=embed_dim[1]) self.sccsa2 = SCCSA(in_channels=embed_dim[2],out_channels=embed_dim[2]) self.sccsa3 = SCCSA(in_channels=embed_dim[3],out_channels=embed_dim[3]) ############ downsample layers (patch embeddings) 
###################### self.downsample_layers = nn.ModuleList() # NOTE: uniformer uses two 3*3 conv, while in many other transformers this is one 7*7 conv stem = nn.Sequential( nn.Conv2d(in_chans, embed_dim[0] // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.BatchNorm2d(embed_dim[0] // 2), nn.GELU(), nn.Conv2d(embed_dim[0] // 2, embed_dim[0], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.BatchNorm2d(embed_dim[0]), ) self.downsample_layers.append(stem) for i in range(3): downsample_layer = nn.Sequential( nn.Conv2d(embed_dim[i], embed_dim[i+1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)), nn.BatchNorm2d(embed_dim[i+1]) ) self.downsample_layers.append(downsample_layer) ########################################################################## self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks nheads = [dim // head_dim for dim in qk_dims] dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))] cur = 0 for i in range(4): stage = nn.Sequential(
*[Block(dim=embed_dim[i],
0
2023-12-29 05:45:26+00:00
8k
shibing624/chatgpt-webui
src/base_model.py
[ { "identifier": "shared", "path": "src/shared.py", "snippet": "class State:\n def interrupt(self):\n def recover(self):\n def set_api_host(self, api_host: str):\n def reset_api_host(self):\n def reset_all(self):\n def set_api_key_queue(self, api_key_list):\n def switching_api_key(self, func):\n def wrapped(*args, **kwargs):" }, { "identifier": "retrieve_proxy", "path": "src/config.py", "snippet": "@contextmanager\ndef retrieve_proxy(proxy=None):\n \"\"\"\n 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理\n 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量\n \"\"\"\n global http_proxy, https_proxy\n if proxy is not None:\n http_proxy = proxy\n https_proxy = proxy\n yield http_proxy, https_proxy\n else:\n old_var = os.environ[\"HTTP_PROXY\"], os.environ[\"HTTPS_PROXY\"]\n os.environ[\"HTTP_PROXY\"] = http_proxy\n os.environ[\"HTTPS_PROXY\"] = https_proxy\n yield http_proxy, https_proxy # return new proxy\n\n # return old proxy\n os.environ[\"HTTP_PROXY\"], os.environ[\"HTTPS_PROXY\"] = old_var" }, { "identifier": "construct_index", "path": "src/index_func.py", "snippet": "def construct_index(\n api_key,\n files,\n load_from_cache_if_possible=True,\n):\n from langchain.vectorstores import FAISS\n from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n if api_key:\n os.environ[\"OPENAI_API_KEY\"] = api_key\n else:\n os.environ[\"OPENAI_API_KEY\"] = \"sk-xxxxxxx\"\n index_name = get_files_hash(files)\n index_dir = os.path.join(pwd_path, '../index')\n index_path = f\"{index_dir}/{index_name}\"\n doc_file = f\"{index_path}/docs.pkl\"\n if local_embedding:\n embeddings = HuggingFaceEmbeddings(model_name=hf_emb_model_name)\n else:\n from langchain.embeddings import OpenAIEmbeddings\n if os.environ.get(\"OPENAI_API_TYPE\", \"openai\") == \"openai\":\n openai_api_base = os.environ.get(\"OPENAI_API_BASE\", OPENAI_API_BASE)\n embeddings = OpenAIEmbeddings(\n openai_api_base=openai_api_base,\n openai_api_key=os.environ.get(\"OPENAI_EMBEDDING_API_KEY\", api_key)\n )\n else:\n embeddings = OpenAIEmbeddings(\n deployment=os.environ[\"AZURE_EMBEDDING_DEPLOYMENT_NAME\"],\n openai_api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n model=os.environ[\"AZURE_EMBEDDING_MODEL_NAME\"],\n openai_api_base=os.environ[\"AZURE_OPENAI_API_BASE_URL\"],\n openai_api_type=\"azure\"\n )\n if os.path.exists(index_path) and load_from_cache_if_possible:\n logger.info(\"找到了缓存的索引文件,加载中……\")\n index = FAISS.load_local(index_path, embeddings)\n documents = load_pkl(doc_file)\n return index, documents\n else:\n try:\n documents = get_documents(files)\n logger.info(\"构建索引中……\")\n with retrieve_proxy():\n index = FAISS.from_documents(documents, embeddings)\n logger.debug(\"索引构建完成!\")\n os.makedirs(index_dir, exist_ok=True)\n index.save_local(index_path)\n logger.debug(\"索引已保存至本地!\")\n save_pkl(documents, doc_file)\n logger.debug(\"索引文档已保存至本地!\")\n return index, documents\n except Exception as e:\n logger.error(f\"索引构建失败!error: {e}\")\n return None" }, { "identifier": "MODEL_TOKEN_LIMIT", "path": "src/presets.py", "snippet": "MODEL_TOKEN_LIMIT = {\n \"gpt-3.5-turbo\": 4096,\n \"gpt-3.5-turbo-16k\": 16384,\n \"gpt-3.5-turbo-0301\": 4096,\n \"gpt-3.5-turbo-0613\": 4096,\n \"gpt-3.5-turbo-1106\": 16384,\n \"gpt-4\": 8192,\n \"gpt-4-32k\": 32768,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-vision-preview\": 128000,\n}" }, { "identifier": "DEFAULT_TOKEN_LIMIT", "path": "src/presets.py", "snippet": "DEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限" }, { "identifier": "TOKEN_OFFSET", "path": "src/presets.py", "snippet": "TOKEN_OFFSET = 1000 # 
模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。" }, { "identifier": "REDUCE_TOKEN_FACTOR", "path": "src/presets.py", "snippet": "REDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。" }, { "identifier": "STANDARD_ERROR_MSG", "path": "src/presets.py", "snippet": "STANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀" }, { "identifier": "NO_APIKEY_MSG", "path": "src/presets.py", "snippet": "NO_APIKEY_MSG = i18n(\"API key为空,请检查是否输入正确。\") # API key 长度不足 51 位" }, { "identifier": "BILLING_NOT_APPLICABLE_MSG", "path": "src/presets.py", "snippet": "BILLING_NOT_APPLICABLE_MSG = i18n(\"账单信息不适用\") # 本地运行的模型返回的账单信息" }, { "identifier": "NO_INPUT_MSG", "path": "src/presets.py", "snippet": "NO_INPUT_MSG = i18n(\"请输入对话内容。\") # 未输入对话内容" }, { "identifier": "HISTORY_DIR", "path": "src/presets.py", "snippet": "HISTORY_DIR = os.path.join(pwd_path, '../history')" }, { "identifier": "INITIAL_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "INITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"" }, { "identifier": "PROMPT_TEMPLATE", "path": "src/presets.py", "snippet": "PROMPT_TEMPLATE = \"\"\"\\\nContext information is below.\n---------------------\n{context_str}\n---------------------\nCurrent date: {current_date}.\nUsing the provided context information, write a comprehensive reply to the given query.\nMake sure to cite results using [number] notation after the reference.\nIf the provided context information refer to multiple subjects with the same name, write separate answers for each subject.\nUse prior knowledge only if the given context didn't provide enough information.\nAnswer the question: {query_str}\nReply in {reply_language}\n\"\"\"" }, { "identifier": "WEBSEARCH_PTOMPT_TEMPLATE", "path": "src/presets.py", "snippet": "WEBSEARCH_PTOMPT_TEMPLATE = \"\"\"\\\nWeb search results:\n\n{web_results}\nCurrent date: {current_date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. 
If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.\nQuery: {query}\nReply in {reply_language}\n\"\"\"" }, { "identifier": "i18n", "path": "src/utils.py", "snippet": " class DataframeData(TypedDict):\nclass ConfigType(Enum):\nclass ConfigItem:\nclass SetupWizard:\ndef predict(current_model, *args):\ndef billing_info(current_model):\ndef set_key(current_model, *args):\ndef load_chat_history(current_model, *args):\ndef delete_chat_history(current_model, *args):\ndef interrupt(current_model, *args):\ndef reset(current_model, *args):\ndef retry(current_model, *args):\ndef delete_first_conversation(current_model, *args):\ndef delete_last_conversation(current_model, *args):\ndef set_system_prompt(current_model, *args):\ndef rename_chat_history(current_model, *args):\ndef auto_name_chat_history(current_model, *args):\ndef export_markdown(current_model, *args):\ndef upload_chat_history(current_model, *args):\ndef set_token_upper_limit(current_model, *args):\ndef set_temperature(current_model, *args):\ndef set_top_p(current_model, *args):\ndef set_n_choices(current_model, *args):\ndef set_stop_sequence(current_model, *args):\ndef set_max_tokens(current_model, *args):\ndef set_presence_penalty(current_model, *args):\ndef set_frequency_penalty(current_model, *args):\ndef set_logit_bias(current_model, *args):\ndef set_user_identifier(current_model, *args):\ndef set_single_turn(current_model, *args):\ndef handle_file_upload(current_model, *args):\ndef handle_summarize_index(current_model, *args):\ndef like(current_model, *args):\ndef dislike(current_model, *args):\ndef count_token(input_str):\ndef markdown_to_html_with_syntax_highlight(md_str): # deprecated\n def replacer(match):\ndef normalize_markdown(md_text: str) -> str: # deprecated\ndef convert_mdtext(md_text): # deprecated\ndef clip_rawtext(chat_message, need_escape=True):\ndef convert_bot_before_marked(chat_message):\ndef convert_user_before_marked(chat_message):\ndef escape_markdown(text):\ndef convert_asis(userinput): # deprecated\ndef detect_converted_mark(userinput): # deprecated\ndef detect_language(code): # deprecated\ndef construct_text(role, text):\ndef construct_user(text):\ndef construct_system(text):\ndef construct_assistant(text):\ndef save_file(filename, model, chatbot):\ndef sorted_by_pinyin(list):\ndef sorted_by_last_modified_time(list, dir):\ndef get_file_names_by_type(dir, filetypes=[\".json\"]):\ndef get_file_names_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_dropdown_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_by_last_modified_time(dir, filetypes=[\".json\"]):\ndef get_history_names(user_name=\"\"):\ndef get_first_history_name(user_name=\"\"):\ndef get_history_list(user_name=\"\"):\ndef init_history_list(user_name=\"\"):\ndef filter_history(user_name, keyword):\ndef load_template(filename, mode=0):\ndef get_template_names():\ndef get_template_dropdown():\ndef get_template_content(templates, selection, original_system_prompt):\ndef reset_textbox():\ndef reset_default():\ndef change_api_host(host):\ndef change_proxy(proxy):\ndef hide_middle_chars(s):\ndef submit_key(key):\ndef replace_today(prompt):\ndef get_geoip():\n def fetch_ip():\ndef find_n(lst, max_num):\ndef start_outputing():\ndef end_outputing():\ndef cancel_outputing():\ndef transfer_input(inputs):\ndef update_chuanhu():\ndef add_source_numbers(lst, source_name=\"Source\", use_source=True):\ndef add_details(lst):\ndef sheet_to_string(sheet, sheet_name=None):\ndef 
excel_to_string(file_path):\ndef get_last_day_of_month(any_day):\ndef get_model_source(model_name, alternative_source):\ndef refresh_ui_elements_on_load(current_model, selected_model_name, user_name):\ndef toggle_like_btn_visibility(selected_model_name):\ndef get_corresponding_file_type_by_model_name(selected_model_name):\ndef new_auto_history_filename(username):\ndef get_history_filepath(username):\ndef beautify_err_msg(err_msg):\ndef auth_from_conf(username, password):\ndef get_files_hash(file_src=None, file_paths=None):\ndef myprint(**args):\ndef replace_special_symbols(string, replace_string=\" \"):\n def __init__(self, key, name, default=None, type=ConfigType.String) -> None:\ndef generate_prompt_string(config_item):\ndef generate_result_string(config_item, config_value):\n def __init__(self, file_path=config_file) -> None:\n def set(self, config_items: List[ConfigItem], prompt: str):\n def set_users(self):\n def __setitem__(self, setting_key: str, value):\n def __getitem__(self, setting_key: str):\n def save(self):\ndef setup_wizard():\ndef save_pkl(data, file_path):\ndef load_pkl(file_path):\ndef chinese_preprocessing_func(text: str) -> List[str]:\nSERVER_GEO_IP_MSG = None\nFETCHING_IP = False\n SERVER_GEO_IP_MSG = i18n(\"你可以使用聊天功能。\")\n SERVER_GEO_IP_MSG = \"**您的IP区域:中国。**\"\n SERVER_GEO_IP_MSG = i18n(\"您的IP区域:\") + f\"{country}。\"\n FETCHING_IP = False\n FETCHING_IP = True" } ]
import os import shutil import traceback import commentjson as json import gradio as gr import tiktoken import urllib3 from enum import Enum from itertools import islice from loguru import logger from src import shared from src.config import retrieve_proxy from src.index_func import construct_index from src.presets import ( MODEL_TOKEN_LIMIT, DEFAULT_TOKEN_LIMIT, TOKEN_OFFSET, REDUCE_TOKEN_FACTOR, STANDARD_ERROR_MSG, NO_APIKEY_MSG, BILLING_NOT_APPLICABLE_MSG, NO_INPUT_MSG, HISTORY_DIR, INITIAL_SYSTEM_PROMPT, PROMPT_TEMPLATE, WEBSEARCH_PTOMPT_TEMPLATE, ) from src.utils import ( i18n, construct_assistant, construct_user, save_file, hide_middle_chars, count_token, new_auto_history_filename, get_history_names, get_history_filepath, init_history_list, get_history_list, replace_special_symbols, get_first_history_name, add_source_numbers, add_details, replace_today, chinese_preprocessing_func, ) from langchain.vectorstores.base import VectorStoreRetriever from langchain.retrievers import BM25Retriever, EnsembleRetriever from duckduckgo_search import DDGS
5,381
logit_bias = self.logit_bias.split() bias_map = {} encoding = tiktoken.get_encoding("cl100k_base") for line in logit_bias: word, bias_amount = line.split(":") if word: for token in encoding.encode(word): bias_map[token] = float(bias_amount) return bias_map def set_user_identifier(self, new_user_identifier): self.user_identifier = new_user_identifier self.auto_save() def set_system_prompt(self, new_system_prompt): self.system_prompt = new_system_prompt self.auto_save() def set_key(self, new_access_key): self.api_key = new_access_key.strip() msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key) logger.info(msg) return self.api_key, msg def set_single_turn(self, new_single_turn): self.single_turn = new_single_turn self.auto_save() def reset(self, remain_system_prompt=False): self.history = [] self.all_token_counts = [] self.interrupted = False self.history_file_path = new_auto_history_filename(self.user_name) history_name = self.history_file_path[:-5] choices = [history_name] + get_history_names(self.user_name) system_prompt = self.system_prompt if remain_system_prompt else "" self.single_turn = self.default_single_turn self.temperature = self.default_temperature self.top_p = self.default_top_p self.n_choices = self.default_n_choices self.stop_sequence = self.default_stop_sequence self.max_generation_token = self.default_max_generation_token self.presence_penalty = self.default_presence_penalty self.frequency_penalty = self.default_frequency_penalty self.logit_bias = self.default_logit_bias self.user_identifier = self.default_user_identifier return ( [], self.token_message([0]), gr.Radio.update(choices=choices, value=history_name), system_prompt, self.single_turn, self.temperature, self.top_p, self.n_choices, self.stop_sequence, self.token_upper_limit, self.max_generation_token, self.presence_penalty, self.frequency_penalty, self.logit_bias, self.user_identifier, ) def delete_first_conversation(self): if self.history: del self.history[:2] del self.all_token_counts[0] return self.token_message() def delete_last_conversation(self, chatbot): if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]: msg = "由于包含报错信息,只删除chatbot记录" chatbot = chatbot[:-1] return chatbot, self.history if len(self.history) > 0: self.history = self.history[:-2] if len(chatbot) > 0: msg = "删除了一组chatbot对话" chatbot = chatbot[:-1] if len(self.all_token_counts) > 0: msg = "删除了一组对话的token计数记录" self.all_token_counts.pop() msg = "删除了一组对话" self.chatbot = chatbot self.auto_save(chatbot) return chatbot, msg def token_message(self, token_lst=None): if token_lst is None: token_lst = self.all_token_counts token_sum = 0 for i in range(len(token_lst)): token_sum += sum(token_lst[: i + 1]) return ( i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens" ) def rename_chat_history(self, filename, chatbot): if filename == "": return gr.update() if not filename.endswith(".json"): filename += ".json" self.delete_chat_history(self.history_file_path) # 命名重复检测 repeat_file_index = 2 full_path = os.path.join(HISTORY_DIR, self.user_name, filename) while os.path.exists(full_path): full_path = os.path.join( HISTORY_DIR, self.user_name, f"{repeat_file_index}_{filename}" ) repeat_file_index += 1 filename = os.path.basename(full_path) self.history_file_path = filename
class ModelType(Enum): Unknown = -1 OpenAI = 0 ChatGLM = 1 OpenAIInstruct = 2 OpenAIVision = 3 Claude = 4 Qwen = 5 LLaMA = 6 @classmethod def get_type(cls, model_name: str): model_name_lower = model_name.lower() if "gpt" in model_name_lower: if "instruct" in model_name_lower: model_type = ModelType.OpenAIInstruct elif "vision" in model_name_lower: model_type = ModelType.OpenAIVision else: model_type = ModelType.OpenAI elif "chatglm" in model_name_lower: model_type = ModelType.ChatGLM elif "llama" in model_name_lower or "alpaca" in model_name_lower or "yi" in model_name_lower: model_type = ModelType.LLaMA else: model_type = ModelType.Unknown return model_type class BaseLLMModel: def __init__( self, model_name, system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, n_choices=1, stop="", max_generation_token=None, presence_penalty=0, frequency_penalty=0, logit_bias=None, user="", single_turn=False, ) -> None: self.history = [] self.all_token_counts = [] self.model_name = model_name self.model_type = ModelType.get_type(model_name) try: self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name] except KeyError: self.token_upper_limit = DEFAULT_TOKEN_LIMIT self.interrupted = False self.system_prompt = system_prompt self.api_key = None self.need_api_key = False self.history_file_path = get_first_history_name(user) self.user_name = user self.chatbot = [] self.default_single_turn = single_turn self.default_temperature = temperature self.default_top_p = top_p self.default_n_choices = n_choices self.default_stop_sequence = stop self.default_max_generation_token = max_generation_token self.default_presence_penalty = presence_penalty self.default_frequency_penalty = frequency_penalty self.default_logit_bias = logit_bias self.default_user_identifier = user self.single_turn = single_turn self.temperature = temperature self.top_p = top_p self.n_choices = n_choices self.stop_sequence = stop self.max_generation_token = max_generation_token self.presence_penalty = presence_penalty self.frequency_penalty = frequency_penalty self.logit_bias = logit_bias self.user_identifier = user self.metadata = {} def get_answer_stream_iter(self): """stream predict, need to be implemented conversations are stored in self.history, with the most recent question, in OpenAI format should return a generator, each time give the next word (str) in the answer """ logger.warning("stream predict not implemented, using at once predict instead") response, _ = self.get_answer_at_once() yield response def get_answer_at_once(self): """predict at once, need to be implemented conversations are stored in history, with the most recent question, in OpenAI format Should return: the answer (str) total token count (int) """ logger.warning("at once predict not implemented, using stream predict instead") response_iter = self.get_answer_stream_iter() count = 0 response = '' for response in response_iter: count += 1 return response, sum(self.all_token_counts) + count def billing_info(self): """get billing infomation, inplement if needed""" return BILLING_NOT_APPLICABLE_MSG def count_token(self, user_input): """get token count from input, implement if needed""" return len(user_input) def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""): def get_return_value(): return chatbot, status_text status_text = i18n("开始实时传输回答……") if fake_input: chatbot.append((fake_input, "")) else: chatbot.append((inputs, "")) user_token_count = self.count_token(inputs) self.all_token_counts.append(user_token_count) logger.debug(f"输入token计数: 
{user_token_count}") stream_iter = self.get_answer_stream_iter() if display_append: display_append = ( '\n\n<hr class="append-display no-in-raw" />' + display_append ) partial_text = "" token_increment = 1 for partial_text in stream_iter: if type(partial_text) == tuple: partial_text, token_increment = partial_text chatbot[-1] = (chatbot[-1][0], partial_text + display_append) self.all_token_counts[-1] += token_increment status_text = self.token_message() yield get_return_value() if self.interrupted: self.recover() break self.history.append(construct_assistant(partial_text)) def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""): if fake_input: chatbot.append((fake_input, "")) else: chatbot.append((inputs, "")) if fake_input is not None: user_token_count = self.count_token(fake_input) else: user_token_count = self.count_token(inputs) self.all_token_counts.append(user_token_count) ai_reply, total_token_count = self.get_answer_at_once() self.history.append(construct_assistant(ai_reply)) if fake_input is not None: self.history[-2] = construct_user(fake_input) chatbot[-1] = (chatbot[-1][0], ai_reply + display_append) if fake_input is not None: self.all_token_counts[-1] += count_token(construct_assistant(ai_reply)) else: self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts) status_text = self.token_message() return chatbot, status_text def handle_file_upload(self, files, chatbot, language): """if the model accepts modal input, implement this function""" status = gr.Markdown.update() if files: construct_index(self.api_key, files=files) status = i18n("索引构建完成") return gr.Files.update(), chatbot, status def prepare_inputs( self, real_inputs, use_websearch, files, reply_language, chatbot, load_from_cache_if_possible=True, ): display_append = [] limited_context = False if type(real_inputs) == list: fake_inputs = real_inputs[0]["text"] else: fake_inputs = real_inputs if files: limited_context = True msg = "加载索引中……" logger.info(msg) index, documents = construct_index( self.api_key, files=files, load_from_cache_if_possible=load_from_cache_if_possible, ) assert index is not None, "获取索引失败" msg = "索引获取成功,生成回答中……" logger.info(msg) k = 3 score_threshold = 0.6 with retrieve_proxy(): vec_retriever = VectorStoreRetriever( vectorstore=index, search_type="similarity_score_threshold", search_kwargs={"k": k, "score_threshold": score_threshold} ) bm25_retriever = BM25Retriever.from_documents(documents, preprocess_func=chinese_preprocessing_func) bm25_retriever.k = k ensemble_retriever = EnsembleRetriever( retrievers=[bm25_retriever, vec_retriever], weights=[0.5, 0.5], ) try: relevant_documents = ensemble_retriever.get_relevant_documents(fake_inputs) except: return self.prepare_inputs( fake_inputs, use_websearch, files, reply_language, chatbot, load_from_cache_if_possible=False, ) reference_results = [ [d.page_content.strip("�"), os.path.basename(d.metadata["source"])] for d in relevant_documents ] reference_results = add_source_numbers(reference_results) display_append = add_details(reference_results) display_append = "\n\n" + "".join(display_append) if type(real_inputs) == list: real_inputs[0]["text"] = ( replace_today(PROMPT_TEMPLATE) .replace("{query_str}", fake_inputs) .replace("{context_str}", "\n\n".join(reference_results)) .replace("{reply_language}", reply_language) ) else: real_inputs = ( replace_today(PROMPT_TEMPLATE) .replace("{query_str}", real_inputs) .replace("{context_str}", "\n\n".join(reference_results)) .replace("{reply_language}", reply_language) ) 
elif use_websearch: search_results = [] with DDGS() as ddgs: ddgs_gen = ddgs.text(fake_inputs, backend="lite") for r in islice(ddgs_gen, 10): search_results.append(r) reference_results = [] for idx, result in enumerate(search_results): logger.debug(f"搜索结果{idx + 1}:{result}") domain_name = urllib3.util.parse_url(result["href"]).host reference_results.append([result["body"], result["href"]]) display_append.append( # f"{idx+1}. [{domain_name}]({result['href']})\n" f"<a href=\"{result['href']}\" target=\"_blank\">{idx + 1}.&nbsp;{result['title']}</a>" ) reference_results = add_source_numbers(reference_results) # display_append = "<ol>\n\n" + "".join(display_append) + "</ol>" display_append = ( '<div class = "source-a">' + "".join(display_append) + "</div>" ) if type(real_inputs) == list: real_inputs[0]["text"] = ( replace_today(WEBSEARCH_PTOMPT_TEMPLATE) .replace("{query}", fake_inputs) .replace("{web_results}", "\n\n".join(reference_results)) .replace("{reply_language}", reply_language) ) else: real_inputs = ( replace_today(WEBSEARCH_PTOMPT_TEMPLATE) .replace("{query}", fake_inputs) .replace("{web_results}", "\n\n".join(reference_results)) .replace("{reply_language}", reply_language) ) else: display_append = "" return limited_context, fake_inputs, display_append, real_inputs, chatbot def predict( self, inputs, chatbot, stream=False, use_websearch=False, files=None, reply_language="中文", should_check_token_count=True, ): # repetition_penalty, top_k status_text = "开始生成回答……" if type(inputs) == list: logger.info( "用户" + f"{self.user_name}" + "的输入为:" + "(" + str(len(inputs) - 1) + " images) " + f"{inputs[0]['text']}" ) else: logger.info( "用户" + f"{self.user_name}" + "的输入为:" + f"{inputs}" ) if should_check_token_count: if type(inputs) == list: yield chatbot + [(inputs[0]["text"], "")], status_text else: yield chatbot + [(inputs, "")], status_text if reply_language == "跟随问题语言(不稳定)": reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch." 
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs( real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot ) yield chatbot + [(fake_inputs, "")], status_text if ( self.need_api_key and self.api_key is None and not shared.state.multi_api_key ): status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG logger.info(status_text) chatbot.append((inputs, "")) if len(self.history) == 0: self.history.append(construct_user(inputs)) self.history.append("") self.all_token_counts.append(0) else: self.history[-2] = construct_user(inputs) yield chatbot + [(inputs, "")], status_text return elif len(inputs.strip()) == 0: status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG logger.info(status_text) yield chatbot + [(inputs, "")], status_text return if self.single_turn: self.history = [] self.all_token_counts = [] if type(inputs) == list: self.history.append(inputs) else: self.history.append(construct_user(inputs)) try: if stream: logger.debug("使用流式传输") iter = self.stream_next_chatbot( inputs, chatbot, fake_input=fake_inputs, display_append=display_append, ) for chatbot, status_text in iter: yield chatbot, status_text else: logger.debug("不使用流式传输") chatbot, status_text = self.next_chatbot_at_once( inputs, chatbot, fake_input=fake_inputs, display_append=display_append, ) yield chatbot, status_text except Exception as e: traceback.print_exc() status_text = STANDARD_ERROR_MSG + str(e) yield chatbot, status_text if len(self.history) > 1 and self.history[-1]["content"] != inputs: logger.info("回答为:" + f"{self.history[-1]['content']}") if limited_context: self.history = [] self.all_token_counts = [] max_token = self.token_upper_limit - TOKEN_OFFSET if sum(self.all_token_counts) > max_token and should_check_token_count: count = 0 while ( sum(self.all_token_counts) > self.token_upper_limit * REDUCE_TOKEN_FACTOR and sum(self.all_token_counts) > 0 ): count += 1 del self.all_token_counts[0] del self.history[:2] logger.info(status_text) status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话" yield chatbot, status_text def retry( self, chatbot, stream=False, use_websearch=False, files=None, reply_language="中文", ): logger.debug("重试中……") if len(self.history) > 1: inputs = self.history[-2]["content"] del self.history[-2:] if len(self.all_token_counts) > 0: self.all_token_counts.pop() elif len(chatbot) > 0: inputs = chatbot[-1][0] if '<div class="user-message">' in inputs: inputs = inputs.split('<div class="user-message">')[1] inputs = inputs.split("</div>")[0] elif len(self.history) == 1: inputs = self.history[-1]["content"] del self.history[-1] else: yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的" return iter = self.predict( inputs, chatbot, stream=stream, use_websearch=use_websearch, files=files, reply_language=reply_language, ) for x in iter: yield x logger.debug("重试完毕") def interrupt(self): self.interrupted = True def recover(self): self.interrupted = False def set_token_upper_limit(self, new_upper_limit): self.token_upper_limit = new_upper_limit logger.info(f"token上限设置为{new_upper_limit}") self.auto_save() def set_temperature(self, new_temperature): self.temperature = new_temperature self.auto_save() def set_top_p(self, new_top_p): self.top_p = new_top_p self.auto_save() def set_n_choices(self, new_n_choices): self.n_choices = new_n_choices self.auto_save() def set_stop_sequence(self, new_stop_sequence: str): new_stop_sequence = new_stop_sequence.split(",") self.stop_sequence = new_stop_sequence self.auto_save() def set_max_tokens(self, new_max_tokens): 
self.max_generation_token = new_max_tokens self.auto_save() def set_presence_penalty(self, new_presence_penalty): self.presence_penalty = new_presence_penalty self.auto_save() def set_frequency_penalty(self, new_frequency_penalty): self.frequency_penalty = new_frequency_penalty self.auto_save() def set_logit_bias(self, logit_bias): self.logit_bias = logit_bias self.auto_save() def encoded_logit_bias(self): if self.logit_bias is None: return {} logit_bias = self.logit_bias.split() bias_map = {} encoding = tiktoken.get_encoding("cl100k_base") for line in logit_bias: word, bias_amount = line.split(":") if word: for token in encoding.encode(word): bias_map[token] = float(bias_amount) return bias_map def set_user_identifier(self, new_user_identifier): self.user_identifier = new_user_identifier self.auto_save() def set_system_prompt(self, new_system_prompt): self.system_prompt = new_system_prompt self.auto_save() def set_key(self, new_access_key): self.api_key = new_access_key.strip() msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key) logger.info(msg) return self.api_key, msg def set_single_turn(self, new_single_turn): self.single_turn = new_single_turn self.auto_save() def reset(self, remain_system_prompt=False): self.history = [] self.all_token_counts = [] self.interrupted = False self.history_file_path = new_auto_history_filename(self.user_name) history_name = self.history_file_path[:-5] choices = [history_name] + get_history_names(self.user_name) system_prompt = self.system_prompt if remain_system_prompt else "" self.single_turn = self.default_single_turn self.temperature = self.default_temperature self.top_p = self.default_top_p self.n_choices = self.default_n_choices self.stop_sequence = self.default_stop_sequence self.max_generation_token = self.default_max_generation_token self.presence_penalty = self.default_presence_penalty self.frequency_penalty = self.default_frequency_penalty self.logit_bias = self.default_logit_bias self.user_identifier = self.default_user_identifier return ( [], self.token_message([0]), gr.Radio.update(choices=choices, value=history_name), system_prompt, self.single_turn, self.temperature, self.top_p, self.n_choices, self.stop_sequence, self.token_upper_limit, self.max_generation_token, self.presence_penalty, self.frequency_penalty, self.logit_bias, self.user_identifier, ) def delete_first_conversation(self): if self.history: del self.history[:2] del self.all_token_counts[0] return self.token_message() def delete_last_conversation(self, chatbot): if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]: msg = "由于包含报错信息,只删除chatbot记录" chatbot = chatbot[:-1] return chatbot, self.history if len(self.history) > 0: self.history = self.history[:-2] if len(chatbot) > 0: msg = "删除了一组chatbot对话" chatbot = chatbot[:-1] if len(self.all_token_counts) > 0: msg = "删除了一组对话的token计数记录" self.all_token_counts.pop() msg = "删除了一组对话" self.chatbot = chatbot self.auto_save(chatbot) return chatbot, msg def token_message(self, token_lst=None): if token_lst is None: token_lst = self.all_token_counts token_sum = 0 for i in range(len(token_lst)): token_sum += sum(token_lst[: i + 1]) return ( i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens" ) def rename_chat_history(self, filename, chatbot): if filename == "": return gr.update() if not filename.endswith(".json"): filename += ".json" self.delete_chat_history(self.history_file_path) # 命名重复检测 repeat_file_index = 2 full_path = os.path.join(HISTORY_DIR, self.user_name, filename) while 
os.path.exists(full_path): full_path = os.path.join( HISTORY_DIR, self.user_name, f"{repeat_file_index}_{filename}" ) repeat_file_index += 1 filename = os.path.basename(full_path) self.history_file_path = filename
save_file(filename, self, chatbot)
15
2023-12-27 12:14:26+00:00
8k
camenduru/DiffMorpher-hf
morph_attn.py
[ { "identifier": "train_lora", "path": "lora_utils.py", "snippet": "def train_lora(image, prompt, save_lora_dir, model_path=None, tokenizer=None, text_encoder=None, vae=None, unet=None, noise_scheduler=None, lora_steps=200, lora_lr=2e-4, lora_rank=16, weight_name=None, safe_serialization=False, progress=tqdm):\n # initialize accelerator\n accelerator = Accelerator(\n gradient_accumulation_steps=1,\n # mixed_precision='fp16'\n )\n set_seed(0)\n\n # Load the tokenizer\n if tokenizer is None:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n subfolder=\"tokenizer\",\n revision=None,\n use_fast=False,\n )\n # initialize the model\n if noise_scheduler is None:\n noise_scheduler = DDPMScheduler.from_pretrained(model_path, subfolder=\"scheduler\")\n if text_encoder is None:\n text_encoder_cls = import_model_class_from_model_name_or_path(model_path, revision=None)\n text_encoder = text_encoder_cls.from_pretrained(\n model_path, subfolder=\"text_encoder\", revision=None\n )\n if vae is None:\n vae = AutoencoderKL.from_pretrained(\n model_path, subfolder=\"vae\", revision=None\n )\n if unet is None:\n unet = UNet2DConditionModel.from_pretrained(\n model_path, subfolder=\"unet\", revision=None\n )\n\n # set device and dtype\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\n vae.requires_grad_(False)\n text_encoder.requires_grad_(False)\n unet.requires_grad_(False)\n\n unet.to(device)\n vae.to(device)\n text_encoder.to(device)\n\n # initialize UNet LoRA\n unet_lora_attn_procs = {}\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = None if name.endswith(\"attn1.processor\") else unet.config.cross_attention_dim\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[block_id]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n else:\n raise NotImplementedError(\"name must start with up_blocks, mid_blocks, or down_blocks\")\n\n if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):\n lora_attn_processor_class = LoRAAttnAddedKVProcessor\n else:\n lora_attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n )\n unet_lora_attn_procs[name] = lora_attn_processor_class(\n hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank\n )\n unet.set_attn_processor(unet_lora_attn_procs)\n unet_lora_layers = AttnProcsLayers(unet.attn_processors)\n\n # Optimizer creation\n params_to_optimize = (unet_lora_layers.parameters())\n optimizer = torch.optim.AdamW(\n params_to_optimize,\n lr=lora_lr,\n betas=(0.9, 0.999),\n weight_decay=1e-2,\n eps=1e-08,\n )\n\n lr_scheduler = get_scheduler(\n \"constant\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=lora_steps,\n num_cycles=1,\n power=1.0,\n )\n\n # prepare accelerator\n unet_lora_layers = accelerator.prepare_model(unet_lora_layers)\n optimizer = accelerator.prepare_optimizer(optimizer)\n lr_scheduler = accelerator.prepare_scheduler(lr_scheduler)\n\n # initialize text embeddings\n with torch.no_grad():\n text_inputs = tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None)\n text_embedding = encode_prompt(\n text_encoder,\n text_inputs.input_ids,\n 
text_inputs.attention_mask,\n text_encoder_use_attention_mask=False\n )\n\n if type(image) == np.ndarray:\n image = Image.fromarray(image)\n \n # initialize latent distribution\n image_transforms = transforms.Compose(\n [\n transforms.Resize(512, interpolation=transforms.InterpolationMode.BILINEAR),\n # transforms.RandomCrop(512),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5]),\n ]\n )\n\n image = image_transforms(image).to(device)\n image = image.unsqueeze(dim=0)\n \n latents_dist = vae.encode(image).latent_dist\n for _ in progress.tqdm(range(lora_steps), desc=\"Training LoRA...\"):\n unet.train()\n model_input = latents_dist.sample() * vae.config.scaling_factor\n # Sample noise that we'll add to the latents\n noise = torch.randn_like(model_input)\n bsz, channels, height, width = model_input.shape\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device\n )\n timesteps = timesteps.long()\n\n # Add noise to the model input according to the noise magnitude at each timestep\n # (this is the forward diffusion process)\n noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)\n\n # Predict the noise residual\n model_pred = unet(noisy_model_input, timesteps, text_embedding).sample\n\n # Get the target for loss depending on the prediction type\n if noise_scheduler.config.prediction_type == \"epsilon\":\n target = noise\n elif noise_scheduler.config.prediction_type == \"v_prediction\":\n target = noise_scheduler.get_velocity(model_input, noise, timesteps)\n else:\n raise ValueError(f\"Unknown prediction type {noise_scheduler.config.prediction_type}\")\n\n loss = F.mse_loss(model_pred.float(), target.float(), reduction=\"mean\")\n accelerator.backward(loss)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n\n # save the trained lora\n # unet = unet.to(torch.float32)\n # vae = vae.to(torch.float32)\n # text_encoder = text_encoder.to(torch.float32)\n\n # unwrap_model is used to remove all special modules added when doing distributed training\n # so here, there is no need to call unwrap_model\n # unet_lora_layers = accelerator.unwrap_model(unet_lora_layers)\n LoraLoaderMixin.save_lora_weights(\n save_directory=save_lora_dir,\n unet_lora_layers=unet_lora_layers,\n text_encoder_lora_layers=None,\n weight_name=weight_name,\n safe_serialization=safe_serialization\n )" }, { "identifier": "load_lora", "path": "lora_utils.py", "snippet": "def load_lora(unet, lora_0, lora_1, alpha):\n lora = {}\n for key in lora_0:\n lora[key] = (1 - alpha) * lora_0[key] + alpha * lora_1[key]\n unet.load_attn_procs(lora)\n return unet" }, { "identifier": "AlphaScheduler", "path": "alpha_scheduler.py", "snippet": "class AlphaScheduler:\n def __init__(self):\n ...\n\n def from_imgs(self, imgs):\n self.__num_values = len(imgs)\n self.__values = [0]\n for i in range(self.__num_values - 1):\n dis = distance(imgs[i], imgs[i + 1])\n self.__values.append(dis)\n self.__values[i + 1] += self.__values[i]\n for i in range(self.__num_values):\n self.__values[i] /= self.__values[-1]\n\n def save(self, filename):\n torch.save(torch.tensor(self.__values), filename)\n\n def load(self, filename):\n self.__values = torch.load(filename).tolist()\n self.__num_values = len(self.__values)\n\n def get_x(self, y):\n assert y >= 0 and y <= 1\n id = bisect.bisect_left(self.__values, y)\n id -= 1\n if id < 0:\n id = 0\n yl = self.__values[id]\n yr = self.__values[id + 1]\n xl = id * (1 / 
(self.__num_values - 1))\n xr = (id + 1) * (1 / (self.__num_values - 1))\n x = (y - yl) / (yr - yl) * (xr - xl) + xl\n return x\n\n def get_list(self, len=None):\n if len is None:\n len = self.__num_values\n\n ys = torch.linspace(0, 1, len)\n res = [self.get_x(y) for y in ys]\n return res" } ]
import os
import torch
import torch.nn.functional as F
import tqdm
import numpy as np
import safetensors
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from PIL import Image
from torchvision import transforms
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from lora_utils import train_lora, load_lora
from diffusers import StableDiffusionPipeline
from argparse import ArgumentParser
from alpha_scheduler import AlphaScheduler
5,076
for k in self.unet.attn_processors.keys(): if do_replace_attn(k): attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k], self.img0_dict, k) else: attn_processor_dict[k] = self.unet.attn_processors[k] self.unet.set_attn_processor(attn_processor_dict) latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[0], False, fix_lora ) first_image = self.latent2image(latents) first_image = Image.fromarray(first_image) if save: first_image.save(f"{self.output_path}/{0:02d}.png") self.unet = load_lora(self.unet, lora_0, lora_1, 1 if fix_lora is None else fix_lora) attn_processor_dict = {} for k in self.unet.attn_processors.keys(): if do_replace_attn(k): attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k], self.img1_dict, k) else: attn_processor_dict[k] = self.unet.attn_processors[k] self.unet.set_attn_processor(attn_processor_dict) latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[-1], False, fix_lora ) last_image = self.latent2image(latents) last_image = Image.fromarray(last_image) if save: last_image.save( f"{self.output_path}/{num_frames - 1:02d}.png") for i in progress.tqdm(range(1, num_frames - 1), desc=desc): alpha = alpha_list[i] self.unet = load_lora(self.unet, lora_0, lora_1, alpha if fix_lora is None else fix_lora) attn_processor_dict = {} for k in self.unet.attn_processors.keys(): if do_replace_attn(k): attn_processor_dict[k] = LoadProcessor( self.unet.attn_processors[k], k, self.img0_dict, self.img1_dict, alpha, attn_beta, lamb) else: attn_processor_dict[k] = self.unet.attn_processors[k] self.unet.set_attn_processor(attn_processor_dict) latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[i], False, fix_lora ) image = self.latent2image(latents) image = Image.fromarray(image) if save: image.save(f"{self.output_path}/{i:02d}.png") images.append(image) images = [first_image] + images + [last_image] else: for k, alpha in enumerate(alpha_list): latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[k], self.use_lora, fix_lora ) image = self.latent2image(latents) image = Image.fromarray(image) if save: image.save(f"{self.output_path}/{k:02d}.png") images.append(image) return images with torch.no_grad(): if self.use_reschedule:
parser = ArgumentParser() parser.add_argument( '--image_path_0', type=str, default='', help='Path of the image to be processed (default: %(default)s)') parser.add_argument( '--prompt_0', type=str, default='', help='Prompt of the image (default: %(default)s)') parser.add_argument( '--image_path_1', type=str, default='', help='Path of the 2nd image to be processed, used in "morphing" mode (default: %(default)s)') parser.add_argument( '--prompt_1', type=str, default='', help='Prompt of the 2nd image, used in "morphing" mode (default: %(default)s)') parser.add_argument( '--output_path', type=str, default='', help='Path of the output image (default: %(default)s)' ) parser.add_argument( '--num_frames', type=int, default=50, help='Number of frames to generate (default: %(default)s)' ) parser.add_argument( '--duration', type=int, default=50, help='Duration of each frame (default: %(default)s)' ) parser.add_argument( '--use_lora', action='store_true', help='Use LORA to generate images (default: False)' ) parser.add_argument( '--guidance_scale', type=float, default=1., help='CFG guidace (default: %(default)s)' ) parser.add_argument( '--attn_beta', type=float, default=None, ) parser.add_argument( '-reschedule', action='store_true', ) parser.add_argument( '--lamd', type=float, default=0.6, ) parser.add_argument( '--use_adain', action='store_true' ) args = parser.parse_args() # name = args.output_path.split('/')[-1] # attn_beta = args.attn_beta # num_frames = args.num_frames # use_alpha_scheduler = args.reschedule # attn_step = 50 * args.lamd def calc_mean_std(feat, eps=1e-5): # eps is a small value added to the variance to avoid divide-by-zero. size = feat.size() N, C = size[:2] feat_var = feat.view(N, C, -1).var(dim=2) + eps if len(size) == 3: feat_std = feat_var.sqrt().view(N, C, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1) else: feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1) return feat_mean, feat_std def get_img(img, resolution=512): norm_mean = [0.5, 0.5, 0.5] norm_std = [0.5, 0.5, 0.5] transform = transforms.Compose([ transforms.Resize((resolution, resolution)), transforms.ToTensor(), transforms.Normalize(norm_mean, norm_std) ]) img = transform(img) return img.unsqueeze(0) @torch.no_grad() def slerp(p0, p1, fract_mixing: float, adain=True): r""" Copied from lunarring/latentblending Helper function to correctly mix two random variables using spherical interpolation. The function will always cast up to float64 for sake of extra 4. Args: p0: First tensor for interpolation p1: Second tensor for interpolation fract_mixing: float Mixing coefficient of interval [0, 1]. 0 will return in p0 1 will return in p1 0.x will return a mix between both preserving angular velocity. 
""" if p0.dtype == torch.float16: recast_to = 'fp16' else: recast_to = 'fp32' p0 = p0.double() p1 = p1.double() if adain: mean1, std1 = calc_mean_std(p0) mean2, std2 = calc_mean_std(p1) mean = mean1 * (1 - fract_mixing) + mean2 * fract_mixing std = std1 * (1 - fract_mixing) + std2 * fract_mixing norm = torch.linalg.norm(p0) * torch.linalg.norm(p1) epsilon = 1e-7 dot = torch.sum(p0 * p1) / norm dot = dot.clamp(-1+epsilon, 1-epsilon) theta_0 = torch.arccos(dot) sin_theta_0 = torch.sin(theta_0) theta_t = theta_0 * fract_mixing s0 = torch.sin(theta_0 - theta_t) / sin_theta_0 s1 = torch.sin(theta_t) / sin_theta_0 interp = p0*s0 + p1*s1 if adain: interp = F.instance_norm(interp) * std + mean if recast_to == 'fp16': interp = interp.half() elif recast_to == 'fp32': interp = interp.float() return interp def do_replace_attn(key: str): # return key.startswith('up_blocks.2') or key.startswith('up_blocks.3') return key.startswith('up') class StoreProcessor(): def __init__(self, original_processor, value_dict, name): self.original_processor = original_processor self.value_dict = value_dict self.name = name self.value_dict[self.name] = dict() self.id = 0 def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs): # Is self attention if encoder_hidden_states is None: self.value_dict[self.name][self.id] = hidden_states.detach() self.id += 1 res = self.original_processor(attn, hidden_states, *args, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **kwargs) return res class LoadProcessor(): def __init__(self, original_processor, name, img0_dict, img1_dict, alpha, beta=0, lamb=0.6): super().__init__() self.original_processor = original_processor self.name = name self.img0_dict = img0_dict self.img1_dict = img1_dict self.alpha = alpha self.beta = beta self.lamb = lamb self.id = 0 def parent_call( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0 ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view( batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask( attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm( hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) + scale * \ self.original_processor.to_q_lora(hidden_states) query = attn.head_to_batch_dim(query) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states( encoder_hidden_states) key = attn.to_k(encoder_hidden_states) + scale * \ self.original_processor.to_k_lora(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) + scale * \ self.original_processor.to_v_lora(encoder_hidden_states) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores( query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0]( hidden_states) + scale * self.original_processor.to_out_lora(hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) 
if input_ndim == 4: hidden_states = hidden_states.transpose( -1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def __call__(self, attn, hidden_states, *args, encoder_hidden_states=None, attention_mask=None, **kwargs): # Is self attention if encoder_hidden_states is None: # hardcode timestep if self.id < 50 * self.lamb: map0 = self.img0_dict[self.name][self.id] map1 = self.img1_dict[self.name][self.id] cross_map = self.beta * hidden_states + \ (1 - self.beta) * ((1 - self.alpha) * map0 + self.alpha * map1) # cross_map = self.beta * hidden_states + \ # (1 - self.beta) * slerp(map0, map1, self.alpha) # cross_map = slerp(slerp(map0, map1, self.alpha), # hidden_states, self.beta) # cross_map = hidden_states # cross_map = torch.cat( # ((1 - self.alpha) * map0, self.alpha * map1), dim=1) # res = self.original_processor(attn, hidden_states, *args, # encoder_hidden_states=cross_map, # attention_mask=attention_mask, # temb=temb, **kwargs) res = self.parent_call(attn, hidden_states, *args, encoder_hidden_states=cross_map, attention_mask=attention_mask, **kwargs) else: res = self.original_processor(attn, hidden_states, *args, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **kwargs) self.id += 1 # if self.id == len(self.img0_dict[self.name]): if self.id == len(self.img0_dict[self.name]): self.id = 0 else: res = self.original_processor(attn, hidden_states, *args, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **kwargs) return res class DiffMorpherPipeline(StableDiffusionPipeline): def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__(vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker) self.img0_dict = dict() self.img1_dict = dict() def inv_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[ timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=50, num_actual_inference_steps=None, guidance_scale=1., eta=0.0, **kwds): """ invert a real image into noise map with determinisc DDIM inversion """ DEVICE = torch.device( "cuda") if torch.cuda.is_available() else torch.device("cpu") batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = 
self.text_encoder(text_input.input_ids.to(DEVICE))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.image2latent(image) # unconditional embedding for classifier free guidance if guidance_scale > 1.: max_length = text_input.input_ids.shape[-1] unconditional_input = self.tokenizer( [""] * batch_size, padding="max_length", max_length=77, return_tensors="pt" ) unconditional_embeddings = self.text_encoder( unconditional_input.input_ids.to(DEVICE))[0] text_embeddings = torch.cat( [unconditional_embeddings, text_embeddings], dim=0) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) # print("attributes: ", self.scheduler.__dict__) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm.tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue if guidance_scale > 1.: model_inputs = torch.cat([latents] * 2) else: model_inputs = latents # predict the noise noise_pred = self.unet( model_inputs, t, encoder_hidden_states=text_embeddings).sample if guidance_scale > 1.: noise_pred_uncon, noise_pred_con = noise_pred.chunk(2, dim=0) noise_pred = noise_pred_uncon + guidance_scale * \ (noise_pred_con - noise_pred_uncon) # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.inv_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) return latents @torch.no_grad() def ddim_inversion(self, latent, cond): timesteps = reversed(self.scheduler.timesteps) with torch.autocast(device_type='cuda', dtype=torch.float32): for i, t in enumerate(tqdm.tqdm(timesteps, desc="DDIM inversion")): cond_batch = cond.repeat(latent.shape[0], 1, 1) alpha_prod_t = self.scheduler.alphas_cumprod[t] alpha_prod_t_prev = ( self.scheduler.alphas_cumprod[timesteps[i - 1]] if i > 0 else self.scheduler.final_alpha_cumprod ) mu = alpha_prod_t ** 0.5 mu_prev = alpha_prod_t_prev ** 0.5 sigma = (1 - alpha_prod_t) ** 0.5 sigma_prev = (1 - alpha_prod_t_prev) ** 0.5 eps = self.unet( latent, t, encoder_hidden_states=cond_batch).sample pred_x0 = (latent - sigma_prev * eps) / mu_prev latent = mu * pred_x0 + sigma * eps # if save_latents: # torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt')) # torch.save(latent, os.path.join(save_path, f'noisy_latents_{t}.pt')) return latent def step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, ): """ predict the sample of the next step in the denoise process. 
""" prev_timestep = timestep - \ self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps alpha_prod_t = self.scheduler.alphas_cumprod[timestep] alpha_prod_t_prev = self.scheduler.alphas_cumprod[ prev_timestep] if prev_timestep > 0 else self.scheduler.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_prev)**0.5 * model_output x_prev = alpha_prod_t_prev**0.5 * pred_x0 + pred_dir return x_prev, pred_x0 @torch.no_grad() def image2latent(self, image): DEVICE = torch.device( "cuda") if torch.cuda.is_available() else torch.device("cpu") if type(image) is Image: image = np.array(image) image = torch.from_numpy(image).float() / 127.5 - 1 image = image.permute(2, 0, 1).unsqueeze(0) # input image density range [-1, 1] latents = self.vae.encode(image.to(DEVICE))['latent_dist'].mean latents = latents * 0.18215 return latents @torch.no_grad() def latent2image(self, latents, return_type='np'): latents = 1 / 0.18215 * latents.detach() image = self.vae.decode(latents)['sample'] if return_type == 'np': image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy()[0] image = (image * 255).astype(np.uint8) elif return_type == "pt": image = (image / 2 + 0.5).clamp(0, 1) return image def latent2image_grad(self, latents): latents = 1 / 0.18215 * latents image = self.vae.decode(latents)['sample'] return image # range [-1, 1] @torch.no_grad() def cal_latent(self, num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha, use_lora, fix_lora=None): # latents = torch.cos(alpha * torch.pi / 2) * img_noise_0 + \ # torch.sin(alpha * torch.pi / 2) * img_noise_1 # latents = (1 - alpha) * img_noise_0 + alpha * img_noise_1 # latents = latents / ((1 - alpha) ** 2 + alpha ** 2) latents = slerp(img_noise_0, img_noise_1, alpha, self.use_adain) text_embeddings = (1 - alpha) * text_embeddings_0 + \ alpha * text_embeddings_1 self.scheduler.set_timesteps(num_inference_steps) if use_lora: if fix_lora is not None: self.unet = load_lora(self.unet, lora_0, lora_1, fix_lora) else: self.unet = load_lora(self.unet, lora_0, lora_1, alpha) for i, t in enumerate(tqdm.tqdm(self.scheduler.timesteps, desc=f"DDIM Sampler, alpha={alpha}")): if guidance_scale > 1.: model_inputs = torch.cat([latents] * 2) else: model_inputs = latents if unconditioning is not None and isinstance(unconditioning, list): _, text_embeddings = text_embeddings.chunk(2) text_embeddings = torch.cat( [unconditioning[i].expand(*text_embeddings.shape), text_embeddings]) # predict the noise noise_pred = self.unet( model_inputs, t, encoder_hidden_states=text_embeddings).sample if guidance_scale > 1.0: noise_pred_uncon, noise_pred_con = noise_pred.chunk( 2, dim=0) noise_pred = noise_pred_uncon + guidance_scale * \ (noise_pred_con - noise_pred_uncon) # compute the previous noise sample x_t -> x_t-1 # YUJUN: right now, the only difference between step here and step in scheduler # is that scheduler version would clamp pred_x0 between [-1,1] # don't know if that's gonna have huge impact latents = self.scheduler.step( noise_pred, t, latents, return_dict=False)[0] return latents @torch.no_grad() def get_text_embeddings(self, prompt, guidance_scale, neg_prompt, batch_size): DEVICE = torch.device( "cuda") if torch.cuda.is_available() else torch.device("cpu") # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, 
return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.cuda())[0] if guidance_scale > 1.: if neg_prompt: uc_text = neg_prompt else: uc_text = "" unconditional_input = self.tokenizer( [uc_text] * batch_size, padding="max_length", max_length=77, return_tensors="pt" ) unconditional_embeddings = self.text_encoder( unconditional_input.input_ids.to(DEVICE))[0] text_embeddings = torch.cat( [unconditional_embeddings, text_embeddings], dim=0) return text_embeddings def __call__( self, img_0=None, img_1=None, img_path_0=None, img_path_1=None, prompt_0="", prompt_1="", save_lora_dir="./lora", load_lora_path_0=None, load_lora_path_1=None, lora_steps=200, lora_lr=2e-4, lora_rank=16, batch_size=1, height=512, width=512, num_inference_steps=50, num_actual_inference_steps=None, guidance_scale=1, attn_beta=0, lamb=0.6, use_lora = True, use_adain = True, use_reschedule = True, output_path = "./results", num_frames=50, fix_lora=None, progress=tqdm, unconditioning=None, neg_prompt=None, **kwds): # if isinstance(prompt, list): # batch_size = len(prompt) # elif isinstance(prompt, str): # if batch_size > 1: # prompt = [prompt] * batch_size self.scheduler.set_timesteps(num_inference_steps) self.use_lora = use_lora self.use_adain = use_adain self.use_reschedule = use_reschedule self.output_path = output_path if img_0 is None: img_0 = Image.open(img_path_0).convert("RGB") # else: # img_0 = Image.fromarray(img_0).convert("RGB") if img_1 is None: img_1 = Image.open(img_path_1).convert("RGB") # else: # img_1 = Image.fromarray(img_1).convert("RGB") if self.use_lora: print("Loading lora...") if not load_lora_path_0: weight_name = f"{output_path.split('/')[-1]}_lora_0.ckpt" load_lora_path_0 = save_lora_dir + "/" + weight_name if not os.path.exists(load_lora_path_0): train_lora(img_0, prompt_0, save_lora_dir, None, self.tokenizer, self.text_encoder, self.vae, self.unet, self.scheduler, lora_steps, lora_lr, lora_rank, weight_name=weight_name) print(f"Load from {load_lora_path_0}.") if load_lora_path_0.endswith(".safetensors"): lora_0 = safetensors.torch.load_file( load_lora_path_0, device="cpu") else: lora_0 = torch.load(load_lora_path_0, map_location="cpu") if not load_lora_path_1: weight_name = f"{output_path.split('/')[-1]}_lora_1.ckpt" load_lora_path_1 = save_lora_dir + "/" + weight_name if not os.path.exists(load_lora_path_1): train_lora(img_1, prompt_1, save_lora_dir, None, self.tokenizer, self.text_encoder, self.vae, self.unet, self.scheduler, lora_steps, lora_lr, lora_rank, weight_name=weight_name) print(f"Load from {load_lora_path_1}.") if load_lora_path_1.endswith(".safetensors"): lora_1 = safetensors.torch.load_file( load_lora_path_1, device="cpu") else: lora_1 = torch.load(load_lora_path_1, map_location="cpu") text_embeddings_0 = self.get_text_embeddings( prompt_0, guidance_scale, neg_prompt, batch_size) text_embeddings_1 = self.get_text_embeddings( prompt_1, guidance_scale, neg_prompt, batch_size) img_0 = get_img(img_0) img_1 = get_img(img_1) if self.use_lora: self.unet = load_lora(self.unet, lora_0, lora_1, 0) img_noise_0 = self.ddim_inversion( self.image2latent(img_0), text_embeddings_0) if self.use_lora: self.unet = load_lora(self.unet, lora_0, lora_1, 1) img_noise_1 = self.ddim_inversion( self.image2latent(img_1), text_embeddings_1) print("latents shape: ", img_noise_0.shape) def morph(alpha_list, progress, desc, save=False): images = [] if attn_beta is not None: self.unet = load_lora(self.unet, lora_0, lora_1, 0 if fix_lora is None else fix_lora) attn_processor_dict = {} for k in 
self.unet.attn_processors.keys(): if do_replace_attn(k): attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k], self.img0_dict, k) else: attn_processor_dict[k] = self.unet.attn_processors[k] self.unet.set_attn_processor(attn_processor_dict) latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[0], False, fix_lora ) first_image = self.latent2image(latents) first_image = Image.fromarray(first_image) if save: first_image.save(f"{self.output_path}/{0:02d}.png") self.unet = load_lora(self.unet, lora_0, lora_1, 1 if fix_lora is None else fix_lora) attn_processor_dict = {} for k in self.unet.attn_processors.keys(): if do_replace_attn(k): attn_processor_dict[k] = StoreProcessor(self.unet.attn_processors[k], self.img1_dict, k) else: attn_processor_dict[k] = self.unet.attn_processors[k] self.unet.set_attn_processor(attn_processor_dict) latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[-1], False, fix_lora ) last_image = self.latent2image(latents) last_image = Image.fromarray(last_image) if save: last_image.save( f"{self.output_path}/{num_frames - 1:02d}.png") for i in progress.tqdm(range(1, num_frames - 1), desc=desc): alpha = alpha_list[i] self.unet = load_lora(self.unet, lora_0, lora_1, alpha if fix_lora is None else fix_lora) attn_processor_dict = {} for k in self.unet.attn_processors.keys(): if do_replace_attn(k): attn_processor_dict[k] = LoadProcessor( self.unet.attn_processors[k], k, self.img0_dict, self.img1_dict, alpha, attn_beta, lamb) else: attn_processor_dict[k] = self.unet.attn_processors[k] self.unet.set_attn_processor(attn_processor_dict) latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[i], False, fix_lora ) image = self.latent2image(latents) image = Image.fromarray(image) if save: image.save(f"{self.output_path}/{i:02d}.png") images.append(image) images = [first_image] + images + [last_image] else: for k, alpha in enumerate(alpha_list): latents = self.cal_latent( num_inference_steps, guidance_scale, unconditioning, img_noise_0, img_noise_1, text_embeddings_0, text_embeddings_1, lora_0, lora_1, alpha_list[k], self.use_lora, fix_lora ) image = self.latent2image(latents) image = Image.fromarray(image) if save: image.save(f"{self.output_path}/{k:02d}.png") images.append(image) return images with torch.no_grad(): if self.use_reschedule:
alpha_scheduler = AlphaScheduler()
2
2023-12-25 04:51:41+00:00
8k
camenduru/AnyDoor-online-hf
app.py
[ { "identifier": "create_model", "path": "cldm/model.py", "snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path)\n model = instantiate_from_config(config.model).cpu()\n print(f'Loaded model config from [{config_path}]')\n return model" }, { "identifier": "load_state_dict", "path": "cldm/model.py", "snippet": "def load_state_dict(ckpt_path, location='cpu'):\n _, extension = os.path.splitext(ckpt_path)\n if extension.lower() == \".safetensors\":\n import safetensors.torch\n state_dict = safetensors.torch.load_file(ckpt_path, device=location)\n else:\n state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))\n state_dict = get_state_dict(state_dict)\n print(f'Loaded state_dict from [{ckpt_path}]')\n return state_dict" }, { "identifier": "DDIMSampler", "path": "cldm/ddim_hacked.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n #x_T 1,4,64,64\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in 
enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n model_t = self.model.apply_model(x, t, c)\n model_uncond = self.model.apply_model(x, t, unconditional_conditioning)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n num_reference_steps = timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), timesteps[i], device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, 
dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "disable_verbosity", "path": "cldm/hack.py", "snippet": "def disable_verbosity():\n logging.set_verbosity_error()\n print('logging improved.')\n return" }, { "identifier": "enable_sliced_attention", "path": "cldm/hack.py", "snippet": "def enable_sliced_attention():\n ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward\n print('Enabled sliced_attention.')\n return" } ]
import os
import sys
import cv2
import einops
import numpy as np
import torch
import random
import gradio as gr
import albumentations as A
import torchvision.transforms as T
from PIL import Image
from mydatasets.data_utils import *
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
from omegaconf import OmegaConf
from cldm.hack import disable_verbosity, enable_sliced_attention
from huggingface_hub import snapshot_download
from iseg.coarse_mask_refine_util import BaselineModel
4,411
#sys.path.append('.')
snapshot_download(repo_id="xichenhku/AnyDoor_models", local_dir="./AnyDoor_models")
snapshot_download(repo_id="xichenhku/mask_refine", local_dir="./mask_refine")

cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
save_memory = False
disable_verbosity()
if save_memory:
    enable_sliced_attention()

config = OmegaConf.load('./configs/demo.yaml')
model_ckpt = config.pretrained_model
model_config = config.config_file
use_interactive_seg = config.config_file
model = create_model(model_config).cpu()
model.load_state_dict(load_state_dict(model_ckpt, location='cuda'))
model = model.cuda()
#sys.path.append('.')
snapshot_download(repo_id="xichenhku/AnyDoor_models", local_dir="./AnyDoor_models")
snapshot_download(repo_id="xichenhku/mask_refine", local_dir="./mask_refine")

cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
save_memory = False
disable_verbosity()
if save_memory:
    enable_sliced_attention()

config = OmegaConf.load('./configs/demo.yaml')
model_ckpt = config.pretrained_model
model_config = config.config_file
use_interactive_seg = config.config_file
model = create_model(model_config).cpu()
model.load_state_dict(load_state_dict(model_ckpt, location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)
2
2023-12-25 04:48:34+00:00
8k
pangxincheng/TaskManager
task_manager/manager/core.py
[ { "identifier": "GPUManager", "path": "task_manager/manager/gpu.py", "snippet": "class GPUManager(mp.Process):\n\n def __init__(\n self,\n identity: str,\n device_id: int,\n gpu_manager_addr: str\n ) -> None:\n mp.Process.__init__(self, daemon=True)\n assert gpu_manager_addr.startswith(\"tcp://\") or gpu_manager_addr.startswith(\"ipc://\"), \\\n \"gpu manager address must start with tcp:// or ipc://\"\n self.identity = identity\n self.device_id = device_id\n self.gpu_manager_addr = gpu_manager_addr\n\n self.device = None\n self.ctx = None\n\n self.gpu_client = None\n\n self.preemptive_condition = None\n self.lock_for_chunks = None\n self.zmq_context = None\n self.worker_thread = None\n self.preemptive_thread = None\n self.router_client = None\n \n self.inter_addr = f\"inproc://gpu{self.device_id}_inter_server\"\n\n self.chunks = []\n self.chunk_size = None\n self.auto_preemptive = False\n self.preemptive_interval = 5\n\n def __del__(self):\n if self.ctx is not None:\n try:\n self.ctx.pop()\n except Exception as e:\n pass\n clear_context_caches()\n\n def worker_fn(self):\n assert self.zmq_context is not None, \"zmq context is not initialized\"\n \n # init pycuda\n pycuda_drv.init()\n assert self.device_id < pycuda_drv.Device.count(), \"Invalid device ID\"\n self.device = pycuda_drv.Device(self.device_id)\n self.ctx = self.device.make_context()\n device_local_var = self.device\n ctx_local_var = self.ctx\n\n inter_server = zmq_utils.ZMQServer(\n addr=self.inter_addr,\n context=self.zmq_context,\n )\n while self.running:\n identity, msg = inter_server.recv_binary()\n identity = identity.decode(\"utf-8\")\n command = common_utils.byte_msg_to_dict(msg)\n return_msg = self.exception_wrapper(\n fn=getattr(self, command[\"function\"], self._default_fn),\n *command.get(\"args\", {}),\n **command.get(\"kwargs\", {})\n )\n inter_server.send_binary(\n any=common_utils.dict_to_byte_msg(return_msg),\n identity=identity,\n )\n\n try:\n ctx_local_var.pop()\n ctx_local_var = None\n device_local_var = None\n clear_context_caches()\n except Exception as e:\n pass\n\n def preemptive_fn(self):\n assert self.zmq_context is not None, \"zmq context is not initialized\"\n preemptive_client = zmq_utils.ZMQClient(\n addr=self.inter_addr,\n identity=f\"gpu{self.device_id}_preemptive_client\",\n context=self.zmq_context,\n )\n while self.running:\n with self.preemptive_condition:\n if not self.auto_preemptive:\n self.preemptive_condition.wait()\n if self.auto_preemptive:\n preemptive_client.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"get_free_memory\"\n })\n )\n msg = common_utils.byte_msg_to_dict(preemptive_client.recv_binary()[0])\n if msg[\"status\"] == 200:\n free_bytes = msg[\"result\"]\n while free_bytes > self.chunk_size:\n chunks_len = -1\n with self.lock_for_chunks:\n chunks_len = len(self.chunks)\n assert chunks_len >= 0 and self.chunk_size >= 0 and self.max_size >= 0, \"Invalid chunks\"\n if (chunks_len + 1) * self.chunk_size >= self.max_size:\n # if the total size of chunks is larger than max_size, we will not allocate more chunks\n # and we will increase the sleep time \n time.sleep(self.preemptive_interval * 4)\n if self.preemptive_auto_close:\n self.auto_preemptive = False\n break\n try:\n preemptive_client.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"mem_alloc\",\n \"kwargs\": {\n \"chunk_size\": self.chunk_size,\n \"max_size\": self.chunk_size,\n \"unit\": \"B\"\n }\n }),\n )\n return_msg = 
common_utils.byte_msg_to_dict(preemptive_client.recv_binary()[0])\n if return_msg[\"status\"] == 200:\n time.sleep(self.preemptive_interval)\n else:\n time.sleep(self.preemptive_interval * 4)\n except Exception as e:\n time.sleep(self.preemptive_interval * 4)\n else:\n time.sleep(self.preemptive_interval * 4)\n\n def _init_manager(self):\n self.running = True\n\n self.gpu_client = zmq_utils.ZMQClient(self.gpu_manager_addr, self.identity)\n\n self.preemptive_condition = threading.Condition()\n self.preemptive_auto_close = False\n self.lock_for_chunks = threading.Lock()\n self.zmq_context = zmq.Context()\n self.worker_thread = threading.Thread(target=self.worker_fn, daemon=True)\n self.preemptive_thread = threading.Thread(target=self.preemptive_fn, daemon=True)\n self.worker_thread.start()\n self.preemptive_thread.start()\n self.router_client = zmq_utils.ZMQClient(\n addr=self.inter_addr,\n identity=f\"gpu{self.device_id}_router_client\",\n context=self.zmq_context,\n )\n time.sleep(1)\n self.gpu_client.send_binary(common_utils.dict_to_byte_msg({\n \"status\": 200,\n \"result\": f\"Success start a watching dog🐶 on GPU{self.device_id}\"\n }))\n\n def run(self):\n self._init_manager()\n while self.running:\n self.router_client.send_binary(self.gpu_client.recv_binary()[0])\n self.gpu_client.send_binary(self.router_client.recv_binary()[0])\n \n def exception_wrapper(self, fn, *args, **kwargs) -> Dict[str, Any]:\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n return {\n \"status\": 400,\n \"result\": f\"Exception when call {fn.__name__}, the excption is \" + str(e)\n }\n\n def _default_fn(self, *args, **kwargs):\n raise NotImplementedError(\"This function is not implemented\")\n\n def exit(self):\n self.running = False\n return {\n \"status\": 200,\n \"result\": \"👋bye~\"\n }\n\n def get_gpu_info(self, info_level: str=\"simple\") -> Dict[str, Any]:\n free_bytes, total_bytes = pycuda_drv.mem_get_info()\n device_msg = {\n \"device_id\": self.device_id,\n \"device_name\": self.device.name(),\n \"total_memory\": common_utils.fmt_bytes(total_bytes),\n \"free_memory\": common_utils.fmt_bytes(free_bytes),\n \"compute_capability\": float(\"%d.%d\" % self.device.compute_capability()),\n \"chunk_size\": \"none\",\n \"n_chunks\": -1\n }\n if self.chunk_size is not None:\n device_msg[\"chunk_size\"] = common_utils.fmt_bytes(self.chunk_size)\n device_msg[\"n_chunks\"] = len(self.chunks)\n if info_level != \"simple\":\n device_attributes_tuples = self.device.get_attributes().items()\n device_attributes = {}\n\n for k, v in device_attributes_tuples:\n device_attributes[str(k)] = v\n device_msg[\"device_attributes\"] = device_attributes\n return {\n \"status\": 200,\n \"result\": device_msg\n }\n\n def get_free_memory(self) -> Dict[str, Any]:\n free_bytes, _ = pycuda_drv.mem_get_info()\n return {\n \"status\": 200,\n \"result\": free_bytes\n }\n\n def mem_alloc(self, chunk_size: int, max_size: int, unit: str=\"MiB\") -> Dict[str, Any]:\n assert chunk_size > 0, \"chunk size must be positive\"\n assert max_size > 0, \"max size must be positive\"\n assert unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"], \"unit must be one of [B, KiB, MiB, GiB]\"\n chunk_size = common_utils.to_bytes(chunk_size, unit)\n max_size = common_utils.to_bytes(max_size, unit)\n if self.chunk_size is not None:\n assert self.chunk_size == chunk_size, \"currently the chunk size must be the same\"\n free_bytes, _ = pycuda_drv.mem_get_info()\n assert chunk_size <= max_size, \"chunk size must be smaller than max size\"\n assert 
chunk_size <= free_bytes, \"chunk size must be smaller than free memory\"\n assert max_size <= free_bytes, \"max size must be smaller than free memory\"\n n_chunks = min(max_size // chunk_size, free_bytes // chunk_size)\n with self.lock_for_chunks:\n self.chunks += [pycuda_drv.mem_alloc(chunk_size) for _ in range(n_chunks)]\n self.chunk_size = chunk_size\n return {\n \"status\": 200,\n \"result\": f\"Success allocate {common_utils.fmt_bytes(n_chunks * chunk_size)} memory\"\n }\n\n def mem_release(self, mem_size: int, unit: str=\"MiB\") -> Dict[str, Any]:\n assert mem_size > 0, \"memory size must be positive\"\n assert unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"], \"unit must be one of [B, KiB, MiB, GiB]\"\n if self.chunk_size is None:\n raise Exception(\"mem_alloc() must be called before mem_release\")\n mem_size = common_utils.to_bytes(mem_size, unit)\n if mem_size > self.chunk_size * len(self.chunks):\n raise Exception(f\"memory size {common_utils.fmt_bytes(mem_size)} is too large\")\n n_chunks = math.ceil(mem_size / self.chunk_size)\n with self.lock_for_chunks:\n self.chunks = self.chunks[n_chunks:]\n clear_context_caches()\n return {\n \"status\": 200,\n \"result\": f\"Success release {common_utils.fmt_bytes(n_chunks * self.chunk_size)} memory\"\n }\n\n def start_preemptive(self, chunk_size: int, max_size: int, unit: str=\"MiB\", auto_close: bool=False):\n assert chunk_size > 0, \"chunk size must be positive\"\n assert max_size > 0, \"max size must be positive\"\n assert unit in [\"B\", \"KiB\", \"MiB\", \"GiB\"], \"unit must be one of [B, KiB, MiB, GiB]\"\n chunk_size = common_utils.to_bytes(chunk_size, unit)\n max_size = common_utils.to_bytes(max_size, unit)\n if self.chunk_size is not None:\n assert self.chunk_size == chunk_size, \"currently the chunk size must be the same\"\n self.chunk_size = chunk_size\n self.max_size = max_size\n if self.auto_preemptive:\n return {\n \"status\": 400,\n \"result\": \"Already in preemptive mode, Please call stop_preemptive() first\"\n }\n with self.preemptive_condition:\n self.auto_preemptive = True\n self.preemptive_auto_close = auto_close\n self.preemptive_condition.notify_all()\n return {\n \"status\": 200,\n \"result\": \"Success start preemptive\"\n }\n\n def stop_preemptive(self):\n with self.preemptive_condition:\n self.auto_preemptive = False\n self.preemptive_condition.notify_all()\n return {\n \"status\": 200,\n \"result\": \"Success stop preemptive\"\n }" }, { "identifier": "TaskManager", "path": "task_manager/manager/task.py", "snippet": "class TaskManager(mp.Process):\n\n def __init__(\n self, \n identity: str, \n core_manager_addr: str,\n task_manager_addr: str,\n user_args: str,\n stdout_file: str,\n stderr_file: str,\n ) -> None:\n mp.Process.__init__(self, daemon=True)\n assert core_manager_addr.startswith(\"tcp://\") or core_manager_addr.startswith(\"ipc://\"), \\\n \"core_manager_addr must start with tcp:// or ipc://\"\n assert task_manager_addr.startswith(\"tcp://\") or task_manager_addr.startswith(\"ipc://\"), \\\n \"task_manager_addr must start with tcp:// or ipc://\"\n self.identity = identity\n self.core_manager_addr = core_manager_addr\n self.task_manager_addr = task_manager_addr\n self.user_args = user_args\n self.process_lock = None\n self.process = None\n self.stdout_file = stdout_file\n self.stderr_file = stderr_file\n\n def daemon_fn(self):\n daemon_client = zmq_utils.ZMQClient(\n addr=self.core_manager_addr,\n identity=self.identity,\n )\n time.sleep(1)\n while self.running:\n poll_status = None\n with 
self.process_lock:\n if self.process is not None:\n poll_status = self.process.poll()\n if poll_status is not None:\n daemon_client.send_binary(\n any=common_utils.dict_to_byte_msg({\n \"function\": \"remove_task_by_task_daemon\",\n \"kwargs\": {\n \"identity\": self.identity,\n \"msg\": \"Task finished\",\n \"return_code\": poll_status,\n }\n })\n )\n self.running = False\n else:\n time.sleep(5)\n\n def signal_handler(self, signum, frame):\n exit(0)\n\n def _init_manager(self) -> None:\n signal.signal(signal.SIGINT, self.signal_handler)\n signal.signal(signal.SIGTERM, self.signal_handler)\n self.running = True\n self.task_client = zmq_utils.ZMQClient(\n addr=self.task_manager_addr,\n identity=self.identity,\n )\n time.sleep(1)\n self.stdout = open(self.stdout_file, \"wb\")\n if self.stdout_file == self.stderr_file:\n self.stderr = self.stdout\n else:\n self.stderr = open(self.stderr_file, \"wb\")\n self.process_lock = threading.Lock()\n self.process = subprocess.Popen(\n self.user_args,\n stdout=self.stdout,\n stderr=self.stderr,\n start_new_session=True,\n )\n self.task_client.send_binary(common_utils.dict_to_byte_msg({\n \"status\": 200,\n \"result\": f\"Success start a watching dog🐶 to run {' '.join(self.user_args)}\"\n }))\n time.sleep(1)\n self.daemon_thread = threading.Thread(target=self.daemon_fn, daemon=True)\n self.daemon_thread.start()\n\n def run(self):\n self._init_manager()\n while self.running:\n msg = self.task_client.recv_binary()[0]\n command = common_utils.byte_msg_to_dict(msg)\n return_msg = self.exception_wrapper(\n fn=getattr(self, command[\"function\"], self._default_fn),\n *command.get(\"args\", {}),\n **command.get(\"kwargs\", {})\n )\n self.task_client.send_binary(\n any=common_utils.dict_to_byte_msg(return_msg),\n )\n\n def exception_wrapper(self, fn, *args, **kwargs) -> Dict[str, Any]:\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n return {\n \"status\": 400,\n \"result\": f\"Exception when call {fn.__name__}, the excption is \" + str(e)\n }\n\n def _default_fn(self, *args, **kwargs):\n raise NotImplementedError(\"This function is not implemented\")\n\n def exit(self):\n self.running = False\n with self.process_lock:\n os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)\n return_code = self.process.wait()\n self.stdout.close()\n if self.stdout_file != self.stderr_file:\n self.stderr.close()\n return {\n \"status\": 200,\n \"result\": {\n \"msg\": \"👋bye~\",\n \"return_code\": return_code,\n }\n }\n\n def get_status(self):\n process_status = None\n with self.process_lock:\n if self.process is not None:\n process_status = self.process.poll()\n if process_status is None:\n return {\n \"status\": 200,\n \"result\": {\n \"status\": \"running\"\n }\n }\n else:\n return {\n \"status\": 200,\n \"result\": {\n \"status\": \"finished\"\n }\n }" } ]
import os
import time
import multiprocessing as mp
import pycuda.driver as pycuda_drv
import task_manager.utils.zmq_utils as zmq_utils
import task_manager.utils.common_utils as common_utils
from typing import Dict, List, Any
from task_manager.manager.gpu import GPUManager
from task_manager.manager.task import TaskManager
6,008
) identity_, msg = self._task_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg["watched_tasks"][identity] = msg return { "status": 200, "result": { "msg": "👋bye~", "watched_gpus": return_msg } } def get_gpus_info_by_identities(self, identities: List[str], info_level: str="simple") -> Dict[str, Any]: if len(identities) == 0: identities = list(self.watched_gpus.keys()) assert len(identities) == len(set(identities)), "identities should not contain duplicate elements" return_msg = {} for identity in identities: if identity not in self.watched_gpus.keys(): return_msg[identity] = { "status": 400, "result": f"Could not find a watch dog with identity {identity}" } else: self._gpu_manager.send_binary( any=common_utils.dict_to_byte_msg({ "function": "get_gpu_info", "kwargs": { "info_level": info_level } }), identity=identity ) identity_, msg = self._gpu_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg[identity] = msg return { "status": 200, "result": return_msg } def get_gpus_info_by_device_ids(self, device_ids: List[int], info_level: str="simple") -> Dict[str, Any]: if len(device_ids) == 0: device_ids = list(range(pycuda_drv.Device.count())) assert len(device_ids) == len(set(device_ids)), "device_ids should not contain duplicate elements" assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in device_ids]), \ "The device_id should be in the valid range" watched_gpu_device_ids = { self.watched_gpus[identity]["device_id"]: identity for identity in self.watched_gpus.keys() if self.watched_gpus[identity]["device_id"] in device_ids } unwatched_gpus = sorted(list(set(device_ids) - watched_gpu_device_ids.keys())) return_msg = {} for device_id in watched_gpu_device_ids.keys(): identity = watched_gpu_device_ids[device_id] self._gpu_manager.send_binary( any=common_utils.dict_to_byte_msg({ "function": "get_gpu_info", "kwargs": { "info_level": info_level } }), identity=identity ) identity_, msg = self._gpu_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg[identity] = msg return_msg["unwatched"] = [] for device_id in unwatched_gpus: gpu_device = pycuda_drv.Device(device_id) device_msg = { "device_id": device_id, "device_name": gpu_device.name(), "total_memory": common_utils.fmt_bytes(gpu_device.total_memory()), "compute_capability": float("%d.%d" % gpu_device.compute_capability()), } if info_level != "simple": device_attributes_tuples = gpu_device.get_attributes().items() device_attributes = {} for k, v in device_attributes_tuples: device_attributes[str(k)] = v device_msg["device_attributes"] = device_attributes return_msg["unwatched"].append(device_msg) return { "status": 200, "result": return_msg } def start_watch_dog_by_device_ids(self, device_ids: List[int]) -> Dict[str, Any]: assert len(device_ids) > 0, "device_ids should not be empty" assert len(device_ids) == len(set(device_ids)), "device_ids should not contain duplicate elements" assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in device_ids]), \ "The device_id should be in the valid range" watched_gpu_device_ids = { self.watched_gpus[identity]["device_id"]: identity for identity in self.watched_gpus.keys() if self.watched_gpus[identity]["device_id"] in 
device_ids } return_msg = {} for device_id in device_ids: if device_id in watched_gpu_device_ids.keys(): return_msg[watched_gpu_device_ids[device_id]] = { "status": 400, "result": f"GPU{device_id} is already being watched by {watched_gpu_device_ids[device_id]}" } else: timestamp = str(time.time()) identity = common_utils.md5(f"watch_dog_{device_id}_{timestamp}")
class CoreManager(mp.Process): def __init__( self, core_manager_addr: str, gpu_manager_addr: str="ipc://gpu_manager", task_manager_addr: str="ipc://task_manager", log_dir: str="logs", log_level: str="INFO", ) -> None: mp.Process.__init__(self) assert core_manager_addr.startswith("tcp://") or core_manager_addr.startswith("ipc://"), \ "core manager address must start with tcp:// or ipc://" assert gpu_manager_addr.startswith("tcp://") or gpu_manager_addr.startswith("ipc://"), \ "gpu manager address must start with tcp:// or ipc://" assert task_manager_addr.startswith("tcp://") or task_manager_addr.startswith("ipc://"), \ "task manager address must start with tcp:// or ipc://" self.core_manager_addr = core_manager_addr self.gpu_manager_addr = gpu_manager_addr self.task_manager_addr = task_manager_addr self.log_dir = log_dir self.log_level = log_level def _init_manager(self) -> None: self.logger = common_utils.get_logger( logger_name="core_manager", log_level=self.log_level, handler=os.path.join(self.log_dir, "core_manager.log") ) self.logger.info(f"CoreManager is listening on {self.core_manager_addr}") self._core_manager = zmq_utils.ZMQServer( addr=self.core_manager_addr, ) time.sleep(1) self.logger.info(f"GPUManager is listening on {self.gpu_manager_addr}") self._gpu_manager = zmq_utils.ZMQServer( addr=self.gpu_manager_addr, ) self.logger.info(f"TaskManager is listening on {self.task_manager_addr}") self._task_manager = zmq_utils.ZMQServer( addr=self.task_manager_addr, ) self.watched_gpus = {} self.watched_tasks = {} pycuda_drv.init() self.running = True def run(self) -> None: self._init_manager() while self.running: identity, msg = self._core_manager.recv_binary() command = common_utils.byte_msg_to_dict(msg) self.logger.info(f"receive command to call {command['function']}") return_msg = self.exception_wrapper( fn=getattr(self, command["function"], self._default_fn), *command.get("args", {}), **command.get("kwargs", {}) ) self._core_manager.send_binary( any=common_utils.dict_to_byte_msg(return_msg), identity=identity ) def exception_wrapper(self, fn, *args, **kwargs) -> Dict[str, Any]: try: return fn(*args, **kwargs) except Exception as e: self.logger.error(f"Exception when call {fn.__name__}") self.logger.exception(e) return { "status": 400, "result": f"Exception when call {fn.__name__}, the excption is " + str(e) } def _default_fn(self, *args, **kwargs) -> None: raise NotImplementedError("This function is not implemented") def exit(self) -> Dict[str, Any]: self.logger.info("=> [info] exit core server...") self.running = False return_msg = { "watched_gpus": {}, "watched_tasks": {} } for identity in self.watched_gpus.keys(): self._gpu_manager.send_binary( any=common_utils.dict_to_byte_msg({ "function": "exit" }), identity=identity ) identity_, msg = self._gpu_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg["watched_gpus"][identity] = msg for identity in self.watched_tasks.keys(): self._task_manager.send_binary( any=common_utils.dict_to_byte_msg({ "function": "exit" }), identity=identity ) identity_, msg = self._task_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg["watched_tasks"][identity] = msg return { "status": 200, "result": { "msg": "👋bye~", "watched_gpus": return_msg } } def get_gpus_info_by_identities(self, identities: List[str], info_level: str="simple") -> 
Dict[str, Any]: if len(identities) == 0: identities = list(self.watched_gpus.keys()) assert len(identities) == len(set(identities)), "identities should not contain duplicate elements" return_msg = {} for identity in identities: if identity not in self.watched_gpus.keys(): return_msg[identity] = { "status": 400, "result": f"Could not find a watch dog with identity {identity}" } else: self._gpu_manager.send_binary( any=common_utils.dict_to_byte_msg({ "function": "get_gpu_info", "kwargs": { "info_level": info_level } }), identity=identity ) identity_, msg = self._gpu_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg[identity] = msg return { "status": 200, "result": return_msg } def get_gpus_info_by_device_ids(self, device_ids: List[int], info_level: str="simple") -> Dict[str, Any]: if len(device_ids) == 0: device_ids = list(range(pycuda_drv.Device.count())) assert len(device_ids) == len(set(device_ids)), "device_ids should not contain duplicate elements" assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in device_ids]), \ "The device_id should be in the valid range" watched_gpu_device_ids = { self.watched_gpus[identity]["device_id"]: identity for identity in self.watched_gpus.keys() if self.watched_gpus[identity]["device_id"] in device_ids } unwatched_gpus = sorted(list(set(device_ids) - watched_gpu_device_ids.keys())) return_msg = {} for device_id in watched_gpu_device_ids.keys(): identity = watched_gpu_device_ids[device_id] self._gpu_manager.send_binary( any=common_utils.dict_to_byte_msg({ "function": "get_gpu_info", "kwargs": { "info_level": info_level } }), identity=identity ) identity_, msg = self._gpu_manager.recv_binary() identity_ = identity_.decode("utf-8") msg = common_utils.byte_msg_to_dict(msg) assert identity == identity_, "identity mismatch" return_msg[identity] = msg return_msg["unwatched"] = [] for device_id in unwatched_gpus: gpu_device = pycuda_drv.Device(device_id) device_msg = { "device_id": device_id, "device_name": gpu_device.name(), "total_memory": common_utils.fmt_bytes(gpu_device.total_memory()), "compute_capability": float("%d.%d" % gpu_device.compute_capability()), } if info_level != "simple": device_attributes_tuples = gpu_device.get_attributes().items() device_attributes = {} for k, v in device_attributes_tuples: device_attributes[str(k)] = v device_msg["device_attributes"] = device_attributes return_msg["unwatched"].append(device_msg) return { "status": 200, "result": return_msg } def start_watch_dog_by_device_ids(self, device_ids: List[int]) -> Dict[str, Any]: assert len(device_ids) > 0, "device_ids should not be empty" assert len(device_ids) == len(set(device_ids)), "device_ids should not contain duplicate elements" assert all([device_id >= 0 and device_id < pycuda_drv.Device.count() for device_id in device_ids]), \ "The device_id should be in the valid range" watched_gpu_device_ids = { self.watched_gpus[identity]["device_id"]: identity for identity in self.watched_gpus.keys() if self.watched_gpus[identity]["device_id"] in device_ids } return_msg = {} for device_id in device_ids: if device_id in watched_gpu_device_ids.keys(): return_msg[watched_gpu_device_ids[device_id]] = { "status": 400, "result": f"GPU{device_id} is already being watched by {watched_gpu_device_ids[device_id]}" } else: timestamp = str(time.time()) identity = common_utils.md5(f"watch_dog_{device_id}_{timestamp}")
watchdog = GPUManager(
0
2023-12-30 11:47:06+00:00
8k
yixinNB/pyscrcpy
pyscrcpy/core.py
[ { "identifier": "EVENT_DISCONNECT", "path": "pyscrcpy/const.py", "snippet": "EVENT_DISCONNECT = \"disconnect\"" }, { "identifier": "EVENT_FRAME", "path": "pyscrcpy/const.py", "snippet": "EVENT_FRAME = \"frame\"" }, { "identifier": "EVENT_INIT", "path": "pyscrcpy/const.py", "snippet": "EVENT_INIT = \"init\"" }, { "identifier": "LOCK_SCREEN_ORIENTATION_UNLOCKED", "path": "pyscrcpy/const.py", "snippet": "LOCK_SCREEN_ORIENTATION_UNLOCKED = -1" }, { "identifier": "EVENT_ONCHANGE", "path": "pyscrcpy/const.py", "snippet": "EVENT_ONCHANGE = \"onchange\"" }, { "identifier": "ControlSender", "path": "pyscrcpy/control.py", "snippet": "class ControlSender:\n def __init__(self, parent):\n self.parent = parent # client object\n self.adbutil_devices = parent.device\n\n @inject(const.TYPE_INJECT_KEYCODE)\n def keycode(\n self, keycode: int, action: int = const.ACTION_DOWN, repeat: int = 0\n ) -> bytes:\n \"\"\"\n Send keycode to device\n\n Args:\n keycode: const.KEYCODE_*\n action: ACTION_DOWN | ACTION_UP\n repeat: repeat count\n \"\"\"\n return struct.pack(\">Biii\", action, keycode, repeat, 0)\n\n @inject(const.TYPE_INJECT_TEXT)\n def text(self, text: str) -> bytes:\n \"\"\"\n Send text to device\n\n Args:\n text: text to send\n \"\"\"\n\n buffer = text.encode(\"utf-8\")\n return struct.pack(\">i\", len(buffer)) + buffer\n\n # @inject(const.TYPE_INJECT_TOUCH_EVENT)\n # def touch(self, x: int, y: int, action: int = const.ACTION_DOWN, touch_id: int = -1) -> bytes:\n # \"\"\"\n # Touch screen\n #\n # Args:\n # x: horizontal position\n # y: vertical position\n # action: ACTION_DOWN | ACTION_UP | ACTION_MOVE\n # touch_id: Default using virtual id -1, you can specify it to emulate multi finger touch\n # \"\"\"\n # x, y = max(x, 0), max(y, 0)\n # return struct.pack(\n # \">BqiiHHHi\",\n # action,\n # touch_id,\n # int(x),\n # int(y),\n # int(self.parent.resolution[0]),\n # int(self.parent.resolution[1]),\n # 0xFFFF,\n # 1,\n # )\n def touch(self, x, y):\n self.adbutil_devices.shell(f\"input tap {x} {y}\")\n\n @inject(const.TYPE_INJECT_SCROLL_EVENT)\n def scroll(self, x: int, y: int, h: int, v: int) -> bytes:\n \"\"\"\n Scroll screen\n\n Args:\n x: horizontal position\n y: vertical position\n h: horizontal movement\n v: vertical movement\n \"\"\"\n\n x, y = max(x, 0), max(y, 0)\n return struct.pack(\n \">iiHHii\",\n int(x),\n int(y),\n int(self.parent.resolution[0]),\n int(self.parent.resolution[1]),\n int(h),\n int(v),\n )\n\n @inject(const.TYPE_BACK_OR_SCREEN_ON)\n def back_or_turn_screen_on(self, action: int = const.ACTION_DOWN) -> bytes:\n \"\"\"\n If the screen is off, it is turned on only on ACTION_DOWN\n\n Args:\n action: ACTION_DOWN | ACTION_UP\n \"\"\"\n return struct.pack(\">B\", action)\n\n @inject(const.TYPE_EXPAND_NOTIFICATION_PANEL)\n def expand_notification_panel(self) -> bytes:\n \"\"\"\n Expand notification panel\n \"\"\"\n return b\"\"\n\n @inject(const.TYPE_EXPAND_SETTINGS_PANEL)\n def expand_settings_panel(self) -> bytes:\n \"\"\"\n Expand settings panel\n \"\"\"\n return b\"\"\n\n @inject(const.TYPE_COLLAPSE_PANELS)\n def collapse_panels(self) -> bytes:\n \"\"\"\n Collapse all panels\n \"\"\"\n return b\"\"\n\n def get_clipboard(self) -> str:\n \"\"\"\n Get clipboard\n \"\"\"\n # Since this function need socket response, we can't auto inject it any more\n s: socket.socket = self.parent.control_socket\n\n with self.parent.control_socket_lock:\n # Flush socket\n s.setblocking(False)\n while True:\n try:\n s.recv(1024)\n except BlockingIOError:\n break\n s.setblocking(True)\n\n # Read 
package\n package = struct.pack(\">B\", const.TYPE_GET_CLIPBOARD)\n s.send(package)\n (code,) = struct.unpack(\">B\", s.recv(1))\n assert code == 0\n (length,) = struct.unpack(\">i\", s.recv(4))\n\n return s.recv(length).decode(\"utf-8\")\n\n @inject(const.TYPE_SET_CLIPBOARD)\n def set_clipboard(self, text: str, paste: bool = False) -> bytes:\n \"\"\"\n Set clipboard\n\n Args:\n text: the string you want to set\n paste: paste now\n \"\"\"\n buffer = text.encode(\"utf-8\")\n return struct.pack(\">?i\", paste, len(buffer)) + buffer\n\n @inject(const.TYPE_SET_SCREEN_POWER_MODE)\n def set_screen_power_mode(\n self, mode: int = const.POWER_MODE_NORMAL\n ) -> bytes:\n \"\"\"\n Set screen power mode\n\n Args:\n mode: POWER_MODE_OFF | POWER_MODE_NORMAL\n \"\"\"\n return struct.pack(\">b\", mode)\n\n @inject(const.TYPE_ROTATE_DEVICE)\n def rotate_device(self) -> bytes:\n \"\"\"\n Rotate device\n \"\"\"\n return b\"\"\n\n def swipe(\n self,\n start_x: int,\n start_y: int,\n end_x: int,\n end_y: int,\n move_step_length: int = 5,\n move_steps_delay: float = 0.005,\n ) -> None:\n \"\"\"\n Swipe on screen\n\n Args:\n start_x: start horizontal position\n start_y: start vertical position\n end_x: start horizontal position\n end_y: end vertical position\n move_step_length: length per step\n move_steps_delay: sleep seconds after each step\n :return:\n \"\"\"\n\n self.touch(start_x, start_y, const.ACTION_DOWN)\n next_x = start_x\n next_y = start_y\n\n if end_x > self.parent.resolution[0]:\n end_x = self.parent.resolution[0]\n\n if end_y > self.parent.resolution[1]:\n end_y = self.parent.resolution[1]\n\n decrease_x = True if start_x > end_x else False\n decrease_y = True if start_y > end_y else False\n while True:\n if decrease_x:\n next_x -= move_step_length\n if next_x < end_x:\n next_x = end_x\n else:\n next_x += move_step_length\n if next_x > end_x:\n next_x = end_x\n\n if decrease_y:\n next_y -= move_step_length\n if next_y < end_y:\n next_y = end_y\n else:\n next_y += move_step_length\n if next_y > end_y:\n next_y = end_y\n\n self.touch(next_x, next_y, const.ACTION_MOVE)\n\n if next_x == end_x and next_y == end_y:\n self.touch(next_x, next_y, const.ACTION_UP)\n break\n time.sleep(move_steps_delay)" } ]
import os
import abc
import socket
import struct
import threading
import time
import numpy as np
import numpy.typing as npt
import cv2 as cv
import cv2
from pathlib import Path
from time import sleep
from typing import Any, Callable, Optional, Tuple, Union
from adbutils import AdbConnection, AdbDevice, AdbError, Network, adb
from av.codec import CodecContext  # type: ignore
from av.error import InvalidDataError  # type: ignore
from loguru import logger
from .const import EVENT_DISCONNECT, EVENT_FRAME, EVENT_INIT, LOCK_SCREEN_ORIENTATION_UNLOCKED, EVENT_ONCHANGE
from .control import ControlSender
3,923
def __deploy_server(self) -> None: """ Deploy server to android device. Push the scrcpy-server.jar into the Android device using the adb.push(...). Then a basic connection between client and server is established. """ cmd = [ "CLASSPATH=/data/local/tmp/scrcpy-server.jar", "app_process", "/", "com.genymobile.scrcpy.Server", VERSION, # Scrcpy server version "info", # Log level: info, verbose... f"{self.max_size}", # Max screen width (long side) f"{self.bitrate}", # Bitrate of video f"{self.max_fps}", # Max frame per second f"{self.lock_screen_orientation}", # Lock screen orientation "true", # Tunnel forward "-", # Crop screen "false", # Send frame rate to client "true", # Control enabled "0", # Display id "false", # Show touches "true" if self.stay_awake else "false", # Stay awake "-", # Codec (video encoding) options "-", # Encoder name "false", # Power off screen after server closed ] self.device.push(JAR, "/data/local/tmp/") self.__server_stream: AdbConnection = self.device.shell(cmd, stream=True) def start(self, threaded: bool = False) -> None: """ Start the client-server connection. In order to avoid unpredictable behaviors, this method must be called after the on_init and on_frame callback are specify. Args: threaded : If set to True the stream loop willl run in a separated thread. This mean that the code after client.strart() will be run. Otherwise the client.start() method starts a endless loop and the code after this method will never run. todo new_thread """ assert self.alive is False self.__deploy_server() self.__init_server_connection() self.alive = True for func in self.listeners[EVENT_INIT]: func(self) if threaded: # 不阻塞当前thread threading.Thread(target=self.__stream_loop).start() else: self.__stream_loop() def stop(self) -> None: """ [ok]Close the various socket connection. Stop listening (both threaded and blocked) """ self.alive = False try: self.__server_stream.close() except Exception: pass try: self.control_socket.close() except Exception: pass try: self.__video_socket.close() except Exception: pass def __del__(self): self.stop() def __calculate_diff(self, img1, img2): if img1 is None: return 1 gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY) gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY) # 计算两张灰度图像的差异 diff = cv2.absdiff(gray1, gray2) # 设置阈值,忽略差异值较小的像素 threshold = 30 _, thresholded_diff = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY) # 计算差异像素的总数 total_diff_pixels = np.sum(thresholded_diff / 255) # 除以255得到二值图像中白色像素的数量 # 计算图像的总像素数 total_pixels = gray1.size # 计算变化率 change_rate = total_diff_pixels / total_pixels return change_rate def __stream_loop(self) -> None: """ Core loop for video parsing. While the connection is open (self.alive == True) recive raw h264 video stream and decode it into frames. These frame are those passed to on_frame callbacks. """ codec = CodecContext.create("h264", "r") while self.alive: try: raw = self.__video_socket.recv(0x10000) if raw == b"": raise ConnectionError("Video stream is disconnected") for packet in codec.parse(raw): for frame in codec.decode(packet): # codec.decode(packet)包含多帧 frame = frame.to_ndarray(format="bgr24")
Frame = npt.NDArray[np.int8] VERSION = "1.20" HERE = Path(__file__).resolve().parent JAR = HERE / f"scrcpy-server.jar" class Client: def __init__( self, device: Optional[Union[AdbDevice, str]] = None, max_size: int = 0, bitrate: int = 8000000, max_fps: int = 0, block_frame: bool = True, stay_awake: bool = True, lock_screen_orientation: int = LOCK_SCREEN_ORIENTATION_UNLOCKED, skip_same_frame=False ): """ [ok]Create a scrcpy client. The client won't be started until you call .start() Args: device: Android device to coennect to. Colud be also specify by serial string. If device is None the client try to connect to the first available device in adb deamon. max_size: Specify the maximum dimension of the video stream. This dimensioin refer both to width and hight.0: no limit[已校验, max size of width or height] bitrate: bitrate max_fps: Maximum FPS (Frame Per Second) of the video stream. If it is set to 0 it means that there is not limit to FPS. This feature is supported by android 10 or newer. [flip]: 没有这个参数, 会自动处理 block_frame: If set to true, the on_frame callbacks will be only apply on not empty frames. Otherwise try to apply on_frame callbacks on every frame, but this could raise exceptions in callbacks if they are not able to handle None value for frame. True:跳过空白帧 stay_awake: keep Android device awake while the client-server connection is alive. lock_screen_orientation: lock screen in a particular orientation. The available screen orientation are specify in const.py in variables LOCK_SCREEN_ORIENTATION* """ # Params挪到后面去 self.max_size = max_size self.bitrate = bitrate self.max_fps = max_fps self.block_frame = block_frame self.stay_awake = stay_awake self.lock_screen_orientation = lock_screen_orientation self.skip_same_frame = skip_same_frame self.min_frame_interval = 1 / max_fps if device is None: try: device = adb.device_list()[0] except IndexError: raise Exception("Cannot connect to phone") elif isinstance(device, str): device = adb.device(serial=device) self.device = device self.listeners = dict(frame=[], init=[], disconnect=[], onchange=[]) # User accessible self.last_frame: Optional[np.ndarray] = None self.resolution: Optional[Tuple[int, int]] = None self.device_name: Optional[str] = None self.control = ControlSender(self) # Need to destroy self.alive = False self.__server_stream: Optional[AdbConnection] = None self.__video_socket: Optional[socket.socket] = None self.control_socket: Optional[socket.socket] = None self.control_socket_lock = threading.Lock() def __init_server_connection(self) -> None: """ Connect to android server, there will be two sockets: video and control socket. This method will also set resolution property. """ for _ in range(30): # 超时 写死 try: self.__video_socket = self.device.create_connection( Network.LOCAL_ABSTRACT, "scrcpy" ) break except AdbError: sleep(0.1) pass else: raise ConnectionError("Failed to connect scrcpy-server after 3 seconds") dummy_byte = self.__video_socket.recv(1) if not len(dummy_byte): raise ConnectionError("Did not receive Dummy Byte!") self.control_socket = self.device.create_connection( Network.LOCAL_ABSTRACT, "scrcpy" ) self.device_name = self.__video_socket.recv(64).decode("utf-8").rstrip("\x00") if not len(self.device_name): raise ConnectionError("Did not receive Device Name!") res = self.__video_socket.recv(4) self.resolution = struct.unpack(">HH", res) self.__video_socket.setblocking(False) def __deploy_server(self) -> None: """ Deploy server to android device. Push the scrcpy-server.jar into the Android device using the adb.push(...). 
Then a basic connection between client and server is established. """ cmd = [ "CLASSPATH=/data/local/tmp/scrcpy-server.jar", "app_process", "/", "com.genymobile.scrcpy.Server", VERSION, # Scrcpy server version "info", # Log level: info, verbose... f"{self.max_size}", # Max screen width (long side) f"{self.bitrate}", # Bitrate of video f"{self.max_fps}", # Max frame per second f"{self.lock_screen_orientation}", # Lock screen orientation "true", # Tunnel forward "-", # Crop screen "false", # Send frame rate to client "true", # Control enabled "0", # Display id "false", # Show touches "true" if self.stay_awake else "false", # Stay awake "-", # Codec (video encoding) options "-", # Encoder name "false", # Power off screen after server closed ] self.device.push(JAR, "/data/local/tmp/") self.__server_stream: AdbConnection = self.device.shell(cmd, stream=True) def start(self, threaded: bool = False) -> None: """ Start the client-server connection. In order to avoid unpredictable behaviors, this method must be called after the on_init and on_frame callback are specify. Args: threaded : If set to True the stream loop willl run in a separated thread. This mean that the code after client.strart() will be run. Otherwise the client.start() method starts a endless loop and the code after this method will never run. todo new_thread """ assert self.alive is False self.__deploy_server() self.__init_server_connection() self.alive = True for func in self.listeners[EVENT_INIT]: func(self) if threaded: # 不阻塞当前thread threading.Thread(target=self.__stream_loop).start() else: self.__stream_loop() def stop(self) -> None: """ [ok]Close the various socket connection. Stop listening (both threaded and blocked) """ self.alive = False try: self.__server_stream.close() except Exception: pass try: self.control_socket.close() except Exception: pass try: self.__video_socket.close() except Exception: pass def __del__(self): self.stop() def __calculate_diff(self, img1, img2): if img1 is None: return 1 gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY) gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY) # 计算两张灰度图像的差异 diff = cv2.absdiff(gray1, gray2) # 设置阈值,忽略差异值较小的像素 threshold = 30 _, thresholded_diff = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY) # 计算差异像素的总数 total_diff_pixels = np.sum(thresholded_diff / 255) # 除以255得到二值图像中白色像素的数量 # 计算图像的总像素数 total_pixels = gray1.size # 计算变化率 change_rate = total_diff_pixels / total_pixels return change_rate def __stream_loop(self) -> None: """ Core loop for video parsing. While the connection is open (self.alive == True) recive raw h264 video stream and decode it into frames. These frame are those passed to on_frame callbacks. """ codec = CodecContext.create("h264", "r") while self.alive: try: raw = self.__video_socket.recv(0x10000) if raw == b"": raise ConnectionError("Video stream is disconnected") for packet in codec.parse(raw): for frame in codec.decode(packet): # codec.decode(packet)包含多帧 frame = frame.to_ndarray(format="bgr24")
if len(self.listeners[EVENT_ONCHANGE]) == 0 and not self.skip_same_frame:
4
2023-12-23 12:52:58+00:00
8k
andreafailla/pix2beats
ui.py
[ { "identifier": "resize_and_convert", "path": "backend.py", "snippet": "def resize_and_convert(filename, tmpdir, n_pixels=None):\n \"\"\"\n Resize the image, convert to hsv, and save as png\n\n :param filename:\n :param tmpdir:\n :param n_pixels:\n :return:\n \"\"\"\n # Saves\n img = Image.open(filename).convert(\"RGB\")\n if n_pixels is not None:\n # Calculate the aspect ratio\n aspect_ratio = img.width / img.height\n\n # Calculate the new width based on the desired number of pixels\n new_width = int((n_pixels * aspect_ratio) ** 0.5)\n\n # Resize the image while maintaining the aspect ratio\n img = img.resize((new_width, int(new_width / aspect_ratio)))\n if not filename.startswith(tmpdir):\n img.save(f\"{tmpdir}/{filename.split('.')[0]}_resized.png\", \"PNG\")\n\n return img" }, { "identifier": "trackmaker", "path": "backend.py", "snippet": "def trackmaker(\n img, scale, key, octave, harmony, randomize_octaves, t_value, n_pixels, gain_db, drive_db, cutoff_hz,\n resonance_lad, drive_lad, delay_seconds, room_size, damping, wet_level, dry_level, width, rate_hz_chorus\n):\n # Make the scale from parameters above\n scale_to_use = get_scale(octave, key, scale)\n # Make the track!\n track, harmony = get_track_layers(img, scale=scale_to_use, t=t_value, n_pixels=n_pixels,\n randomize_octaves=randomize_octaves, harmonize=harmony)\n\n # Write the track into a file\n track_combined = np.vstack((track, harmony))\n wavfile.write('track.wav', rate=SAMPLE_RATE,\n data=track_combined.T.astype(np.float32))\n\n # Read the track\n try:\n with AudioFile('track.wav', 'r') as f:\n audio = f.read(f.frames)\n\n # Apply the pedalboard effects\n effected = apply_pb_effects(\n gain_db, drive_db, cutoff_hz, resonance_lad,\n drive_lad, delay_seconds, damping, room_size,\n wet_level, dry_level, width, rate_hz_chorus,\n audio, SAMPLE_RATE\n )\n\n # Write the audio back as a wav file:\n with AudioFile('track.wav', 'w', SAMPLE_RATE, effected.shape[0]) as f:\n f.write(effected)\n\n # Read the processed track\n with open('track.wav', 'rb') as f:\n audio_bytes = f.read()\n\n # Remove the track\n if os.path.exists('track.wav'):\n os.remove('track.wav')\n\n return audio_bytes\n except ValueError:\n return None" }, { "identifier": "rolling_title", "path": "backend.py", "snippet": "def rolling_title(placeholder, text, delay=0.05):\n \"\"\"\n Displays title with rolling effect\n Placeholder is the container where the title will be displayed\n \"\"\"\n while True:\n\n for i in range(len(text)):\n time.sleep(delay)\n placeholder.markdown(f'#### {text[:i + 1]}')\n time.sleep(1)\n for i in range(len(text)):\n time.sleep(delay)\n placeholder.markdown(f'#### {text[:len(text) - i]}')" }, { "identifier": "SCALES", "path": "constants.py", "snippet": "SCALES = {\n \"Major\": [0, 2, 4, 5, 7, 9, 11],\n \"Natural Minor\": [0, 2, 3, 5, 7, 8, 10],\n \"Dorian\": [0, 2, 3, 5, 7, 9, 10],\n \"Mixolydian\": [0, 2, 4, 5, 7, 9, 10],\n \"Aeolian\": [0, 2, 3, 5, 7, 8, 10],\n \"Phrygian\": [0, 1, 3, 5, 7, 8, 10],\n \"Lydian\": [0, 2, 4, 6, 7, 9, 11],\n \"Harmonic Minor\": [0, 2, 3, 5, 7, 8, 11],\n \"Melodic Minor\": [0, 2, 3, 5, 7, 8, 9, 10, 11],\n \"Locrian\": [0, 1, 3, 5, 6, 8, 10],\n \"Blues\": [0, 2, 3, 4, 5, 7, 9, 10, 11],\n \"Chromatic\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],\n}" }, { "identifier": "NOTES", "path": "constants.py", "snippet": "NOTES = [\"A\", \"A#\", \"B\", \"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\"]" }, { "identifier": "HARMONIES", "path": "constants.py", "snippet": "HARMONIES = {\n \"None\": 1,\n \"Major 
second\": 9 / 8,\n \"Minor third\": 6 / 5,\n \"Major third\": 5 / 4,\n \"Perfect fourth\": 4 / 3,\n \"Diatonic tritone\": 45 / 32,\n \"Perfect fifth\": 3 / 2,\n \"Minor sixth\": 8 / 5,\n \"Major sixth\": 5 / 3,\n \"Minor seventh\": 9 / 5,\n \"Major seventh\": 15 / 8,\n}" }, { "identifier": "SAMPLE_IMAGES", "path": "constants.py", "snippet": "SAMPLE_IMAGES = [\"mona_lisa.png\", \"pixel_art_landscape.png\", \"sunflower.png\"]" }, { "identifier": "PRESETS", "path": "my_presets.py", "snippet": "PRESETS = {\n 'None':\n {'scale': 'Major', 'key': 'A', 'octave': 2, 'harmony': 'None',\n 'randomize_octaves': True, 'resize_to_n_pixels': False,\n 't_value': 0.2, 'n_pixels': 64,\n 'gain_db': 0.0, 'drive_db': 0.0,\n 'cutoff_hz': 0.0, 'resonance_lad': 0.0, 'drive_lad': 1.0, 'delay_seconds': 0.0,\n 'room_size': 0.0, 'damping': 0.0, 'wet_level': 0.0, 'dry_level': 0.1, 'width': 0.0,\n 'rate_hz_chorus': 0.0},\n 'Bitcrusher': {'scale': 'Natural Minor', 'key': 'G', 'octave': 2, 'harmony': 'Perfect fifth',\n 'randomize_octaves': True, 'resize_to_n_pixels': False, 't_value': 0.1, 'n_pixels': 100,\n 'gain_db': 9.0, 'drive_db': 14.0, 'cutoff_hz': 81.0, 'resonance_lad': 0.4, 'drive_lad': 5.8,\n 'delay_seconds': 0.0, 'room_size': 0.1, 'damping': 0.0, 'wet_level': 0.0, 'dry_level': 0.3,\n 'width': 0.0, 'rate_hz_chorus': 0.0},\n 'Sleepy Silly Penguin': {\"scale\": \"Dorian\", \"key\": \"F\", \"octave\": 3, \"harmony\": \"Major third\",\n \"randomize_octaves\": False, \"t_value\": 0.22, \"n_pixels\": 143, \"gain_db\": 0.0,\n \"drive_db\": 0.0, \"cutoff_hz\": 0.0, \"resonance_lad\": 0.0, \"drive_lad\": 1.0,\n \"delay_seconds\": 0.0, \"room_size\": 0.0, \"damping\": 0.0, \"wet_level\": 0.0, \"dry_level\": 0.1,\n \"width\": 0.0, \"rate_hz_chorus\": 0.3},\n 'Underground Cave': {\"scale\": \"Mixolydian\", \"key\": \"C\", \"octave\": 2, \"harmony\": \"Major sixth\",\n \"randomize_octaves\": False, \"t_value\": 0.2, \"n_pixels\": 219, \"gain_db\": 0.0,\n \"drive_db\": 0.0, \"cutoff_hz\": 0.0, \"resonance_lad\": 0.2, \"drive_lad\": 1.0,\n \"delay_seconds\": 0.1, \"room_size\": 0.2, \"damping\": 0.3, \"wet_level\": 0.0, \"dry_level\": 0.1,\n \"width\": 0.0, \"rate_hz_chorus\": 1.4},\n 'Distorted Bass': {\"scale\": \"Aeolian\", \"key\": \"A#\", \"octave\": 1, \"harmony\": \"None\", \"randomize_octaves\": False,\n \"t_value\": 0.3, \"n_pixels\": 64, \"gain_db\": 12.0, \"drive_db\": 4.0, \"cutoff_hz\": 0.0,\n \"resonance_lad\": 0.2, \"drive_lad\": 1.0, \"delay_seconds\": 0.0, \"room_size\": 0.1,\n \"damping\": 0.0, \"wet_level\": 0.0, \"dry_level\": 0.6, \"width\": 0.0, \"rate_hz_chorus\": 0.0},\n 'Bitcrusher (re:)': {\"scale\": \"Natural Minor\", \"key\": \"G\", \"octave\": 3, \"harmony\": \"Major seventh\",\n \"randomize_octaves\": True, \"t_value\": 0.1, \"n_pixels\": 100, \"gain_db\": 9.0, \"drive_db\": 14.0,\n \"cutoff_hz\": 81.0, \"resonance_lad\": 0.4, \"drive_lad\": 5.8, \"delay_seconds\": 0.0,\n \"room_size\": 0.1, \"damping\": 0.0, \"wet_level\": 0.0, \"dry_level\": 0.3, \"width\": 0.0,\n \"rate_hz_chorus\": 0.0}\n\n}" } ]
import json  # io
import tempfile
import streamlit as st  # UI
from PIL import Image  # image processing
from backend import resize_and_convert, trackmaker  # processing
from backend import rolling_title  # animation
from constants import SCALES, NOTES, HARMONIES, SAMPLE_IMAGES  # constants
from my_presets import PRESETS
4,694
step=0.1, key="wet_level", help="The wet_level parameter controls the amount of wet signal. ", ) with rev4: dry_level = st.slider( "dry_level", min_value=0.1, max_value=1.0, step=0.1, key="dry_level", help="The dry_level parameter controls the amount of dry signal. ", ) with rev5: width = st.slider( "width", min_value=0.0, max_value=1.0, step=0.1, key="width", help="The width parameter controls the width of the stereo image. ", ) st.markdown("### Ladder Filter") lf1, lf2, lf3 = st.columns(3) # Ladder Filter Parameters with lf1: cutoff_hz = st.slider( "cutoff_hz", min_value=0.0, max_value=1000.0, step=1.0, key="cutoff_hz", help="The cutoff_hz parameter controls the cutoff frequency of the filter. ", ) with lf2: resonance_lad = st.slider( "resonance", min_value=0.0, max_value=1.0, step=0.1, key="resonance_lad", help="The resonance parameter controls the resonance of the filter. ", ) with lf3: drive_lad = st.slider( "drive", min_value=1.0, max_value=100.0, step=0.1, key="drive_lad", help="The drive parameter controls the drive of the filter. ", ) return { "scale": scale, "key": key, "octave": octave, "harmony": harmony, "randomize_octaves": randomize_octaves, "resize_to_n_pixels": resize_to_n_pixels, "t_value": t_value, "n_pixels": n_pixels, "gain_db": gain_db, "drive_db": drive_db, "cutoff_hz": cutoff_hz, "resonance_lad": resonance_lad, "drive_lad": drive_lad, "delay_seconds": delay_seconds, "room_size": room_size, "damping": damping, "wet_level": wet_level, "dry_level": dry_level, "width": width, "rate_hz_chorus": rate_hz_chorus, } def export_buttons(filename, param_dict, track, tmpdir): b0, b1, _ = st.columns([1, 1, 2], gap="small") with b0: exp_track_name = ( filename[len(tmpdir) + 1 :] if filename.startswith(tmpdir) else filename ) st.download_button( "Download Track", data=track, file_name=f"{exp_track_name}.wav", mime="audio/wav", ) with b1: exp_preset_name = ( filename.split("/")[-1] if filename.startswith(tmpdir) else filename ) st.download_button( "Export Preset", data=json.dumps(param_dict), file_name=f"{exp_preset_name}.json", mime="application/json", ) if __name__ == "__main__": # all newly created files will be deleted when the context manager exits with tempfile.TemporaryDirectory() as tmpdir: init_session_state() # tells to use the default parameters plh = write_intro() # returns placeholder for the rolling title handle_presets() # load/upload presets filename = make_sidebar_and_select_file() # select an image param_dict = make_widgets_and_get_parameters() if filename is not None: # convert the image to RGB and resize it if necessary
def init_session_state(): for k, v in PRESETS["None"].items(): if k not in st.session_state: if k != "octave": st.session_state[k] = v else: octave_options = ["Low", "Mid", "High"] st.session_state[k] = octave_options[v - 1] def update_session_state(preset): for k, v in preset.items(): if k != "octave": st.session_state[k] = v else: octave_options = ["Low", "Mid", "High"] st.session_state[k] = octave_options[v - 1] def write_intro(): """Defines general settings and introduces the app. :return: placeholder for the rolling title """ st.set_page_config( page_title="Pix2Beats", page_icon=":musical_note:", layout="centered", initial_sidebar_state="expanded", ) st.markdown( """ <style> .stApp { background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"); background-size: cover; background-opacity: 0; } </style>""", unsafe_allow_html=True, ) st.title(":blue[Pix]2:red[Beats]") plh = st.empty() # Display the description st.markdown( """ Welcome to :blue[Pix]2:red[Beats]—a web application at the intersection of visual art and musical expression. Harnessing the power of Artificial Intelligence, :blue[Pix]2:red[Beats] transforms your images into sounds, unlocking a fascinating synergy between the realms of visual and auditory creativity. At the heart of :blue[Pix]2:red[Beats] lies the intuition that both images and sound can be effortlessly represented as matrices of numbers. This unique foundation allows us to create a one-of-a-kind mapping between color spaces and musical scales. Choose an image, tinker with the parameters, and let :blue[Pix]2:red[Beats] do the rest :musical_note: """ ) return plh def handle_presets(): presetsel, presetupl, _ = st.columns([1, 1, 2]) with presetsel: preset_name = st.selectbox( "***Choose a preset***", PRESETS.keys(), key="preset_select", help="Tip: you can modify an existing preset by selecting it and then selecting " "*None* from this list.", ) if preset_name is not None: if preset_name != "None": update_session_state(PRESETS[preset_name]) with presetupl: uploaded_preset = st.file_uploader( "***...or upload your own!***", type=["json"] ) css = """ <style> [data-testid='stFileUploader'] { width: max-content; } [data-testid='stFileUploader'] section { padding: 0; float: left; } [data-testid='stFileUploader'] section > input + div { display: none; } [data-testid='stFileUploader'] section + div { float: right; padding-top: 0; } </style> """ st.markdown(css, unsafe_allow_html=True) if uploaded_preset is not None: preset_name = uploaded_preset.name.split(".")[0] preset = json.load(uploaded_preset) PRESETS[preset_name] = preset update_session_state(preset) def make_sidebar_and_select_file(): """ Create the sidebar for the app The sidebar lets the user select an image to use :return: the image filename """ filename = None if ( st.sidebar.radio( "Image to use", ("Use Example Image", "Upload Image"), label_visibility="hidden", ) == "Use Example Image" ): filename = st.sidebar.selectbox("Choose a sample image", SAMPLE_IMAGES) img = Image.open(filename) else: img = st.sidebar.file_uploader("Upload an image", type=["jpg", "png", "jpeg"]) if img is not None: filename = img.name img = Image.open(img) filename = tmpdir + "/" + filename img.save(filename) # Display the image if filename is not None: st.sidebar.image(img) return filename def make_widgets_and_get_parameters(): """ UI to get the parameters required to generate the track :return: list of parameters 
""" col1, col2, col3 = st.columns([1, 1, 2]) with col1: scale_options = list(SCALES.keys()) scale = st.selectbox("***Choose the scale***", scale_options, key="scale") key = st.selectbox("***Choose the key***", NOTES, key="key") with col2: octave_options = ["Low", "Mid", "High"] octave = st.selectbox("***Choose the octave***", octave_options, key="octave") octave = octave_options.index(octave) + 1 harmony_options = list(HARMONIES.keys()) harmony = st.selectbox( "*Choose how to harmonize*", harmony_options, key="harmony" ) with col3: t_value = st.slider( "***Note duration (seconds)***", min_value=0.10, max_value=1.0, step=0.01, key="t_value", ) n_pixels = st.slider( "***Pixels to sample***", min_value=64, max_value=320, step=1, key="n_pixels", ) randomize_octaves = st.checkbox( "***Randomize octaves***", key="randomize_octaves", help="If checked, the octaves of the notes will be randomized. " "Otherwise, the notes will be played in the same octave.", ) resize_to_n_pixels = st.checkbox( "***Resize image to N pixels***", key="resize_to_n_pixels", help="If checked, the image will be resized to N pixels. " "Otherwise, the image will be used as is. " "N is the number of pixels selected above.", ) # ***Start Pedalboard Definitions*** st.markdown("## Pedalboard") with st.expander("###### Click here to see the pedalboard"): col4, col5, col6, col7 = st.columns(4) # Chorus Parameters with col4: st.markdown("### Chorus") rate_hz_chorus = st.slider( "rate_hz", min_value=0.0, max_value=100.0, step=0.1, key="rate_hz_chorus", help="The rate_hz parameter controls the rate of the chorus effect. ", ) # Delay Parameters with col5: st.markdown("### Delay") delay_seconds = st.slider( "delay_seconds", key="delay_seconds", min_value=0.0, max_value=2.0, step=0.1, help="The delay_seconds parameter controls the delay of the effect. ", ) # Distortion Parameters with col6: st.markdown("### Distortion") drive_db = st.slider( "drive_db", min_value=0.0, max_value=100.0, step=1.0, key="drive_db", help="The drive_db parameter controls the amount of distortion. ", ) # Gain Parameters with col7: st.markdown("### Gain") gain_db = st.slider( "gain_db", min_value=0.0, max_value=100.0, step=1.0, key="gain_db", help="The gain_db parameter controls the gain of the effect. ", ) st.markdown("### Reverb") rev1, rev2, rev3, rev4, rev5 = st.columns(5) # Reverb Parameters with rev1: room_size = st.slider( "room_size", min_value=0.0, max_value=1.0, step=0.1, key="room_size", help="The room_size parameter controls the size of the reverbing room. ", ) with rev2: damping = st.slider( "damping", min_value=0.0, max_value=1.0, step=0.1, key="damping" ) with rev3: wet_level = st.slider( "wet_level", min_value=0.0, max_value=1.0, step=0.1, key="wet_level", help="The wet_level parameter controls the amount of wet signal. ", ) with rev4: dry_level = st.slider( "dry_level", min_value=0.1, max_value=1.0, step=0.1, key="dry_level", help="The dry_level parameter controls the amount of dry signal. ", ) with rev5: width = st.slider( "width", min_value=0.0, max_value=1.0, step=0.1, key="width", help="The width parameter controls the width of the stereo image. ", ) st.markdown("### Ladder Filter") lf1, lf2, lf3 = st.columns(3) # Ladder Filter Parameters with lf1: cutoff_hz = st.slider( "cutoff_hz", min_value=0.0, max_value=1000.0, step=1.0, key="cutoff_hz", help="The cutoff_hz parameter controls the cutoff frequency of the filter. 
", ) with lf2: resonance_lad = st.slider( "resonance", min_value=0.0, max_value=1.0, step=0.1, key="resonance_lad", help="The resonance parameter controls the resonance of the filter. ", ) with lf3: drive_lad = st.slider( "drive", min_value=1.0, max_value=100.0, step=0.1, key="drive_lad", help="The drive parameter controls the drive of the filter. ", ) return { "scale": scale, "key": key, "octave": octave, "harmony": harmony, "randomize_octaves": randomize_octaves, "resize_to_n_pixels": resize_to_n_pixels, "t_value": t_value, "n_pixels": n_pixels, "gain_db": gain_db, "drive_db": drive_db, "cutoff_hz": cutoff_hz, "resonance_lad": resonance_lad, "drive_lad": drive_lad, "delay_seconds": delay_seconds, "room_size": room_size, "damping": damping, "wet_level": wet_level, "dry_level": dry_level, "width": width, "rate_hz_chorus": rate_hz_chorus, } def export_buttons(filename, param_dict, track, tmpdir): b0, b1, _ = st.columns([1, 1, 2], gap="small") with b0: exp_track_name = ( filename[len(tmpdir) + 1 :] if filename.startswith(tmpdir) else filename ) st.download_button( "Download Track", data=track, file_name=f"{exp_track_name}.wav", mime="audio/wav", ) with b1: exp_preset_name = ( filename.split("/")[-1] if filename.startswith(tmpdir) else filename ) st.download_button( "Export Preset", data=json.dumps(param_dict), file_name=f"{exp_preset_name}.json", mime="application/json", ) if __name__ == "__main__": # all newly created files will be deleted when the context manager exits with tempfile.TemporaryDirectory() as tmpdir: init_session_state() # tells to use the default parameters plh = write_intro() # returns placeholder for the rolling title handle_presets() # load/upload presets filename = make_sidebar_and_select_file() # select an image param_dict = make_widgets_and_get_parameters() if filename is not None: # convert the image to RGB and resize it if necessary
img = resize_and_convert(
0
2023-12-30 13:12:10+00:00
8k
AbstractUmbra/GreatAsset
great_asset/save_file.py
[ { "identifier": "decrypt", "path": "great_asset/crypt.py", "snippet": "def decrypt(\n *, path: str | PathLike[str] | Path | None = None, data: bytes | None = None\n) -> Any: # it returns the type of file we decrypt but alas\n if not path and not data:\n raise ValueError(\"Either `path` or `data` must be provided.\")\n\n if path:\n if not isinstance(path, Path):\n path = Path(path)\n\n with path.open(\"rb\") as fp:\n read_data = fp.read()\n else:\n read_data = data\n assert read_data # guarded earlier\n\n # The initialisation vector is the first 16 bytes of the save file.\n init_vector = read_data[:16]\n # then we take the proceeding N bytes as the data\n _to_decrypt = read_data[16:]\n\n # create the decryption key from the provided data\n decryption_key = PBKDF2(CRYPTO_PASSWORD, init_vector, dkLen=16, count=100)\n\n # with the key we create the needed cipher\n cipher = AES.new(decryption_key, AES.MODE_CBC, init_vector) # type: ignore # the upstream types aren't great\n\n # and now we decrypt the data\n decrypted_data = unpad(cipher.decrypt(_to_decrypt), AES.block_size, style=\"pkcs7\")\n\n # and it's always UTF-8\n resolved_data = decrypted_data.decode(\"utf-8\")\n\n return _from_json(resolved_data)" }, { "identifier": "encrypt", "path": "great_asset/crypt.py", "snippet": "def encrypt(path: str | PathLike[str] | Path, /) -> bytes:\n if not isinstance(path, Path):\n path = Path(path)\n\n with path.open(\"rb\") as fp:\n data_to_encrypt = fp.read()\n\n # Generate a random IV (Initialization Vector)\n init_vector = Random.new().read(16)\n\n # Derive the key using PBKDF2 with SHA1 hash algorithm\n key = PBKDF2(CRYPTO_PASSWORD, init_vector, dkLen=16, count=100)\n\n # Create AES cipher object\n cipher = AES.new(key, AES.MODE_CBC, init_vector) # type: ignore # the upstream types aren't great\n\n # Pad the data with PKCS7 before encryption\n padded_data = pad(data_to_encrypt, AES.block_size, style=\"pkcs7\")\n\n # Encrypt the data\n encrypted_data = init_vector + cipher.encrypt(padded_data)\n\n return encrypted_data" }, { "identifier": "BestiaryEntry", "path": "great_asset/enums.py", "snippet": "class BestiaryEntry(Enum):\n snare_flea = 0\n bracken = 1\n thumper = 2\n eyeless_dog = 3\n hoarding_bug = 4\n hygroderes = 5\n slime = 5\n forest_keepers = 6\n giants = 6\n coil_head = 7\n spring_head = 7\n lasso_man = 8 # not implemented?\n earth_leviathan = 9\n sand_worm = 9\n jester = 10\n jack_in_the_box = 10\n spore_lizard = 11\n bunker_spider = 12\n spider = 12\n manticoil = 13\n circuit_bees = 14\n bees = 14\n roaming_locusts = 15\n locusts = 15\n baboon_hawk = 16\n nutcracker = 17\n\n @staticmethod\n def all() -> list[\"BestiaryEntry\"]:\n return list(BestiaryEntry)" }, { "identifier": "ExtraUnlock", "path": "great_asset/enums.py", "snippet": "class ExtraUnlock(Enum):\n orange_suit = 0\n green_suit = 1\n hazard_suit = 2\n pyjama_suit = 3\n purple_suit = 24\n\n @staticmethod\n def all() -> list[\"ExtraUnlock\"]:\n return list(ExtraUnlock)" }, { "identifier": "Item", "path": "great_asset/enums.py", "snippet": "class Item(Enum):\n binoculars = 0 # not yet implemented\n boom_box = 1\n cardboard_box = 2\n flashlight = 3\n jetpack = 4\n key = 5\n lockpick = 6\n handheld_monitor = 8 # not yet implemented\n pro_flashlight = 9\n shovel = 10\n flashbang = 11\n extension_ladder = 12\n tzp_inhalant = 13\n walkie_talkie = 14\n stun_gun = 15" }, { "identifier": "Moon", "path": "great_asset/enums.py", "snippet": "class Moon(Enum):\n experimentation = 0\n assurance = 1\n vow = 2\n company_building = 3\n march = 
4\n rend = 5\n dine = 6\n offense = 7\n titan = 8" }, { "identifier": "Scrap", "path": "great_asset/enums.py", "snippet": "class Scrap(Enum):\n apparatus = 7\n magic_7_ball = 16\n airhorn = 17\n bell = 18\n big_bolt = 19\n bottles = 20\n hairbrush = 21\n candy = 22\n cash_register = 23\n chemical_jug = 24\n clown_horn = 25\n large_axel = 26\n teeth = 27\n dustpan = 28\n egg_beater = 29\n v_type_engine = 30\n golden_cup = 31\n lamp = 32\n painting = 33\n plastic_fish = 34\n laser_pointer = 35\n gold_bar = 36\n hairdryer = 37\n magnifying_glass = 38\n tattered_metal_sheet = 39\n cookie_mold_pan = 40\n coffee_mug = 41\n perfume_bottle = 42\n old_phone = 43\n jar_of_pickles = 44\n pill_bottle = 45\n remote = 46\n ring = 47\n robot_toy = 48\n rubber_ducky = 49\n red_soda = 50\n steering_wheel = 51\n stop_sign = 52\n tea_kettle = 53\n toothpaste = 54\n toy_cube = 55\n bee_hive = 56\n radar_booster = 57\n yield_sign = 58\n shotgun = 59\n shotgun_shell = 60\n spray_paint = 61\n homemade_flashbang = 62\n gift_box = 63\n flask = 64\n tragedy = 65\n comedy = 66\n whoopie_cushion = 67" }, { "identifier": "ShipUnlock", "path": "great_asset/enums.py", "snippet": "class ShipUnlock(Enum):\n cozy_lights = 4, \"Cozy lights\"\n teleporter = 5, \"Teleporter\"\n television = 6, \"Television\"\n tv = 6, \"Television\"\n cupboard = 7, \"Cupboard\"\n file_cabinet = 8, \"File Cabinet\"\n toilet = 9, \"Toilet\"\n shower = 10, \"Shower\"\n light_switch = 11, \"Light switch\"\n record_player = 12, \"Record player\"\n table = 13, \"Table\"\n romantic_table = 14, \"Romantic table\"\n bunkbeds = 15, \"Bunkbeds\"\n terminal = 16, \"Terminal\"\n signal_translator = 17, \"Signal translator\"\n signal_transmitter = 17, \"Signal translator\"\n loud_horn = 18, \"Loud horn\"\n inverse_teleporter = 19, \"Inverse Teleporter\"\n jack_o_lantern = 20, \"JackOLantern\"\n welcome_mat = 21, \"Welcome mat\"\n goldfish = 22, \"Goldfish\"\n plushie_pajama_man = 23, \"Plushie pajama man\"\n plushie_pyjama_man = 23, \"Plushie pajama man\"\n\n def __init__(self, value: int, serialised_name: str) -> None:\n self._serialised_value: int = value\n self._serialised_name: str = serialised_name\n\n @property\n def serialised_value(self) -> int:\n return self._serialised_value\n\n @property\n def serialised_name(self) -> str:\n return self._serialised_name\n\n @staticmethod\n def all() -> list[\"ShipUnlock\"]:\n return list(ShipUnlock)" }, { "identifier": "GrabbableScrap", "path": "great_asset/item.py", "snippet": "class GrabbableScrap(NamedTuple):\n id: int\n value: int\n pos: InnerVectorValue" }, { "identifier": "MISSING", "path": "great_asset/utils.py", "snippet": " def _to_json(obj: Any, /) -> str:\n def _to_json(obj: Any, /) -> str:\n def __eq__(self, other: object) -> bool:\n def __bool__(self) -> bool:\n def __hash__(self) -> int:\n def __repr__(self) -> str:\ndef resolve_save_path(save_number: SaveValue, /) -> pathlib.Path:\nclass _MissingSentinel:\nMISSING: Any = _MissingSentinel()" }, { "identifier": "Vector", "path": "great_asset/vector.py", "snippet": "class Vector:\n def __init__(self, x: float, y: float, z: float) -> None:\n self.x: float = float(x)\n self.y: float = float(y)\n self.z: float = float(z)\n\n def __repr__(self) -> str:\n return f\"<Vector x={self.x} y={self.y} z={self.z}>\"\n\n @classmethod\n def default(cls) -> Vector:\n return cls(-3.5, 2.5, -12.5)\n\n @classmethod\n def in_cupboard(cls, cupboard_position: VectorValue | None = None) -> Vector:\n if cupboard_position:\n position = 
cls.from_dict(cupboard_position[\"value\"])\n return cls(\n uniform(position.x, position.x - 0.5),\n choice(SHELVES),\n uniform(position.z, position.z - 0.5),\n )\n return cls(uniform(-3.0, -3.5), choice(SHELVES), uniform(-12, -12.5))\n\n @classmethod\n def from_dict(cls, payload: InnerVectorValue) -> Vector:\n return cls(**payload)\n\n def serialise(self) -> InnerVectorValue:\n return {\"x\": self.x, \"y\": self.y, \"z\": self.z}" } ]
import random
from pathlib import Path
from typing import TYPE_CHECKING, Any, Generic, Self, TypeVar
from .crypt import decrypt, encrypt
from .enums import BestiaryEntry, ExtraUnlock, Item, Moon, Scrap, ShipUnlock
from .item import GrabbableScrap
from .utils import MISSING, SaveValue, _to_json, resolve_save_path  # type: ignore[reportPrivateUsage] we allow this here.
from .vector import Vector
from os import PathLike
from types import TracebackType
from .types_.config_file import ConfigFile as ConfigFileType
from .types_.save_file import (
    SaveFile as SaveFileType,
)
from .types_.shared import *
4,862
            self._upsert_value(key, value)

        if dump_to_file:
            self._dump(overwrite=overwrite)

        self._written = True

    def _dump(self, overwrite: bool) -> None:
        decrypted_result = _to_json(self._inner_data)

        with TEMP_FILE.open("wb") as fp:
            fp.write(decrypted_result.encode("utf-8"))

        encrypted_result = encrypt(TEMP_FILE)

        p = self.path if overwrite else self.path.with_suffix(".over")
        with p.open("wb") as fp:
            fp.write(encrypted_result)


class SaveFile(_BaseSaveFile["SaveFileType"]):
    # late init variable types
    _extra_data: dict[str, Any]
    _credits: int
    _current_planet_id: int
    _deadline: float
    _deaths: int
    _elapsed_days: int
    _quotas_met: int
    _quota_threshold: int
    _seed: int
    _steps_taken: int

    __slots__ = (
        "_inner_data",
        "_extra_data",
        "_credits",
        "_current_planet_id",
        "_current_quota_progress",
        "_deadline",
        "_deaths",
        "_elapsed_days",
        "_quotas_met",
        "_quota_threshold",
        "_seed",
        "_steps_taken",
        # these values need a richer interface
        "_enemy_scans",
        "_ship_item_save_data",
        "_unlocked_ship_objects",
        "_scrap",
        "__ship_grabbable_items",
        "__ship_grabbable_item_positions",
        "__ship_scrap",
        "path",
    )

    @classmethod
    def resolve_from_file(cls, save_file_number: SaveValue, /) -> SaveFile:
        path = resolve_save_path(save_file_number)
        return cls(path)

    def validate_contents(self, data: SaveFileType, /) -> None:  # type: ignore # we narrowed the type in the subclass
        if not any(
            [
                data.get("GroupCredits"),
                data.get("DeadlineTime"),
                data.get("Stats_StepsTaken"),
                data.get("Stats_DaysSpent"),
                data.get("ProfitQuota"),
                data.get("CurrentPlanetID"),
            ]
        ):
            raise ValueError("This doesn't appear to be a valid Lethal Company save file!")

    def _parse_file(self) -> None:
        super()._parse_file()

        self._credits = self._inner_data["GroupCredits"]["value"]
        self._current_planet_id = self._inner_data["CurrentPlanetID"]["value"]
        self._current_quota_progress = self._inner_data["QuotaFulfilled"]["value"]
        self._deadline = self._inner_data["DeadlineTime"]["value"]
        self._deaths = self._inner_data["Stats_Deaths"]["value"]
        self._elapsed_days = self._inner_data["Stats_DaysSpent"]["value"]
        self._quotas_met = self._inner_data["QuotasPassed"]["value"]
        self._quota_threshold = self._inner_data["ProfitQuota"]["value"]
        self._seed = self._inner_data["RandomSeed"]["value"]
        self._steps_taken = self._inner_data["Stats_StepsTaken"]["value"]

        # TODO: richer interface here.
        self._enemy_scans = self._inner_data.get("EnemyScans", {"__type": "System.Int32[],mscorlib", "value": []})
        self._ship_item_save_data = self._inner_data.get(
            "shipItemSaveData", {"__type": "System.Int32[],mscorlib", "value": []}
        )
        self._unlocked_ship_objects = self._inner_data.get(
            "UnlockedShipObjects", {"__type": "System.Int32[],mscorlib", "value": []}
        )
        self.__ship_grabbable_items = self._inner_data.get(
            "shipGrabbableItemIDs", {"__type": "System.Int32[],mscorlib", "value": []}
        )
        self.__ship_grabbable_item_positions = self._inner_data.get(
            "shipGrabbableItemPos", {"__type": "UnityEngine.Vector3[],UnityEngine.CoreModule", "value": []}
        )
        self.__ship_scrap = self._inner_data.get("shipScrapValues", {"__type": "System.Int32[],mscorlib", "value": []})

        self._parse_scrap_mapping()

        # this key is mostly laziness for now
        # we'll serialise anything in here into the final payload
        # for now this will just be how we add the UnlockedStored_X keys
        self._extra_data = {}

    def _parse_scrap_mapping(self) -> None:
        # shipGrabbableItems contains all touchable items on the ship, including tools which have no value
        # shipScrapValues are an array of values assigned to each piece of scrap
        # it works because GrabbableItems[1]: ScrapValues[1], each index aligns and that's how the values are assigned, like a zip
        # once the scrapvalues runs out of elements, the rest of the items are treated as no value, like tools
""" The MIT License (MIT) Copyright (c) 2023-present AbstractUmbra Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import annotations if TYPE_CHECKING: SaveT = TypeVar("SaveT", "SaveFileType", "ConfigFileType") TEMP_FILE = Path("./_previously_decrypted_file.json") TIPS = [ "LC_MoveObjectsTip", "LC_StorageTip", "LC_LightningTip", "LCTip_SecureDoors", "LC_EclipseTip", "LCTip_SellScrap", "LCTip_UseManual", "LC_IntroTip1", ] __all__ = ( "SaveFile", "ConfigFile", ) class _BaseSaveFile(Generic[SaveT]): _inner_data: SaveT _file_type: str _extra_data: dict[str, Any] _written: bool _skip_parsing: bool __slots__ = ( "_inner_data", "_file_type", "_extra_data", "_written", "_skip_parsing", "path", ) def __init__(self, path: str | PathLike[str] | Path, /) -> None: self._skip_parsing = False self._written = False if not isinstance(path, Path): path = Path(path) if not path.exists(): raise ValueError("The path given does not exist") self.path: Path = path self._parse_file() @classmethod def from_data(cls, *, data: bytes, path: Path | None = None, save_number: SaveValue | None = None) -> Self: _number = save_number or "" path = path or Path(f"./LCSaveFile{_number}") file = cls.__new__(cls) file._skip_parsing = True decrypted: SaveT = decrypt(data=data) file.validate_contents(decrypted) file._inner_data = decrypted file.path = path file._parse_file() return file def __enter__(self) -> Self: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None ) -> None: if not self._written and not exc_type: self.write() def validate_contents(self, data: SaveFileType | ConfigFileType, /) -> None: raise NotImplementedError def _parse_file(self) -> None: if self._skip_parsing: return data = decrypt(path=self.path) self.validate_contents(data) self._inner_data = data def _upsert_value(self, key_name: str, value: Any) -> None: if value is MISSING: return # If the value is the sentinel type, do nothing and move onto the next if isinstance(value, int): _type = "int" elif isinstance(value, list): if value and isinstance(value[0], int): _type = "System.Int32[],mscorlib" elif value and isinstance(value[0], dict): _type = "UnityEngine.Vector3[],UnityEngine.CoreModule" else: raise ValueError("Unexpected or unknown array type passed for `value`") elif isinstance(value, bool): _type = "bool" else: raise ValueError("Unexpected type passed for `value`: %r (%s)", value, type(value)) try: self._inner_data[key_name]["value"] = value except KeyError: self._inner_data[key_name] = 
{"__type": _type, "value": value} def write(self, *, dump_to_file: bool = True, overwrite: bool = True) -> None: for key, value in self._extra_data.items(): self._upsert_value(key, value) if dump_to_file: self._dump(overwrite=overwrite) self._written = True def _dump(self, overwrite: bool) -> None: decrypted_result = _to_json(self._inner_data) with TEMP_FILE.open("wb") as fp: fp.write(decrypted_result.encode("utf-8")) encrypted_result = encrypt(TEMP_FILE) p = self.path if overwrite else self.path.with_suffix(".over") with p.open("wb") as fp: fp.write(encrypted_result) class SaveFile(_BaseSaveFile["SaveFileType"]): # late init variable types _extra_data: dict[str, Any] _credits: int _current_planet_id: int _deadline: float _deaths: int _elapsed_days: int _quotas_met: int _quota_threshold: int _seed: int _steps_taken: int __slots__ = ( "_inner_data", "_extra_data", "_credits", "_current_planet_id", "_current_quota_progress", "_deadline", "_deaths", "_elapsed_days", "_quotas_met", "_quota_threshold", "_seed", "_steps_taken", # these values need a richer interface "_enemy_scans", "_ship_item_save_data", "_unlocked_ship_objects", "_scrap", "__ship_grabbable_items", "__ship_grabbable_item_positions", "__ship_scrap", "path", ) @classmethod def resolve_from_file(cls, save_file_number: SaveValue, /) -> SaveFile: path = resolve_save_path(save_file_number) return cls(path) def validate_contents(self, data: SaveFileType, /) -> None: # type: ignore # we narrowed the type in the subclass if not any( [ data.get("GroupCredits"), data.get("DeadlineTime"), data.get("Stats_StepsTaken"), data.get("Stats_DaysSpent"), data.get("ProfitQuota"), data.get("CurrentPlanetID"), ] ): raise ValueError("This doesn't appear to be a valid Lethal Company save file!") def _parse_file(self) -> None: super()._parse_file() self._credits = self._inner_data["GroupCredits"]["value"] self._current_planet_id = self._inner_data["CurrentPlanetID"]["value"] self._current_quota_progress = self._inner_data["QuotaFulfilled"]["value"] self._deadline = self._inner_data["DeadlineTime"]["value"] self._deaths = self._inner_data["Stats_Deaths"]["value"] self._elapsed_days = self._inner_data["Stats_DaysSpent"]["value"] self._quotas_met = self._inner_data["QuotasPassed"]["value"] self._quota_threshold = self._inner_data["ProfitQuota"]["value"] self._seed = self._inner_data["RandomSeed"]["value"] self._steps_taken = self._inner_data["Stats_StepsTaken"]["value"] # TODO: richer interface here. 
self._enemy_scans = self._inner_data.get("EnemyScans", {"__type": "System.Int32[],mscorlib", "value": []}) self._ship_item_save_data = self._inner_data.get( "shipItemSaveData", {"__type": "System.Int32[],mscorlib", "value": []} ) self._unlocked_ship_objects = self._inner_data.get( "UnlockedShipObjects", {"__type": "System.Int32[],mscorlib", "value": []} ) self.__ship_grabbable_items = self._inner_data.get( "shipGrabbableItemIDs", {"__type": "System.Int32[],mscorlib", "value": []} ) self.__ship_grabbable_item_positions = self._inner_data.get( "shipGrabbableItemPos", {"__type": "UnityEngine.Vector3[],UnityEngine.CoreModule", "value": []} ) self.__ship_scrap = self._inner_data.get("shipScrapValues", {"__type": "System.Int32[],mscorlib", "value": []}) self._parse_scrap_mapping() # this key is mostly laziness for now # we'll serialise anything in here into the final payload # for now this will just be how we add the UnlockedStored_X keys self._extra_data = {} def _parse_scrap_mapping(self) -> None: # shipGrabbableItems contains all touchable items on the ship, including tools which have no value # shipScrapValues are an array of values assigned to each piece of scrap # it works because GrabbableItems[1]: ScrapValues[1], each index aligns and that's how the values are assigned, like a zip # once the scrapvalues runs out of elements, the rest of the items are treated as no value, like tools
self._scrap: list[GrabbableScrap] = []
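The comment block that closes the cropped _parse_scrap_mapping above, together with the next_line shown here, describes a zip-style pairing of shipGrabbableItemIDs, shipScrapValues and shipGrabbableItemPos. A hypothetical sketch of how the method body could continue (the real great_asset implementation may differ):

def _parse_scrap_mapping(self) -> None:
    self._scrap: list[GrabbableScrap] = []

    item_ids = self.__ship_grabbable_items["value"]
    scrap_values = self.__ship_scrap["value"]
    positions = self.__ship_grabbable_item_positions["value"]

    # zip() stops at the shortest input, which matches the described behaviour:
    # items beyond the end of the scrap-value array (tools) simply get no entry.
    for item_id, value, pos in zip(item_ids, scrap_values, positions):
        self._scrap.append(GrabbableScrap(item_id, value, pos))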
8
2023-12-25 11:03:20+00:00
8k
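Since _BaseSaveFile.__exit__ calls write() only when no exception escaped the block and nothing has been written yet, the class above is intended to be used as a context manager. A small usage sketch (save slot 1 is arbitrary and the save file must already exist on disk):

# On a clean exit the edited data is re-encrypted and written back automatically;
# if an exception is raised inside the block, nothing is written.
with SaveFile.resolve_from_file(1) as save_file:
    ...  # mutate the parsed fields here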
Shaokang-Agent/S2L
marlgrid/envs/doorkey.py
[ { "identifier": "MultiGridEnv", "path": "marlgrid/base.py", "snippet": "class MultiGridEnv(gym.Env):\n def __init__(\n self,\n agents = [],\n grid_size=None,\n width=None,\n height=None,\n max_steps=100,\n reward_decay=True,\n seed=1337,\n respawn=False,\n ghost_mode=True,\n agent_spawn_kwargs = {}\n ):\n\n if grid_size is not None:\n assert width == None and height == None\n width, height = grid_size, grid_size\n\n self.respawn = respawn\n\n self.window = None\n\n self.width = width\n self.height = height\n self.max_steps = max_steps\n self.reward_decay = reward_decay\n self.seed(seed=seed)\n self.agent_spawn_kwargs = agent_spawn_kwargs\n self.ghost_mode = ghost_mode\n\n self.agents = []\n for agent in agents:\n self.add_agent(agent)\n\n self.reset()\n\n def seed(self, seed=1337):\n # Seed the random number generator\n self.np_random, _ = gym.utils.seeding.np_random(seed)\n return [seed]\n\n @property\n def action_space(self):\n return gym.spaces.Tuple(\n [agent.action_space for agent in self.agents]\n )\n\n @property\n def observation_space(self):\n return gym.spaces.Tuple(\n [agent.observation_space for agent in self.agents]\n )\n\n @property\n def num_agents(self):\n return len(self.agents)\n \n def add_agent(self, agent_interface):\n if isinstance(agent_interface, dict):\n self.agents.append(GridAgentInterface(**agent_interface))\n elif isinstance(agent_interface, GridAgentInterface):\n self.agents.append(agent_interface)\n else:\n raise ValueError(\n \"To add an agent to a marlgrid environment, call add_agent with either a GridAgentInterface object \"\n \" or a dictionary that can be used to initialize one.\")\n\n def reset(self, **kwargs):\n for agent in self.agents:\n agent.agents = []\n agent.reset(new_episode=True)\n\n self._gen_grid(self.width, self.height)\n\n for agent in self.agents:\n if agent.spawn_delay == 0:\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n\n self.step_count = 0\n obs = self.gen_obs()\n return obs\n\n def gen_obs_grid(self, agent):\n # If the agent is inactive, return an empty grid and a visibility mask that hides everything.\n if not agent.active:\n # below, not sure orientation is correct but as of 6/27/2020 that doesn't matter because\n # agent views are usually square and this grid won't be used for anything.\n grid = MultiGrid((agent.view_size, agent.view_size), orientation=agent.dir+1)\n vis_mask = np.zeros((agent.view_size, agent.view_size), dtype=np.bool)\n return grid, vis_mask\n\n topX, topY, botX, botY = agent.get_view_exts()\n\n grid = self.grid.slice(\n topX, topY, agent.view_size, agent.view_size, rot_k=agent.dir + 1\n )\n\n # Process occluders and visibility\n # Note that this incurs some slight performance cost\n vis_mask = agent.process_vis(grid.opacity)\n\n # Warning about the rest of the function:\n # Allows masking away objects that the agent isn't supposed to see.\n # But breaks consistency between the states of the grid objects in the parial views\n # and the grid objects overall.\n if len(getattr(agent, 'hide_item_types', []))>0:\n for i in range(grid.width):\n for j in range(grid.height):\n item = grid.get(i,j)\n if (item is not None) and (item is not agent) and (item.type in agent.hide_item_types):\n if len(item.agents) > 0:\n grid.set(i,j,item.agents[0])\n else:\n grid.set(i,j,None)\n\n return grid, vis_mask\n\n def gen_agent_obs(self, agent):\n \"\"\"\n Generate the agent's view (partially observable, low-resolution encoding)\n \"\"\"\n grid, vis_mask = self.gen_obs_grid(agent)\n grid_image = 
grid.render(tile_size=agent.view_tile_size, visible_mask=vis_mask, top_agent=agent)\n if agent.observation_style=='image':\n return grid_image\n else:\n ret = {'pov': grid_image}\n if agent.observe_rewards:\n ret['reward'] = getattr(agent, 'step_reward', 0)\n if agent.observe_position:\n agent_pos = agent.pos if agent.pos is not None else (0,0)\n ret['position'] = np.array(agent_pos)/np.array([self.width, self.height], dtype=np.float)\n if agent.observe_orientation:\n agent_dir = agent.dir if agent.dir is not None else 0\n ret['orientation'] = agent_dir\n return ret\n\n def gen_obs(self):\n return [self.gen_agent_obs(agent) for agent in self.agents]\n\n def __str__(self):\n return self.grid.__str__()\n\n def check_agent_position_integrity(self, title=''):\n '''\n This function checks whether each agent is present in the grid in exactly one place.\n This is particularly helpful for validating the world state when ghost_mode=False and\n agents can stack, since the logic for moving them around gets a bit messy.\n Prints a message and drops into pdb if there's an inconsistency.\n '''\n agent_locs = [[] for _ in range(len(self.agents))]\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n x = self.grid.get(i,j)\n for k,agent in enumerate(self.agents):\n if x==agent:\n agent_locs[k].append(('top', (i,j)))\n if hasattr(x, 'agents') and agent in x.agents:\n agent_locs[k].append(('stacked', (i,j)))\n if not all([len(x)==1 for x in agent_locs]):\n print(f\"{title} > Failed integrity test!\")\n for a, al in zip(self.agents, agent_locs):\n print(\" > \", a.color,'-', al)\n import pdb; pdb.set_trace()\n\n def step(self, actions):\n # Spawn agents if it's time.\n for agent in self.agents:\n if not agent.active and not agent.done and self.step_count >= agent.spawn_delay:\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n \n assert len(actions) == len(self.agents)\n\n step_rewards = np.zeros((len(self.agents,)), dtype=np.float)\n\n self.step_count += 1\n\n iter_agents = list(enumerate(zip(self.agents, actions)))\n iter_order = np.arange(len(iter_agents))\n self.np_random.shuffle(iter_order)\n for shuffled_ix in iter_order:\n agent_no, (agent, action) = iter_agents[shuffled_ix]\n agent.step_reward = 0\n\n if agent.active:\n\n cur_pos = agent.pos[:]\n cur_cell = self.grid.get(*cur_pos)\n fwd_pos = agent.front_pos[:]\n fwd_cell = self.grid.get(*fwd_pos)\n agent_moved = False\n\n # Rotate left\n if action == agent.actions.left:\n agent.dir = (agent.dir - 1) % 4\n\n # Rotate right\n elif action == agent.actions.right:\n agent.dir = (agent.dir + 1) % 4\n\n # Move forward\n elif action == agent.actions.forward:\n # Under the follow conditions, the agent can move forward.\n can_move = fwd_cell is None or fwd_cell.can_overlap()\n if self.ghost_mode is False and isinstance(fwd_cell, GridAgent):\n can_move = False\n\n if can_move:\n agent_moved = True\n # Add agent to new cell\n if fwd_cell is None:\n self.grid.set(*fwd_pos, agent)\n agent.pos = fwd_pos\n else:\n fwd_cell.agents.append(agent)\n agent.pos = fwd_pos\n\n # Remove agent from old cell\n if cur_cell == agent:\n self.grid.set(*cur_pos, None)\n else:\n assert cur_cell.can_overlap()\n cur_cell.agents.remove(agent)\n\n # Add agent's agents to old cell\n for left_behind in agent.agents:\n cur_obj = self.grid.get(*cur_pos)\n if cur_obj is None:\n self.grid.set(*cur_pos, left_behind)\n elif cur_obj.can_overlap():\n cur_obj.agents.append(left_behind)\n else: # How was \"agent\" there in teh first place?\n raise 
ValueError(\"?!?!?!\")\n\n # After moving, the agent shouldn't contain any other agents.\n agent.agents = [] \n # test_integrity(f\"After moving {agent.color} fellow\")\n\n # Rewards can be got iff. fwd_cell has a \"get_reward\" method\n if hasattr(fwd_cell, 'get_reward'):\n rwd = fwd_cell.get_reward(agent)\n if bool(self.reward_decay):\n rwd *= (1.0-0.9*(self.step_count/self.max_steps))\n step_rewards[agent_no] += rwd\n agent.reward(rwd)\n \n\n if isinstance(fwd_cell, (Lava, Goal)):\n agent.done = True\n\n # TODO: verify pickup/drop/toggle logic in an environment that \n # supports the relevant interactions.\n # Pick up an object\n elif action == agent.actions.pickup:\n if fwd_cell and fwd_cell.can_pickup():\n if agent.carrying is None:\n agent.carrying = fwd_cell\n agent.carrying.cur_pos = np.array([-1, -1])\n self.grid.set(*fwd_pos, None)\n else:\n pass\n\n # Drop an object\n elif action == agent.actions.drop:\n if not fwd_cell and agent.carrying:\n self.grid.set(*fwd_pos, agent.carrying)\n agent.carrying.cur_pos = fwd_pos\n agent.carrying = None\n else:\n pass\n\n # Toggle/activate an object\n elif action == agent.actions.toggle:\n if fwd_cell:\n wasted = bool(fwd_cell.toggle(agent, fwd_pos))\n else:\n pass\n\n # Done action (not used by default)\n elif action == agent.actions.done:\n pass\n\n else:\n raise ValueError(f\"Environment can't handle action {action}.\")\n\n agent.on_step(fwd_cell if agent_moved else None)\n\n \n # If any of the agents individually are \"done\" (hit lava or in some cases a goal) \n # but the env requires respawning, then respawn those agents.\n for agent in self.agents:\n if agent.done:\n if self.respawn:\n resting_place_obj = self.grid.get(*agent.pos)\n if resting_place_obj == agent:\n if agent.agents:\n self.grid.set(*agent.pos, agent.agents[0])\n agent.agents[0].agents += agent.agents[1:]\n else:\n self.grid.set(*agent.pos, None)\n else:\n resting_place_obj.agents.remove(agent)\n resting_place_obj.agents += agent.agents[:]\n agent.agents = []\n \n agent.reset(new_episode=False)\n self.place_obj(agent, **self.agent_spawn_kwargs)\n agent.activate()\n else: # if the agent shouldn't be respawned, then deactivate it.\n agent.deactivate()\n\n # The episode overall is done if all the agents are done, or if it exceeds the step limit.\n done = (self.step_count >= self.max_steps) or all([agent.done for agent in self.agents])\n\n obs = [self.gen_agent_obs(agent) for agent in self.agents]\n\n return obs, step_rewards, done, {}\n\n def put_obj(self, obj, i, j):\n \"\"\"\n Put an object at a specific position in the grid. Replace anything that is already there.\n \"\"\"\n self.grid.set(i, j, obj)\n if obj is not None:\n obj.set_position((i,j))\n return True\n\n def try_place_obj(self,obj, pos):\n ''' Try to place an object at a certain position in the grid.\n If it is possible, then do so and return True.\n Otherwise do nothing and return False. 
'''\n # grid_obj: whatever object is already at pos.\n grid_obj = self.grid.get(*pos)\n\n # If the target position is empty, then the object can always be placed.\n if grid_obj is None:\n self.grid.set(*pos, obj)\n obj.set_position(pos)\n return True\n\n # Otherwise only agents can be placed, and only if the target position can_overlap.\n if not (grid_obj.can_overlap() and obj.is_agent):\n return False\n\n # If ghost mode is off and there's already an agent at the target cell, the agent can't\n # be placed there.\n if (not self.ghost_mode) and (grid_obj.is_agent or (len(grid_obj.agents)>0)):\n return False\n\n grid_obj.agents.append(obj)\n obj.set_position(pos)\n return True\n\n def place_obj(self, obj, top=(0,0), size=None, reject_fn=None, max_tries=1e5):\n max_tries = int(max(1, min(max_tries, 1e5)))\n top = (max(top[0], 0), max(top[1], 0))\n if size is None:\n size = (self.grid.width, self.grid.height)\n bottom = (min(top[0] + size[0], self.grid.width), min(top[1] + size[1], self.grid.height))\n\n # agent_positions = [tuple(agent.pos) if agent.pos is not None else None for agent in self.agents]\n for try_no in range(max_tries):\n pos = self.np_random.integers(top, bottom)\n if (reject_fn is not None) and reject_fn(pos):\n continue\n else:\n if self.try_place_obj(obj, pos):\n break\n else:\n raise RecursionError(\"Rejection sampling failed in place_obj.\")\n\n return pos\n\n def place_agents(self, top=None, size=None, rand_dir=True, max_tries=1000):\n # warnings.warn(\"Placing agents with the function place_agents is deprecated.\")\n pass\n\n def render(\n self,\n mode=\"human\",\n close=False,\n highlight=True,\n tile_size=TILE_PIXELS,\n show_agent_views=True,\n max_agents_per_col=3,\n agent_col_width_frac = 0.3,\n agent_col_padding_px = 2,\n pad_grey = 100\n ):\n \"\"\"\n Render the whole-grid human view\n \"\"\"\n\n if close:\n if self.window:\n self.window.close()\n return\n\n if mode == \"human\" and not self.window:\n # from gym.envs.classic_control.rendering import SimpleImageViewer\n\n self.window = SimpleImageViewer(caption=\"Marlgrid\")\n\n # Compute which cells are visible to the agent\n highlight_mask = np.full((self.width, self.height), False, dtype=np.bool)\n for agent in self.agents:\n if agent.active:\n xlow, ylow, xhigh, yhigh = agent.get_view_exts()\n dxlow, dylow = max(0, 0-xlow), max(0, 0-ylow)\n dxhigh, dyhigh = max(0, xhigh-self.grid.width), max(0, yhigh-self.grid.height)\n if agent.see_through_walls:\n highlight_mask[xlow+dxlow:xhigh-dxhigh, ylow+dylow:yhigh-dyhigh] = True\n else:\n a,b = self.gen_obs_grid(agent)\n highlight_mask[xlow+dxlow:xhigh-dxhigh, ylow+dylow:yhigh-dyhigh] |= (\n rotate_grid(b, a.orientation)[dxlow:(xhigh-xlow)-dxhigh, dylow:(yhigh-ylow)-dyhigh]\n )\n\n\n # Render the whole grid\n img = self.grid.render(\n tile_size, highlight_mask=highlight_mask if highlight else None\n )\n rescale = lambda X, rescale_factor=2: np.kron(\n X, np.ones((int(rescale_factor), int(rescale_factor), 1))\n )\n\n if show_agent_views:\n\n target_partial_width = int(img.shape[0]*agent_col_width_frac-2*agent_col_padding_px)\n target_partial_height = (img.shape[1]-2*agent_col_padding_px)//max_agents_per_col\n\n agent_views = [self.gen_agent_obs(agent) for agent in self.agents]\n agent_views = [view['pov'] if isinstance(view, dict) else view for view in agent_views]\n agent_views = [rescale(view, min(target_partial_width/view.shape[0], target_partial_height/view.shape[1])) for view in agent_views]\n # import pdb; pdb.set_trace()\n agent_views = 
[agent_views[pos:pos+max_agents_per_col] for pos in range(0, len(agent_views), max_agents_per_col)]\n\n f_offset = lambda view: np.array([target_partial_height - view.shape[1], target_partial_width - view.shape[0]])//2\n \n cols = []\n for col_views in agent_views:\n col = np.full(( img.shape[0],target_partial_width+2*agent_col_padding_px,3), pad_grey, dtype=np.uint8)\n for k, view in enumerate(col_views):\n offset = f_offset(view) + agent_col_padding_px\n offset[0] += k*target_partial_height\n col[offset[0]:offset[0]+view.shape[0], offset[1]:offset[1]+view.shape[1],:] = view\n cols.append(col)\n\n img = np.concatenate((img, *cols), axis=1)\n\n if mode == \"human\":\n if not self.window.isopen:\n self.window.imshow(img)\n self.window.window.set_caption(\"Marlgrid\")\n else:\n self.window.imshow(img)\n\n return img" }, { "identifier": "MultiGrid", "path": "marlgrid/base.py", "snippet": "class MultiGrid:\n\n tile_cache = {}\n\n def __init__(self, shape, obj_reg=None, orientation=0):\n self.orientation = orientation\n if isinstance(shape, tuple):\n self.width, self.height = shape\n self.grid = np.zeros((self.width, self.height), dtype=np.uint8) # w,h\n elif isinstance(shape, np.ndarray):\n self.width, self.height = shape.shape\n self.grid = shape\n else:\n raise ValueError(\"Must create grid from shape tuple or array.\")\n\n if self.width < 3 or self.height < 3:\n raise ValueError(\"Grid needs width, height >= 3\")\n\n self.obj_reg = ObjectRegistry(objs=[None]) if obj_reg is None else obj_reg\n\n @property\n def opacity(self):\n transparent_fun = np.vectorize(lambda k: (self.obj_reg.key_to_obj_map[k].see_behind() if hasattr(self.obj_reg.key_to_obj_map[k], 'see_behind') else True))\n return ~transparent_fun(self.grid)\n\n def __getitem__(self, *args, **kwargs):\n return self.__class__(\n np.ndarray.__getitem__(self.grid, *args, **kwargs),\n obj_reg=self.obj_reg,\n orientation=self.orientation,\n )\n\n def rotate_left(self, k=1):\n return self.__class__(\n rotate_grid(self.grid, rot_k=k), # np.rot90(self.grid, k=k),\n obj_reg=self.obj_reg,\n orientation=(self.orientation - k) % 4,\n )\n\n\n def slice(self, topX, topY, width, height, rot_k=0):\n \"\"\"\n Get a subset of the grid\n \"\"\"\n sub_grid = self.__class__(\n (width, height),\n obj_reg=self.obj_reg,\n orientation=(self.orientation - rot_k) % 4,\n )\n x_min = max(0, topX)\n x_max = min(topX + width, self.width)\n y_min = max(0, topY)\n y_max = min(topY + height, self.height)\n\n x_offset = x_min - topX\n y_offset = y_min - topY\n sub_grid.grid[\n x_offset : x_max - x_min + x_offset, y_offset : y_max - y_min + y_offset\n ] = self.grid[x_min:x_max, y_min:y_max]\n\n sub_grid.grid = rotate_grid(sub_grid.grid, rot_k)\n\n sub_grid.width, sub_grid.height = sub_grid.grid.shape\n\n return sub_grid\n\n def set(self, i, j, obj):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n self.grid[i, j] = self.obj_reg.get_key(obj)\n\n def get(self, i, j):\n assert i >= 0 and i < self.width\n assert j >= 0 and j < self.height\n\n return self.obj_reg.key_to_obj_map[self.grid[i, j]]\n\n def horz_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.width - x\n for i in range(0, length):\n self.set(x + i, y, obj_type())\n\n def vert_wall(self, x, y, length=None, obj_type=Wall):\n if length is None:\n length = self.height - y\n for j in range(0, length):\n self.set(x, y + j, obj_type())\n\n def wall_rect(self, x, y, w, h, obj_type=Wall):\n self.horz_wall(x, y, w, obj_type=obj_type)\n self.horz_wall(x, y + h - 1, 
w, obj_type=obj_type)\n self.vert_wall(x, y, h, obj_type=obj_type)\n self.vert_wall(x + w - 1, y, h, obj_type=obj_type)\n\n def __str__(self):\n render = (\n lambda x: \" \"\n if x is None or not hasattr(x, \"str_render\")\n else x.str_render(dir=self.orientation)\n )\n hstars = \"*\" * (2 * self.width + 2)\n return (\n hstars\n + \"\\n\"\n + \"\\n\".join(\n \"*\" + \"\".join(render(self.get(i, j)) for i in range(self.width)) + \"*\"\n for j in range(self.height)\n )\n + \"\\n\"\n + hstars\n )\n\n def encode(self, vis_mask=None):\n \"\"\"\n Produce a compact numpy encoding of the grid\n \"\"\"\n\n if vis_mask is None:\n vis_mask = np.ones((self.width, self.height), dtype=bool)\n\n array = np.zeros((self.width, self.height, 3), dtype=\"uint8\")\n\n for i in range(self.width):\n for j in range(self.height):\n if vis_mask[i, j]:\n v = self.get(i, j)\n if v is None:\n array[i, j, :] = 0\n else:\n array[i, j, :] = v.encode()\n return array\n\n @classmethod\n def decode(cls, array):\n raise NotImplementedError\n width, height, channels = array.shape\n assert channels == 3\n vis_mask[i, j] = np.ones(shape=(width, height), dtype=np.bool)\n grid = cls((width, height))\n\n \n @classmethod\n def cache_render_fun(cls, key, f, *args, **kwargs):\n if key not in cls.tile_cache:\n cls.tile_cache[key] = f(*args, **kwargs)\n return np.copy(cls.tile_cache[key])\n\n @classmethod\n def cache_render_obj(cls, obj, tile_size, subdivs):\n if obj is None:\n return cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)\n else:\n img = cls.cache_render_fun(\n (tile_size, obj.__class__.__name__, *obj.encode()),\n cls.render_object, obj, tile_size, subdivs\n )\n if hasattr(obj, 'render_post'):\n return obj.render_post(img)\n else:\n return img\n\n @classmethod\n def empty_tile(cls, tile_size, subdivs):\n alpha = max(0, min(20, tile_size-10))\n img = np.full((tile_size, tile_size, 3), alpha, dtype=np.uint8)\n img[1:,:-1] = 0\n return img\n\n @classmethod\n def render_object(cls, obj, tile_size, subdivs):\n img = np.zeros((tile_size*subdivs,tile_size*subdivs, 3), dtype=np.uint8)\n obj.render(img)\n # if 'Agent' not in obj.type and len(obj.agents) > 0:\n # obj.agents[0].render(img)\n return downsample(img, subdivs).astype(np.uint8)\n\n @classmethod\n def blend_tiles(cls, img1, img2):\n '''\n This function renders one \"tile\" on top of another. 
Kinda janky, works surprisingly well.\n Assumes img2 is a downscaled monochromatic with a black (0,0,0) background.\n '''\n alpha = img2.sum(2, keepdims=True)\n max_alpha = alpha.max()\n if max_alpha == 0:\n return img1\n return (\n ((img1 * (max_alpha-alpha))+(img2*alpha)\n )/max_alpha\n ).astype(img1.dtype)\n\n @classmethod\n def render_tile(cls, obj, tile_size=TILE_PIXELS, subdivs=3, top_agent=None):\n subdivs = 3\n\n if obj is None:\n img = cls.cache_render_obj(obj, tile_size, subdivs)\n else:\n if ('Agent' in obj.type) and (top_agent in obj.agents):\n # If the tile is a stack of agents that includes the top agent, then just render the top agent.\n img = cls.cache_render_obj(top_agent, tile_size, subdivs)\n else: \n # Otherwise, render (+ downsize) the item in the tile.\n img = cls.cache_render_obj(obj, tile_size, subdivs)\n # If the base obj isn't an agent but has agents on top, render an agent and blend it in.\n if len(obj.agents)>0 and 'Agent' not in obj.type:\n if top_agent in obj.agents:\n img_agent = cls.cache_render_obj(top_agent, tile_size, subdivs)\n else:\n img_agent = cls.cache_render_obj(obj.agents[0], tile_size, subdivs)\n img = cls.blend_tiles(img, img_agent)\n\n # Render the tile border if any of the corners are black.\n if (img[([0,0,-1,-1],[0,-1,0,-1])]==0).all(axis=-1).any():\n img = img + cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)\n return img\n\n def render(self, tile_size, highlight_mask=None, visible_mask=None, top_agent=None):\n width_px = self.width * tile_size\n height_px = self.height * tile_size\n\n img = np.zeros(shape=(height_px, width_px), dtype=np.uint8)[...,None]+COLORS['shadow']\n\n for j in range(0, self.height):\n for i in range(0, self.width):\n if visible_mask is not None and not visible_mask[i,j]:\n continue\n obj = self.get(i, j)\n\n tile_img = MultiGrid.render_tile(\n obj,\n tile_size=tile_size,\n top_agent=top_agent\n )\n\n ymin = j * tile_size\n ymax = (j + 1) * tile_size\n xmin = i * tile_size\n xmax = (i + 1) * tile_size\n\n img[ymin:ymax, xmin:xmax, :] = rotate_grid(tile_img, self.orientation)\n \n if highlight_mask is not None:\n hm = np.kron(highlight_mask.T, np.full((tile_size, tile_size), 255, dtype=np.uint16)\n )[...,None] # arcane magic.\n img = np.right_shift(img.astype(np.uint16)*8+hm*2, 3).clip(0,255).astype(np.uint8)\n\n return img" } ]
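MultiGrid.blend_tiles above explains its alpha blend only in prose ("assumes img2 is a downscaled monochromatic with a black background"). A tiny standalone check of the same formula shows why the background survives wherever the overlay is black:

import numpy as np

img1 = np.full((2, 2, 3), 100, dtype=np.uint8)  # background tile
img2 = np.zeros((2, 2, 3), dtype=np.uint8)      # overlay, black except one pixel
img2[0, 0] = (30, 60, 90)

alpha = img2.sum(2, keepdims=True)              # per-pixel channel sum acts as alpha
max_alpha = alpha.max()
blended = ((img1 * (max_alpha - alpha) + img2 * alpha) / max_alpha).astype(img1.dtype)

print(blended[1, 1])  # [100 100 100] -- untouched background
print(blended[0, 0])  # [ 30  60  90] -- fully replaced where alpha is maximal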
from ..base import MultiGridEnv, MultiGrid
from ..objects import *
7,087
class DoorKeyEnv(MultiGridEnv):
    """
    Environment with a door and key, sparse reward.
    Similar to DoorKeyEnv in
    https://github.com/maximecb/gym-minigrid/blob/master/gym_minigrid/envs/doorkey.py
    """

    mission = "use the key to open the door and then get to the goal"
    metadata = {}

    def _gen_grid(self, width, height):
        # Create an empty grid
class DoorKeyEnv(MultiGridEnv):
    """
    Environment with a door and key, sparse reward.
    Similar to DoorKeyEnv in
    https://github.com/maximecb/gym-minigrid/blob/master/gym_minigrid/envs/doorkey.py
    """

    mission = "use the key to open the door and then get to the goal"
    metadata = {}

    def _gen_grid(self, width, height):
        # Create an empty grid
self.grid = MultiGrid((width, height))
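The cropped _gen_grid stops right after its first comment, and this next_line only creates the MultiGrid. A hypothetical continuation in the spirit of the upstream gym-minigrid DoorKeyEnv is sketched below; Door and Key are assumed to come from the star-imported ..objects module and their constructor signatures are guesses (only wall_rect, vert_wall, put_obj, place_obj and np_random are visible in the snippets above):

self.grid = MultiGrid((width, height))

# surround the grid with walls
self.grid.wall_rect(0, 0, width, height)

# goal object in the bottom-right corner
self.put_obj(Goal(), width - 2, height - 2)

# a vertical wall splits the room; put a door in it and a key on the near side
split = self.np_random.integers(2, width - 2)
self.grid.vert_wall(split, 0)
door_y = self.np_random.integers(1, height - 2)
self.put_obj(Door("yellow"), split, door_y)
self.place_obj(Key("yellow"), top=(0, 0), size=(split, height))

# agents themselves are placed by MultiGridEnv.reset() via place_obj and agent_spawn_kwargs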
1
2023-12-24 06:50:38+00:00
8k
gh-PonyM/textual-jsonschema-form
textual_jsonschema_form/converter.py
[ { "identifier": "JSONFieldParametersBase", "path": "textual_jsonschema_form/core.py", "snippet": "def strip_cmp_path(ref: str) -> str:\n def get_factory(self):\n def field_label(self):\n def get_options(self):\n def used_imports(self) -> Generator[str, None, None]:\n def default(self) -> str | None:\n def format(self) -> str | None:\n def __post_init__(self):\n def validate_params(cls, params: dict) -> set[str]:\n def extract(\n cls, params: dict, available: set[str]\n ) -> tuple[list[ValidatorType], dict]:\n def from_json_field(cls, field_name: str, required: bool, params: dict):\nclass JSONFieldParametersBase(Generic[FactoryType, ValidatorType], abc.ABC):" }, { "identifier": "ArrayField", "path": "textual_jsonschema_form/fields.py", "snippet": "class ArrayField(FormField):\n # TODO: How to annotate the callable using the protocol, but complains it needs a widget\n\n ACTION_BTN_CLASS_ADD: ClassVar[str] = \"jsonform-action-btn-add\"\n ACTION_BTN_CLASS_REMOVE: ClassVar[str] = \"jsonform-action-btn-remove\"\n\n def __init__(\n self,\n id: str,\n subfield_factory: Callable,\n label: str,\n required: bool,\n data: Iterable | None = None,\n parent_id: str | None = None,\n parent_label: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(\n id=id,\n classes=classes or BaseForm.FORM_INPUTS_CONTAINER_CLASS,\n )\n self.subfield_factory = subfield_factory\n self.label = label\n self.data = data\n self.parent_id = parent_id\n self.parent_label = parent_label\n self.required = required\n\n @property\n def form_input_query(self) -> str:\n \"\"\"Query string for all inputs fields that are immediate children\"\"\"\n return f\"#{self.id} > .{BaseForm.FORM_INPUTS_CLASS}\"\n\n @property\n def form_input_fields(self):\n \"\"\"Returns a dom query of all immediate children, that follow the protocol of a Field\"\"\"\n return self.query(self.form_input_query)\n\n def on_mount(self):\n if self.data:\n self.add_input_fields(self.data)\n\n @property\n def form_data(self):\n \"\"\"Returns the data from all fields and sub-forms\"\"\"\n return {BaseForm.data_key(d.id): d.form_data for d in self.form_input_fields}\n\n @form_data.setter\n def form_data(self, data: Iterable):\n self.remove_children()\n self.add_input_fields(data)\n\n def add_input_field(self, field_data: Any):\n field = self.subfield_factory(classes=BaseForm.FORM_INPUTS_CLASS)\n if field_data:\n field.form_data = field_data\n self.mount(\n Horizontal(field, ActionBtn(\"-\", classes=self.ACTION_BTN_CLASS_REMOVE))\n )\n\n def add_input_fields(self, data: Iterable):\n for item in data:\n self.add_input_field(item)\n\n @classmethod\n def action_add_btn_id(cls, field_id: str):\n return f\"{field_id}-add-field\"\n\n def compose(self):\n with Horizontal():\n yield BaseForm._label(\n BaseForm.join_labels(self.parent_label, self.label), self.required\n )\n yield ActionBtn(\n \"+\",\n id=self.action_add_btn_id(self.id), # type: ignore\n classes=self.ACTION_BTN_CLASS_ADD,\n )\n yield self.subfield_factory(classes=BaseForm.FORM_INPUTS_CLASS)\n\n @on(ActionBtn.Pressed)\n def handle_action_buttons(self, event: ActionBtn.Pressed):\n if self.ACTION_BTN_CLASS_ADD in event.button.classes:\n self.add_input_field(None)\n elif self.ACTION_BTN_CLASS_REMOVE in event.button.classes:\n event.button.parent.query_one(f\".{BaseForm.FORM_INPUTS_CLASS}\").remove() # type: ignore\n\n @property\n def is_valid(self) -> bool:\n \"\"\"Returns in all fields are valid. 
The field should be validated if it wasn't already when\n calling this property.\"\"\"\n return all(tuple(d.is_valid for d in self.form_input_fields))\n\n def validated(self) -> bool:\n return True" }, { "identifier": "FormInput", "path": "textual_jsonschema_form/fields.py", "snippet": "class FormInput(Input):\n \"\"\"The base input class\"\"\"\n\n INPUT_DATE_FORMAT: ClassVar[str] = \"%d.%m.%Y\"\n INPUT_DATETIME_FORMAT: ClassVar[str] = \"%d.%m.%Y %H:%M\"\n INPUT_VALIDATE_ON: ClassVar[Iterable[InputValidationOn] | None] = (\"changed\",)\n ALWAYS_REVALIDATE_FORMATS: ClassVar[set[str]] = {\n \"path\",\n \"directory-path\",\n \"file-path\",\n }\n\n def __init__(\n self,\n value: str | None = None,\n placeholder: str = \"\",\n highlighter: Highlighter | None = None,\n password: bool = False,\n *,\n restrict: str | None = None,\n type: InputType = \"text\",\n max_length: int = 0,\n suggester: Suggester | None = None,\n validators: Validator | Iterable[Validator] | None = None,\n validate_on: Iterable[InputValidationOn] | None = None,\n valid_empty: bool = False,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n format: str | None = None,\n minimum: int | None = None,\n maximum: int | None = None,\n exclusive_minimum: int | None = None,\n exclusive_maximum: int | None = None,\n ) -> None:\n # If no validators are given and the valid_empty is False, no validation occurs\n # Subsequent validator can pass when an empty value is given, e.g. for path validation\n if not validators and not valid_empty:\n validators = [Function(empty_value, \"Field can not be empty\")]\n super().__init__(\n value,\n placeholder,\n highlighter,\n password,\n restrict=restrict,\n type=type,\n max_length=max_length,\n suggester=suggester,\n validate_on=validate_on or self.INPUT_VALIDATE_ON,\n validators=validators,\n valid_empty=valid_empty,\n name=name,\n id=id,\n classes=classes,\n disabled=disabled,\n )\n if format:\n validator = self.get_validator_for_format(format)\n if not validator:\n raise NotImplementedError(f\"format {format} not supported\")\n self.validators.append(validator)\n if any(\n f is not None\n for f in (exclusive_minimum, exclusive_maximum, minimum, maximum)\n ):\n self.validators.append(\n NumberRange(minimum, maximum, exclusive_minimum, exclusive_maximum)\n )\n self.format = format\n\n @classmethod\n def get_validator_for_format(cls, fmt: str) -> Validator | None:\n \"\"\"Returns the validator for a specific format. 
If the field is optional, use a validator that\n returns True on empty values\"\"\"\n return {\n \"date\": Function(\n partial(valid_date_by_format, date_fmt=cls.INPUT_DATE_FORMAT),\n \"Is not a valid date of format\"\n f\" '{example_date(cls.INPUT_DATE_FORMAT)}'\",\n ),\n \"date-time\": Function(\n partial(valid_date_by_format, date_fmt=cls.INPUT_DATETIME_FORMAT),\n \"Is not a valid date of format\"\n f\" '{example_date(cls.INPUT_DATETIME_FORMAT)}'\",\n ),\n \"file-path\": Function(\n valid_file_path,\n \"File does not exist\",\n ),\n \"directory-path\": Function(\n valid_folder,\n \"Directory does not exist\",\n ),\n \"path\": Function(\n is_absolute_path,\n \"Use absolute paths or '~' to expand the current user\",\n ),\n }.get(fmt)\n\n @classmethod\n def _getter_formatter(cls, fmt: str | None, default: Callable[[str], Any]):\n \"\"\"Formatters for conversion from field value to the correct jsonschema type and format.\"\"\"\n return {\n \"number\": float,\n \"integer\": int,\n \"path\": Path,\n \"file-path\": Path,\n \"directory-path\": Path,\n \"date\": lambda x: datetime.strptime(x, cls.INPUT_DATETIME_FORMAT).date(),\n \"date-time\": lambda x: datetime.strptime(x, cls.INPUT_DATETIME_FORMAT),\n None: default,\n }.get(fmt, default)\n\n @classmethod\n def _setter_formatter(cls, fmt: str | None, default: Callable[[Any], str]):\n \"\"\"Returns a special setter function that is responsible to populate the field value with\n the string representation of this data. The representation must be compatible with validators if any.\n \"\"\"\n return {\n \"date\": partial(date_to_string, fmt=cls.INPUT_DATE_FORMAT),\n \"date-time\": partial(date_to_string, fmt=cls.INPUT_DATETIME_FORMAT),\n \"path\": lambda p: str(p),\n \"file-path\": lambda p: str(p),\n \"directory-path\": lambda p: str(p),\n None: lambda x: str(x),\n }.get(fmt, default)\n\n def validated(self) -> bool:\n \"\"\"Checks if the field has any validators. Some formats are always re-validated on submit,\n e.g. for a file path\"\"\"\n if not self.validate_on and not self.validators:\n return True\n if self.format in self.ALWAYS_REVALIDATE_FORMATS:\n return False\n classes = self.classes\n if \"-valid\" in classes or \"-invalid\" in classes:\n return True\n return False\n\n @property\n def is_valid(self) -> bool:\n \"\"\"Checks if the field is valid, calling the 'validate' if the field needs to be re-validated\"\"\"\n if not self.validated():\n self.validate(self.value)\n return super().is_valid\n\n @property\n def form_data(self):\n return (\n self._getter_formatter(self.format or self.type, lambda x: x)(self.value)\n if self.value != \"\"\n else None\n )\n\n @form_data.setter\n def form_data(self, data):\n if data is None:\n self.value = \"\"\n return\n self.value = self._setter_formatter(self.format, lambda x: str(x))(data)" }, { "identifier": "FormStrMultiSelect", "path": "textual_jsonschema_form/fields.py", "snippet": "class FormStrMultiSelect(SelectionList[str]):\n \"\"\"A SelectionList implementation for strings\"\"\"\n\n @property\n def is_valid(self) -> bool:\n return True\n\n def validated(self) -> bool:\n return True\n\n @classmethod\n def from_obj(\n cls, data: Iterable[str], options: Iterable[str], **kwargs\n ) -> FormStrMultiSelect:\n choices: tuple[tuple[str, str] | tuple[str, str, bool], ...] 
= tuple(\n (t, t, True) if t in (data or []) else (t, t) for t in options\n )\n return cls(*choices, **kwargs)\n\n @property\n def form_data(self) -> list[str]:\n return self.selected\n\n @form_data.setter\n def form_data(self, data):\n self.deselect_all()\n for item in data:\n self.select(item)" }, { "identifier": "FormStrSelect", "path": "textual_jsonschema_form/fields.py", "snippet": "class FormStrSelect(Select[str]):\n \"\"\"A Select Widget implementation for strings\"\"\"\n\n @property\n def form_data(self) -> str | None:\n if isinstance(self.value, NoSelection) or self.value == Select.BLANK:\n return None\n return self.value\n\n @form_data.setter\n def form_data(self, data):\n self.value = data\n\n @property\n def is_valid(self) -> bool:\n return True\n\n def validated(self) -> bool:\n return True" }, { "identifier": "FormSwitch", "path": "textual_jsonschema_form/fields.py", "snippet": "class FormSwitch(Switch):\n \"\"\"A Switch Widget implementation\"\"\"\n\n @property\n def form_data(self) -> bool:\n return self.value\n\n @form_data.setter\n def form_data(self, data):\n self.value = bool(data)\n\n @property\n def is_valid(self) -> bool:\n return True\n\n def validated(self) -> bool:\n return True" }, { "identifier": "textual_converter", "path": "textual_jsonschema_form/registry.py", "snippet": "class TextualConverter:\n def __get_key(self, type_: str, fmt: str | None) -> str:\n def register(self, type_: str, format: str | None = None):\n def type_format_registration(converter: type[JSONFieldParametersBase]):\n def lookup(\n self, type_: str, defs: dict | None = None\n ) -> type[JSONFieldParametersBase]:" }, { "identifier": "NumberRange", "path": "textual_jsonschema_form/validators.py", "snippet": "class NumberRange(Number):\n \"\"\"Extends the default number validator to incorporate exclusive minimum and maximum values as they appear in\n the jsonschema specification\"\"\"\n\n def __init__(\n self,\n minimum: float | int | None = None,\n maximum: float | int | None = None,\n exclusive_minimum: float | int | None = None,\n exclusive_maximum: float | int | None = None,\n ):\n super().__init__(minimum=minimum, maximum=maximum)\n self.exclusive_minimum = exclusive_minimum\n self.exclusive_maximum = exclusive_maximum\n\n def _validate_range(self, value: float) -> bool:\n is_valid = super()._validate_range(value)\n if not is_valid:\n return is_valid\n if self.exclusive_minimum is not None and value <= self.exclusive_minimum:\n return False\n if self.exclusive_maximum is not None and value >= self.exclusive_maximum:\n return False\n return True\n\n def describe_failure(self, failure: Failure) -> str | None:\n if isinstance(failure, Number.NotInRange):\n if self.exclusive_minimum is not None:\n if self.exclusive_maximum is not None:\n return (\n f\"Must be greater than {self.exclusive_minimum} and smaller\"\n f\" than {self.exclusive_maximum}\"\n )\n elif self.maximum is not None:\n return (\n f\"Must be greater than {self.exclusive_minimum} and smaller or\"\n f\" equal to {self.maximum}\"\n )\n return f\"Must be greater than {self.exclusive_minimum}\"\n elif self.exclusive_maximum is not None:\n if self.minimum is None:\n return f\"Must be smaller than {self.exclusive_maximum}\"\n return (\n f\"Must be greater or equal {self.minimum} and smaller than\"\n f\" {self.exclusive_maximum}\"\n )\n return super().describe_failure(failure)" } ]
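NumberRange above extends textual's Number validator with jsonschema-style exclusive bounds. A short usage sketch (textual validators take the raw input string and return a ValidationResult):

rng = NumberRange(exclusive_minimum=0, maximum=10)

print(rng.validate("5").is_valid)   # True: 0 < 5 <= 10
print(rng.validate("0").is_valid)   # False: the minimum is exclusive
print(rng.validate("11").is_valid)  # False: above the inclusive maximum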
from collections.abc import Iterable
from dataclasses import dataclass
from typing import ClassVar

from textual.suggester import SuggestFromList
from textual.validation import Integer, Number, Validator

from .core import JSONFieldParametersBase, ValidatorType, strip_cmp_path
from .fields import ArrayField, FormInput, FormStrMultiSelect, FormStrSelect, FormSwitch
from .registry import textual_converter
from .validators import NumberRange
4,149
from __future__ import annotations


class InputBase(JSONFieldParametersBase[FormInput, Validator]):
    factory: ClassVar[type[FormInput]] = FormInput

    @classmethod
    def extract(cls, params: dict, available: set[str]) -> tuple[list[Validator], dict]:
        attrs = {}
        if "default" in available:
            attrs["default"] = params.get("default")
        return [], attrs

    def get_options(self):
        """These are all kwargs that the default input field takes"""
        value = self.attrs.get("default", "")
        return {
            "valid_empty": not (self.required or value),
            "value": str(value),
            "name": self.field_name,
            "placeholder": self.description,
            "restrict": self.attrs.get("restrict"),
            "validate_on": ("changed", "submitted"),
            "password": self.attrs.get("password", False),
            "max_length": self.attrs.get("max_length", 0),
            "suggester": self.attrs.get("suggester"),
            "validators": self.validators,
            "type": {"string": "text"}.get(self.type, self.type),
        }

    def get_factory(self) -> type[FormInput]:
        return self.factory


@textual_converter.register("string")
@dataclass
class TextualStringParam(InputBase):
    supported = {"string"}
    allowed = {
        "format",
        "pattern",
        "enum",
        "default",
    }
    ignore = JSONFieldParametersBase.ignore | {"minLength", "maxLength", "writeOnly"}
    SUGGESTER_FOR_ENUM: ClassVar[bool] = False

    @classmethod
    def extract(cls, params: dict, available: set[str]):
        validators, attrs = InputBase.extract(params, available)
        if "enum" in available:
            if cls.SUGGESTER_FOR_ENUM:
                attrs["suggester"] = SuggestFromList(
                    params["enum"], case_sensitive=False
                )
            attrs["choices"] = params["enum"]
        if "maxLength" in available:
            attrs["max_length"] = int(params["maxLength"])
        if "pattern" in available:
            attrs["restrict"] = params["pattern"]
        if "format" in available:
            fmt = attrs["format"] = params["format"]
            if fmt == "password":
                attrs["password"] = True
        return validators, attrs

    def get_factory(self):
        return (
from __future__ import annotations


class InputBase(JSONFieldParametersBase[FormInput, Validator]):
    factory: ClassVar[type[FormInput]] = FormInput

    @classmethod
    def extract(cls, params: dict, available: set[str]) -> tuple[list[Validator], dict]:
        attrs = {}
        if "default" in available:
            attrs["default"] = params.get("default")
        return [], attrs

    def get_options(self):
        """These are all kwargs that the default input field takes"""
        value = self.attrs.get("default", "")
        return {
            "valid_empty": not (self.required or value),
            "value": str(value),
            "name": self.field_name,
            "placeholder": self.description,
            "restrict": self.attrs.get("restrict"),
            "validate_on": ("changed", "submitted"),
            "password": self.attrs.get("password", False),
            "max_length": self.attrs.get("max_length", 0),
            "suggester": self.attrs.get("suggester"),
            "validators": self.validators,
            "type": {"string": "text"}.get(self.type, self.type),
        }

    def get_factory(self) -> type[FormInput]:
        return self.factory


@textual_converter.register("string")
@dataclass
class TextualStringParam(InputBase):
    supported = {"string"}
    allowed = {
        "format",
        "pattern",
        "enum",
        "default",
    }
    ignore = JSONFieldParametersBase.ignore | {"minLength", "maxLength", "writeOnly"}
    SUGGESTER_FOR_ENUM: ClassVar[bool] = False

    @classmethod
    def extract(cls, params: dict, available: set[str]):
        validators, attrs = InputBase.extract(params, available)
        if "enum" in available:
            if cls.SUGGESTER_FOR_ENUM:
                attrs["suggester"] = SuggestFromList(
                    params["enum"], case_sensitive=False
                )
            attrs["choices"] = params["enum"]
        if "maxLength" in available:
            attrs["max_length"] = int(params["maxLength"])
        if "pattern" in available:
            attrs["restrict"] = params["pattern"]
        if "format" in available:
            fmt = attrs["format"] = params["format"]
            if fmt == "password":
                attrs["password"] = True
        return validators, attrs

    def get_factory(self):
        return (
FormStrSelect
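The cropped code ends mid-statement in TextualStringParam.get_factory, and this next_line shows it continuing with FormStrSelect. A plausible completion, given that extract() stores enum values under attrs["choices"], would be the following (an assumption, not the library's verified code):

def get_factory(self):
    return (
        FormStrSelect
        if "choices" in self.attrs
        else super().get_factory()
    )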
4
2023-12-26 17:05:27+00:00
8k
smonsays/modular-hyperteacher
tests/data/envs/test_grid.py
[ { "identifier": "MOVES", "path": "metax/data/envs/grid.py", "snippet": "class MOVES(enum.Enum):\n UP = 0\n RIGHT = 1\n DOWN = 2\n LEFT = 3" }, { "identifier": "CompositionalGrid", "path": "metax/data/envs/grid.py", "snippet": "class CompositionalGrid(Environment):\n def __init__(\n self,\n grid_size: int,\n num_interactions: int,\n num_mazes: int,\n num_objects: int,\n num_distractors: int,\n frac_ood: float,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n assert grid_size > 5, \"grid_size must be greater than 5\"\n\n self.grid_size = grid_size\n self.num_interactions = num_interactions\n self.num_directions = 4 # split grid into 4 quadrants for the goal position\n self.num_objects = num_objects\n self.num_mazes = num_mazes\n self.num_distractors = num_distractors\n self.frac_ood = frac_ood\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n self.num_factors = 4 # direction, interaction, maze, object\n\n # Static matrices\n self._delta_position = jnp.concatenate((\n jnp.array([[-1, 0], [0, 1], [1, 0], [0, -1]]), # up, right, down, left\n jnp.zeros((self.num_interactions, 2), dtype=jnp.int32), # no movement for interaction\n ))\n size_low, size_high = grid_size // 2, (grid_size // 2) + grid_size % 2\n self._quadrants = jnp.stack((\n np.block([\n [np.ones((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.ones((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.ones((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.ones((size_low, size_low))]\n ]),\n ))\n\n # Pregenerate possible goals and randomly split into in/out of distribution\n self.tasks_all = np.array(list(itertools.product(\n range(self.num_directions),\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n if self.task_support == \"non_compositional\":\n # in/out split with non-compositional support\n self.tasks_in_dist = np.array(list(itertools.product(\n range(self.num_directions - 1), # hold out one goal quadrant from in_dist\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n @partial(np.vectorize, signature=\"(k),(n,k)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n self.tasks_out_dist = self.tasks_all[~elem_in_array(self.tasks_all, self.tasks_in_dist)]\n\n elif \"_hot\" in self.task_support:\n num_hot = int(self.task_support.split(\"_\")[0])\n mask = jnp.sum(self.tasks_all > 0, axis=1) <= num_hot\n self.tasks_in_dist = jnp.array(self.tasks_all[mask])\n self.tasks_out_dist = jnp.array(self.tasks_all[~mask])\n\n elif self.task_support == \"random\":\n self.tasks_all = jax.random.permutation(self.rng, self.tasks_all)\n self.num_ood = int(len(self.tasks_all) * self.frac_ood)\n self.tasks_in_dist = jnp.array(self.tasks_all[: -self.num_ood])\n self.tasks_out_dist = jnp.array(self.tasks_all[-self.num_ood:])\n\n # Make sure all features for every factor are present in the in-distribution tasks\n assert len(jnp.unique(self.tasks_in_dist[:, 0])) == self.num_directions\n assert len(jnp.unique(self.tasks_in_dist[:, 1])) == self.num_interactions\n 
assert len(jnp.unique(self.tasks_in_dist[:, 2])) == self.num_mazes\n assert len(jnp.unique(self.tasks_in_dist[:, 3])) == self.num_objects\n else:\n raise ValueError(f\"Invalid task support: {self.task_support}\")\n\n assert len(self.tasks_in_dist) > 0\n assert len(self.tasks_out_dist) > 0\n\n # Create random mazes\n if self.num_mazes > 0:\n self.mazes = jnp.stack([\n self.generate_random_maze(self.grid_size, seed=self.seed + i)\n for i in range(self.num_mazes)\n ])\n else:\n self.mazes = jnp.zeros((1, self.grid_size, self.grid_size))\n\n # Precompute optimal paths, this is potentially expensive for large grid sizes\n optimal_paths, shortest_paths = list(\n zip(*[self._precompute_optimal_paths(m) for m in self.mazes])\n )\n self.optimal_paths, shortest_paths = jnp.stack(optimal_paths), jnp.stack(shortest_paths)\n self.valid_goal_dist = shortest_paths >= self.grid_size\n\n @property\n def num_actions(self) -> int:\n return 4 + self.num_interactions\n\n @property\n def observation_shape(self) -> Tuple[int]:\n # encodes positions of agent, objects and walls\n return (self.grid_size, self.grid_size, self.num_objects + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n assert mode in [\"ood\", \"test\", \"train\"]\n if mode == \"ood\":\n task_code = jax.random.choice(rng, self.tasks_out_dist)\n else:\n task_code = jax.random.choice(rng, self.tasks_in_dist)\n\n task_id = jnp.ravel_multi_index(\n task_code,\n dims=(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects),\n mode=\"wrap\",\n )\n emb_dim = max(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects)\n embedding = jax.nn.one_hot(task_code, emb_dim)\n\n return CompositionalGridGoal(*task_code), {\"task_id\": task_id, \"embedding\": embedding}\n\n def reset(\n self, rng: PRNGKey, goal: Optional[CompositionalGridGoal] = None\n ) -> Tuple[CompositionalGridState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_distractor, rng_pos1, rng_pos2, rng_pos3, rng_goal = jax.random.split(rng, 5)\n\n if goal is None:\n # Sample a goal from train distribution if None specified\n goal, _ = self.reset_goal(rng_goal, mode=\"train\")\n\n # Sample distractor objects distinct from goal object\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.num_objects,\n shape=(self.num_distractors,),\n replace=True,\n p=1.0 - (jnp.arange(self.num_objects) == goal.object)\n )\n\n # Sample distinct, random positions for agent, distractors and the goal respecting direction\n position_goal = jax.random.choice(\n key=rng_pos2,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]) * self._quadrants[goal.direction]).reshape(-1),\n )\n goal_coord = self._coord_to_idx(position_goal[0][0], position_goal[0][1])\n position_agent = jax.random.choice(\n key=rng_pos1,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]).reshape(-1) * self.valid_goal_dist[goal.maze][goal_coord]),\n )\n positions_distractors = jax.random.choice(\n key=rng_pos3,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(self.num_distractors, ),\n replace=False,\n p=1.0 - self.mazes[goal.maze].reshape(-1),\n )\n\n positions = jnp.concatenate([position_goal, positions_distractors, position_agent])\n\n env_state = CompositionalGridState(\n done=False, timestep=0, distractors=distractors, positions=positions, goal=goal\n )\n emission 
= EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n\n return env_state, emission\n\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> Tuple[CompositionalGridState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1, :]\n\n # Check if agent reached goal (positive reward)\n goal_reached = jnp.logical_and(\n action == (len(MOVES) + env_state.goal.interaction),\n jnp.all(pos_agent == env_state.positions[0, :]),\n )\n reward = 1.0 * goal_reached\n\n # Move the agent to new position and check if valid\n pos_new = self._delta_position[action] + pos_agent\n pos_invalid = jnp.logical_or(\n jnp.logical_or(jnp.any(pos_new < 0), jnp.any(pos_new >= self.grid_size)), # in grid?\n self.mazes[env_state.goal.maze][pos_new[0], pos_new[1]], # in wall?\n )\n pos_new = jnp.where(pos_invalid, pos_agent, pos_new)\n\n # Update state\n positions = env_state.positions.at[-1].set(pos_new)\n env_state = CompositionalGridState(\n done=goal_reached,\n timestep=env_state.timestep + 1,\n distractors=env_state.distractors,\n positions=positions,\n goal=env_state.goal,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: CompositionalGridState) -> Array:\n \"\"\"\n Encode the environment state as an asrray of shape (grid_size, grid_size, num_factors * num_objects + 1).\n For each position in the grid, the code word has the following structure:\n [factor_0_feature_0, ..., factor_0_feature_n, ..., factor_n_feature_0, ..., factor_n_feature_n, wall?, agent?]\n \"\"\"\n objects = jnp.concatenate([jnp.array([env_state.goal.object]), env_state.distractors])\n objects_hot = jax.nn.one_hot(objects, num_classes=self.num_objects)\n pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros(self.observation_shape)\n grid = grid.at[\n jnp.expand_dims(pos_objects[:, 0], axis=1),\n jnp.expand_dims(pos_objects[:, 1], axis=1),\n :-2,\n ].set(jnp.expand_dims(objects_hot, axis=1))\n grid = grid.at[:, :, -2].set(self.mazes[env_state.goal.maze]) # walls encoded in penultimate channel\n grid = grid.at[pos_agent[0], pos_agent[1], -1].set(1.0) # agent encoded in last channel\n\n return grid\n\n def _features_to_idx(self, features: Array) -> Array:\n \"\"\"Converts features to a unique feature index\"\"\"\n idx = [factor * self.num_objects + feature for factor, feature in enumerate(features)]\n return jnp.array(idx)\n\n def _coord_to_idx(self, x, y):\n \"\"\"Converts coordinates to a unique grid index\"\"\"\n return x * self.grid_size + y\n\n def _idx_to_coord(self, idx):\n \"\"\"Converts a grid index to grid coordinates\"\"\"\n return idx // self.grid_size, idx % self.grid_size\n\n def demonstrate(\n self, rng: PRNGKey, env_state: CompositionalGridState\n ) -> EnvironmentInteraction:\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n pos_agent, pos_goal = env_state.positions[-1, :], env_state.positions[0, :]\n idx_agent, idx_goal = self._coord_to_idx(*pos_agent), self._coord_to_idx(*pos_goal)\n optimal_actions = self.optimal_paths[env_state.goal.maze][idx_agent, idx_goal]\n\n # Fill placeholder actions with correct interaction\n mask_pad = (optimal_actions == -1)\n optimal_actions *= ~mask_pad\n optimal_actions += (len(MOVES) + env_state.goal.interaction) * mask_pad\n\n def env_step(carry, action):\n rng, env_state = 
carry\n rng, rng_step = jax.random.split(rng)\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), emission\n\n _, trajectory = jax.lax.scan(env_step, (rng, env_state), optimal_actions)\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission, trajectory\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, optimal_actions\n\n def _precompute_optimal_paths(self, maze: Array):\n \"\"\"Precompute the optimal trajectories for all possible states.\"\"\"\n # Create an array that encodes the graph structure of the grid to compute all shortest paths\n coordinates, no_walls_coords = [], np.argwhere(maze == 0)\n for x, y in no_walls_coords:\n edges = []\n if x > 0 and not maze[x - 1, y]:\n edges.append([x - 1, y])\n if x < self.grid_size - 1 and not maze[x + 1, y]:\n edges.append([x + 1, y])\n if y > 0 and not maze[x, y - 1]:\n edges.append([x, y - 1])\n if y < self.grid_size - 1 and not maze[x, y + 1]:\n edges.append([x, y + 1])\n\n idx_curr = self._coord_to_idx(x, y)\n coordinates += [(idx_curr, self._coord_to_idx(i, k)) for (i, k) in edges]\n\n coordinates = np.array(coordinates)\n connectivity = np.zeros((self.grid_size**2, self.grid_size**2))\n connectivity[coordinates[:, 0], coordinates[:, 1]] = 1.0\n shortest_paths, predecessors = shortest_path(connectivity, return_predecessors=True)\n max_num_actions = (self.grid_size**2) - 1\n\n def get_path(predecessors, start, end):\n \"\"\"Get the full path from the predecessor matrix.\"\"\"\n path = [end]\n while path[-1] != start:\n path.append(predecessors[start, path[-1]])\n return path[::-1]\n\n def path_to_actions(path):\n \"\"\"Convert path to actions.\"\"\"\n # Pad with placeholder actions, need to be overwritten with correct interaction in self.demonstrate()\n actions = np.full((max_num_actions), -1)\n for i in range(len(path) - 1):\n x1, y1 = self._idx_to_coord(path[i])\n x2, y2 = self._idx_to_coord(path[i + 1])\n action = np.array([x2 - x1, y2 - y1])\n action = np.where(np.all(self._delta_position == action, axis=1))[0][0]\n actions[i] = action\n return np.array(actions)\n\n # Precompute optimal paths for all possible positions\n optimal_paths = -1 * np.ones(\n (self.grid_size**2, self.grid_size**2, max_num_actions), dtype=int\n )\n for start in no_walls_coords:\n for goal in no_walls_coords:\n start_idx, goal_idx = self._coord_to_idx(*start), self._coord_to_idx(*goal)\n path = get_path(predecessors, start_idx, goal_idx)\n actions = path_to_actions(path)\n optimal_paths[start_idx, goal_idx, :] = actions\n\n return jnp.array(optimal_paths), jnp.array(shortest_paths)\n\n @staticmethod\n def generate_random_maze(\n grid_size: int, complexity: float = 0.75, density: float = 0.75, seed: int = 0\n ):\n \"\"\"\n Generate a random maze array.\n Walls are encoded as 1 and free space as 0.\n\n Adapted from https://github.com/zuoxingdong/mazelab/blob/master/mazelab/generators/random_maze.py\n which is based on https://en.wikipedia.org/wiki/Maze_generation_algorithm\n \"\"\"\n assert grid_size % 2 == 1, \"Maze size must be odd\"\n grid_size_pad = grid_size + 2\n np_rng = np.random.default_rng(seed)\n\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (grid_size_pad + grid_size_pad)))\n density 
= int(density * ((grid_size_pad // 2) * (grid_size_pad // 2)))\n\n # Fill borders\n grid = np.zeros((grid_size_pad, grid_size_pad), dtype=bool)\n grid[0, :] = grid[-1, :] = 1\n grid[:, 0] = grid[:, -1] = 1\n\n # Make aisles\n for _ in range(density):\n x, y = (\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n )\n grid[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < grid_size_pad - 2:\n neighbours.append((y, x + 2))\n if y > 1:\n neighbours.append((y - 2, x))\n if y < grid_size_pad - 2:\n neighbours.append((y + 2, x))\n if len(neighbours):\n y_, x_ = neighbours[np_rng.integers(0, len(neighbours))]\n if grid[y_, x_] == 0:\n grid[y_, x_] = 1\n grid[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n\n return grid.astype(int)[1:-1, 1:-1]" }, { "identifier": "CompositionalGridGoal", "path": "metax/data/envs/grid.py", "snippet": "class CompositionalGridGoal(NamedTuple):\n direction: int\n interaction: int\n maze: int\n object: int" } ]
import unittest
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
from functools import partial
from metax.data.envs.grid import (MOVES, CompositionalGrid, CompositionalGridGoal)
from mpl_toolkits.axes_grid1 import ImageGrid
5,368
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class CompositionalGridTestCase(unittest.TestCase): rng = jax.random.PRNGKey(0) def test_reset(self): env = CompositionalGrid( grid_size := 7, num_interactions := 4, num_mazes := 2, num_objects := 3, num_distractors := 2, frac_ood := 0.2, task_support := "1_hot", seed := 2022, ) state, emission = env.reset(rng=self.rng) # state, emission = jax.jit(env.reset)(rng=self.rng) assert state.timestep == 0 assert emission.observation.shape == (grid_size, grid_size, num_objects + 2) assert jnp.all(jnp.concatenate((env.tasks_in_dist, env.tasks_out_dist)).shape == env.tasks_all.shape) assert len(jnp.unique(jnp.concatenate((env.tasks_in_dist, env.tasks_out_dist)), axis=1)) == len(env.tasks_all) def test_step(self): env = CompositionalGrid( grid_size := 7, num_interactions := 4, num_mazes := 6, num_objects := 5, num_distractors := 2, frac_ood := 0.2, task_support := "random", seed := 2022, )
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class CompositionalGridTestCase(unittest.TestCase): rng = jax.random.PRNGKey(0) def test_reset(self): env = CompositionalGrid( grid_size := 7, num_interactions := 4, num_mazes := 2, num_objects := 3, num_distractors := 2, frac_ood := 0.2, task_support := "1_hot", seed := 2022, ) state, emission = env.reset(rng=self.rng) # state, emission = jax.jit(env.reset)(rng=self.rng) assert state.timestep == 0 assert emission.observation.shape == (grid_size, grid_size, num_objects + 2) assert jnp.all(jnp.concatenate((env.tasks_in_dist, env.tasks_out_dist)).shape == env.tasks_all.shape) assert len(jnp.unique(jnp.concatenate((env.tasks_in_dist, env.tasks_out_dist)), axis=1)) == len(env.tasks_all) def test_step(self): env = CompositionalGrid( grid_size := 7, num_interactions := 4, num_mazes := 6, num_objects := 5, num_distractors := 2, frac_ood := 0.2, task_support := "random", seed := 2022, )
goal = CompositionalGridGoal(direction := 0, interaction := 1, maze := 2, object := 3)
2
2023-12-22 16:35:49+00:00
8k
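The reset_goal method in the CompositionalGrid snippet above folds the four task factors (direction, interaction, maze, object) into a single task id with jnp.ravel_multi_index and builds a per-factor one-hot embedding. A minimal, self-contained sketch of that computation follows; the factor sizes and the sampled task_code are illustrative values, not taken from the record.

import jax
import jax.numpy as jnp

# Illustrative factor sizes (assumed for this sketch, not from the record above)
num_directions, num_interactions, num_mazes, num_objects = 4, 4, 2, 3

# One index per factor: (direction, interaction, maze, object)
task_code = jnp.array([1, 3, 0, 2])

# Collapse the factored code into a single flat task id, wrapping out-of-range indices
task_id = jnp.ravel_multi_index(
    task_code,
    dims=(num_directions, num_interactions, num_mazes, num_objects),
    mode="wrap",
)

# Per-factor one-hot embedding, padded to the largest factor size
emb_dim = max(num_directions, num_interactions, num_mazes, num_objects)
embedding = jax.nn.one_hot(task_code, emb_dim)

print(int(task_id))     # flat index in [0, 4 * 4 * 2 * 3)
print(embedding.shape)  # (4, emb_dim) == (4, 4)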
AContesini/Convert_PDF_to_DOCX_or_vice-versa
venv/Lib/site-packages/tqdm/std.py
[ { "identifier": "TMonitor", "path": "venv/Lib/site-packages/tqdm/_monitor.py", "snippet": "class TMonitor(Thread):\n \"\"\"\n Monitoring thread for tqdm bars.\n Monitors if tqdm bars are taking too much time to display\n and readjusts miniters automatically if necessary.\n\n Parameters\n ----------\n tqdm_cls : class\n tqdm class to use (can be core tqdm or a submodule).\n sleep_interval : float\n Time to sleep between monitoring checks.\n \"\"\"\n _test = {} # internal vars for unit testing\n\n def __init__(self, tqdm_cls, sleep_interval):\n Thread.__init__(self)\n self.daemon = True # kill thread when main killed (KeyboardInterrupt)\n self.woken = 0 # last time woken up, to sync with monitor\n self.tqdm_cls = tqdm_cls\n self.sleep_interval = sleep_interval\n self._time = self._test.get(\"time\", time)\n self.was_killed = self._test.get(\"Event\", Event)()\n atexit.register(self.exit)\n self.start()\n\n def exit(self):\n self.was_killed.set()\n if self is not current_thread():\n self.join()\n return self.report()\n\n def get_instances(self):\n # returns a copy of started `tqdm_cls` instances\n return [i for i in self.tqdm_cls._instances.copy()\n # Avoid race by checking that the instance started\n if hasattr(i, 'start_t')]\n\n def run(self):\n cur_t = self._time()\n while True:\n # After processing and before sleeping, notify that we woke\n # Need to be done just before sleeping\n self.woken = cur_t\n # Sleep some time...\n self.was_killed.wait(self.sleep_interval)\n # Quit if killed\n if self.was_killed.is_set():\n return\n # Then monitor!\n # Acquire lock (to access _instances)\n with self.tqdm_cls.get_lock():\n cur_t = self._time()\n # Check tqdm instances are waiting too long to print\n instances = self.get_instances()\n for instance in instances:\n # Check event in loop to reduce blocking time on exit\n if self.was_killed.is_set():\n return\n # Only if mininterval > 1 (else iterations are just slow)\n # and last refresh exceeded maxinterval\n if (\n instance.miniters > 1\n and (cur_t - instance.last_print_t) >= instance.maxinterval\n ):\n # force bypassing miniters on next iteration\n # (dynamic_miniters adjusts mininterval automatically)\n instance.miniters = 1\n # Refresh now! 
(works only for manual tqdm)\n instance.refresh(nolock=True)\n # Remove accidental long-lived strong reference\n del instance\n if instances != self.get_instances(): # pragma: nocover\n warn(\"Set changed size during iteration\" +\n \" (see https://github.com/tqdm/tqdm/issues/481)\",\n TqdmSynchronisationWarning, stacklevel=2)\n # Remove accidental long-lived strong references\n del instances\n\n def report(self):\n return not self.was_killed.is_set()" }, { "identifier": "CallbackIOWrapper", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "class CallbackIOWrapper(ObjectWrapper):\n def __init__(self, callback, stream, method=\"read\"):\n \"\"\"\n Wrap a given `file`-like object's `read()` or `write()` to report\n lengths to the given `callback`\n \"\"\"\n super(CallbackIOWrapper, self).__init__(stream)\n func = getattr(stream, method)\n if method == \"write\":\n @wraps(func)\n def write(data, *args, **kwargs):\n res = func(data, *args, **kwargs)\n callback(len(data))\n return res\n self.wrapper_setattr('write', write)\n elif method == \"read\":\n @wraps(func)\n def read(*args, **kwargs):\n data = func(*args, **kwargs)\n callback(len(data))\n return data\n self.wrapper_setattr('read', read)\n else:\n raise KeyError(\"Can only wrap read/write methods\")" }, { "identifier": "Comparable", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "class Comparable(object):\n \"\"\"Assumes child has self._comparable attr/@property\"\"\"\n def __lt__(self, other):\n return self._comparable < other._comparable\n\n def __le__(self, other):\n return (self < other) or (self == other)\n\n def __eq__(self, other):\n return self._comparable == other._comparable\n\n def __ne__(self, other):\n return not self == other\n\n def __gt__(self, other):\n return not self <= other\n\n def __ge__(self, other):\n return not self < other" }, { "identifier": "DisableOnWriteError", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "class DisableOnWriteError(ObjectWrapper):\n \"\"\"\n Disable the given `tqdm_instance` upon `write()` or `flush()` errors.\n \"\"\"\n @staticmethod\n def disable_on_exception(tqdm_instance, func):\n \"\"\"\n Quietly set `tqdm_instance.miniters=inf` if `func` raises `errno=5`.\n \"\"\"\n tqdm_instance = proxy(tqdm_instance)\n\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OSError as e:\n if e.errno != 5:\n raise\n try:\n tqdm_instance.miniters = float('inf')\n except ReferenceError:\n pass\n except ValueError as e:\n if 'closed' not in str(e):\n raise\n try:\n tqdm_instance.miniters = float('inf')\n except ReferenceError:\n pass\n return inner\n\n def __init__(self, wrapped, tqdm_instance):\n super(DisableOnWriteError, self).__init__(wrapped)\n if hasattr(wrapped, 'write'):\n self.wrapper_setattr(\n 'write', self.disable_on_exception(tqdm_instance, wrapped.write))\n if hasattr(wrapped, 'flush'):\n self.wrapper_setattr(\n 'flush', self.disable_on_exception(tqdm_instance, wrapped.flush))\n\n def __eq__(self, other):\n return self._wrapped == getattr(other, '_wrapped', other)" }, { "identifier": "FormatReplace", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "class FormatReplace(object):\n \"\"\"\n >>> a = FormatReplace('something')\n >>> \"{:5d}\".format(a)\n 'something'\n \"\"\" # NOQA: P102\n def __init__(self, replace=''):\n self.replace = replace\n self.format_called = 0\n\n def __format__(self, _):\n self.format_called += 1\n return self.replace" }, { "identifier": "SimpleTextIOWrapper", "path": 
"venv/Lib/site-packages/tqdm/utils.py", "snippet": "class SimpleTextIOWrapper(ObjectWrapper):\n \"\"\"\n Change only `.write()` of the wrapped object by encoding the passed\n value and passing the result to the wrapped object's `.write()` method.\n \"\"\"\n # pylint: disable=too-few-public-methods\n def __init__(self, wrapped, encoding):\n super(SimpleTextIOWrapper, self).__init__(wrapped)\n self.wrapper_setattr('encoding', encoding)\n\n def write(self, s):\n \"\"\"\n Encode `s` and pass to the wrapped object's `.write()` method.\n \"\"\"\n return self._wrapped.write(s.encode(self.wrapper_getattr('encoding')))\n\n def __eq__(self, other):\n return self._wrapped == getattr(other, '_wrapped', other)" }, { "identifier": "_is_ascii", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def _is_ascii(s):\n if isinstance(s, str):\n for c in s:\n if ord(c) > 255:\n return False\n return True\n return _supports_unicode(s)" }, { "identifier": "_screen_shape_wrapper", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def _screen_shape_wrapper(): # pragma: no cover\n \"\"\"\n Return a function which returns console dimensions (width, height).\n Supported: linux, osx, windows, cygwin.\n \"\"\"\n _screen_shape = None\n if IS_WIN:\n _screen_shape = _screen_shape_windows\n if _screen_shape is None:\n _screen_shape = _screen_shape_tput\n if IS_NIX:\n _screen_shape = _screen_shape_linux\n return _screen_shape" }, { "identifier": "_supports_unicode", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def _supports_unicode(fp):\n try:\n return _is_utf(fp.encoding)\n except AttributeError:\n return False" }, { "identifier": "_term_move_up", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def _term_move_up(): # pragma: no cover\n return '' if (os.name == 'nt') and (colorama is None) else '\\x1b[A'" }, { "identifier": "disp_len", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def disp_len(data):\n \"\"\"\n Returns the real on-screen length of a string which may contain\n ANSI control codes and wide chars.\n \"\"\"\n return _text_width(RE_ANSI.sub('', data))" }, { "identifier": "disp_trim", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def disp_trim(data, length):\n \"\"\"\n Trim a string which may contain ANSI control characters.\n \"\"\"\n if len(data) == disp_len(data):\n return data[:length]\n\n ansi_present = bool(RE_ANSI.search(data))\n while disp_len(data) > length: # carefully delete one char at a time\n data = data[:-1]\n if ansi_present and bool(RE_ANSI.search(data)):\n # assume ANSI reset is required\n return data if data.endswith(\"\\033[0m\") else data + \"\\033[0m\"\n return data" }, { "identifier": "envwrap", "path": "venv/Lib/site-packages/tqdm/utils.py", "snippet": "def envwrap(prefix, types=None, is_method=False):\n \"\"\"\n Override parameter defaults via `os.environ[prefix + param_name]`.\n Maps UPPER_CASE env vars map to lower_case param names.\n camelCase isn't supported (because Windows ignores case).\n\n Precedence (highest first):\n - call (`foo(a=3)`)\n - environ (`FOO_A=2`)\n - signature (`def foo(a=1)`)\n\n Parameters\n ----------\n prefix : str\n Env var prefix, e.g. \"FOO_\"\n types : dict, optional\n Fallback mappings `{'param_name': type, ...}` if types cannot be\n inferred from function signature.\n Consider using `types=collections.defaultdict(lambda: ast.literal_eval)`.\n is_method : bool, optional\n Whether to use `functools.partialmethod`. 
If (default: False) use `functools.partial`.\n\n Examples\n --------\n ```\n $ cat foo.py\n from tqdm.utils import envwrap\n @envwrap(\"FOO_\")\n def test(a=1, b=2, c=3):\n print(f\"received: a={a}, b={b}, c={c}\")\n\n $ FOO_A=42 FOO_C=1337 python -c 'import foo; foo.test(c=99)'\n received: a=42, b=2, c=99\n ```\n \"\"\"\n if types is None:\n types = {}\n i = len(prefix)\n env_overrides = {k[i:].lower(): v for k, v in os.environ.items() if k.startswith(prefix)}\n part = partialmethod if is_method else partial\n\n def wrap(func):\n params = signature(func).parameters\n # ignore unknown env vars\n overrides = {k: v for k, v in env_overrides.items() if k in params}\n # infer overrides' `type`s\n for k in overrides:\n param = params[k]\n if param.annotation is not param.empty: # typehints\n for typ in getattr(param.annotation, '__args__', (param.annotation,)):\n try:\n overrides[k] = typ(overrides[k])\n except Exception:\n pass\n else:\n break\n elif param.default is not None: # type of default value\n overrides[k] = type(param.default)(overrides[k])\n else:\n try: # `types` fallback\n overrides[k] = types[k](overrides[k])\n except KeyError: # keep unconverted (`str`)\n pass\n return part(func, **overrides)\n return wrap" } ]
import sys
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from numbers import Number
from time import time
from warnings import warn
from weakref import WeakSet
from ._monitor import TMonitor
from .utils import (CallbackIOWrapper, Comparable, DisableOnWriteError, FormatReplace,
                    SimpleTextIOWrapper, _is_ascii, _screen_shape_wrapper, _supports_unicode,
                    _term_move_up, disp_len, disp_trim, envwrap)
from threading import RLock
from multiprocessing import RLock
from warnings import catch_warnings, simplefilter
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import Panel
from pandas.core.window.rolling import _Rolling_and_Expanding
from pandas.core.window import _Rolling_and_Expanding
from pandas.core.window.expanding import Expanding
from pandas.core.window.rolling import Rolling
from pandas.core.groupby.generic import SeriesGroupBy  # , NDFrameGroupBy
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
from pandas.core.groupby.groupby import GroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby.groupby import PanelGroupBy
from pandas.core.groupby import PanelGroupBy
from pandas.core.common import is_builtin_func
5,913
tqdm_kwargs = tqdm_kwargs.copy() deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)] def inner_generator(df_function='apply'): def inner(df, func, *args, **kwargs): """ Parameters ---------- df : (DataFrame|Series)[GroupBy] Data (may be grouped). func : function To be applied on the (grouped) data. **kwargs : optional Transmitted to `df.apply()`. """ # Precompute total iterations total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None)) if total is None: # not grouped if df_function == 'applymap': total = df.size elif isinstance(df, Series): total = len(df) elif (_Rolling_and_Expanding is None or not isinstance(df, _Rolling_and_Expanding)): # DataFrame or Panel axis = kwargs.get('axis', 0) if axis == 'index': axis = 0 elif axis == 'columns': axis = 1 # when axis=0, total is shape[axis1] total = df.size // df.shape[axis] # Init bar if deprecated_t[0] is not None: t = deprecated_t[0] deprecated_t[0] = None else: t = cls(total=total, **tqdm_kwargs) if len(args) > 0: # *args intentionally not supported (see #244, #299) TqdmDeprecationWarning( "Except func, normal arguments are intentionally" + " not supported by" + " `(DataFrame|Series|GroupBy).progress_apply`." + " Use keyword arguments instead.", fp_write=getattr(t.fp, 'write', sys.stderr.write)) try: # pandas>=1.3.0 except ImportError: is_builtin_func = df._is_builtin_func try: func = is_builtin_func(func) except TypeError: pass # Define bar updating wrapper def wrapper(*args, **kwargs): # update tbar correctly # it seems `pandas apply` calls `func` twice # on the first column/row to decide whether it can # take a fast or slow code path; so stop when t.total==t.n t.update(n=1 if not t.total or t.n < t.total else 0) return func(*args, **kwargs) # Apply the provided function (in **kwargs) # on the df using our wrapper (which provides bar updating) try: return getattr(df, df_function)(wrapper, **kwargs) finally: t.close() return inner # Monkeypatch pandas to provide easy methods # Enable custom tqdm progress in pandas! 
Series.progress_apply = inner_generator() SeriesGroupBy.progress_apply = inner_generator() Series.progress_map = inner_generator('map') SeriesGroupBy.progress_map = inner_generator('map') DataFrame.progress_apply = inner_generator() DataFrameGroupBy.progress_apply = inner_generator() DataFrame.progress_applymap = inner_generator('applymap') if Panel is not None: Panel.progress_apply = inner_generator() if PanelGroupBy is not None: PanelGroupBy.progress_apply = inner_generator() GroupBy.progress_apply = inner_generator() GroupBy.progress_aggregate = inner_generator('aggregate') GroupBy.progress_transform = inner_generator('transform') if Rolling is not None and Expanding is not None: Rolling.progress_apply = inner_generator() Expanding.progress_apply = inner_generator() elif _Rolling_and_Expanding is not None: _Rolling_and_Expanding.progress_apply = inner_generator() # override defaults via env vars @envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float, 'position': int, 'nrows': int}) def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0.0, gui=False, **kwargs): """see tqdm.tqdm for arguments""" if file is None: file = sys.stderr if write_bytes: # Despite coercing unicode into bytes, py2 sys.std* streams # should have bytes written to them.
""" Customisable progressbar decorator for iterators. Includes a default `range` iterator printing to `stderr`. Usage: >>> from tqdm import trange, tqdm >>> for i in trange(10): ... ... """ __author__ = "https://github.com/tqdm/tqdm#contributions" __all__ = ['tqdm', 'trange', 'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning', 'TqdmExperimentalWarning', 'TqdmDeprecationWarning', 'TqdmMonitorWarning'] class TqdmTypeError(TypeError): pass class TqdmKeyError(KeyError): pass class TqdmWarning(Warning): """base class for all tqdm warnings. Used for non-external-code-breaking errors, such as garbled printing. """ def __init__(self, msg, fp_write=None, *a, **k): if fp_write is not None: fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n') else: super(TqdmWarning, self).__init__(msg, *a, **k) class TqdmExperimentalWarning(TqdmWarning, FutureWarning): """beta feature, unstable API and behaviour""" pass class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning): # not suppressed if raised pass class TqdmMonitorWarning(TqdmWarning, RuntimeWarning): """tqdm monitor errors which do not affect external functionality""" pass def TRLock(*args, **kwargs): """threading RLock""" try: return RLock(*args, **kwargs) except (ImportError, OSError): # pragma: no cover pass class TqdmDefaultWriteLock(object): """ Provide a default write lock for thread and multiprocessing safety. Works only on platforms supporting `fork` (so Windows is excluded). You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance before forking in order for the write lock to work. On Windows, you need to supply the lock from the parent to the children as an argument to joblib or the parallelism lib you use. """ # global thread lock so no setup required for multithreading. # NB: Do not create multiprocessing lock as it sets the multiprocessing # context, disallowing `spawn()`/`forkserver()` th_lock = TRLock() def __init__(self): # Create global parallelism locks to avoid racing issues with parallel # bars works only if fork available (Linux/MacOSX, but not Windows) cls = type(self) root_lock = cls.th_lock if root_lock is not None: root_lock.acquire() cls.create_mp_lock() self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None] if root_lock is not None: root_lock.release() def acquire(self, *a, **k): for lock in self.locks: lock.acquire(*a, **k) def release(self): for lock in self.locks[::-1]: # Release in inverse order of acquisition lock.release() def __enter__(self): self.acquire() def __exit__(self, *exc): self.release() @classmethod def create_mp_lock(cls): if not hasattr(cls, 'mp_lock'): try: cls.mp_lock = RLock() except (ImportError, OSError): # pragma: no cover cls.mp_lock = None @classmethod def create_th_lock(cls): assert hasattr(cls, 'th_lock') warn("create_th_lock not needed anymore", TqdmDeprecationWarning, stacklevel=2) class Bar(object): """ `str.format`-able bar with format specifiers: `[width][type]` - `width` + unspecified (default): use `self.default_len` + `int >= 0`: overrides `self.default_len` + `int < 0`: subtract from `self.default_len` - `type` + `a`: ascii (`charset=self.ASCII` override) + `u`: unicode (`charset=self.UTF` override) + `b`: blank (`charset=" "` override) """ ASCII = " 123456789#" UTF = u" " + u''.join(map(chr, range(0x258F, 0x2587, -1))) BLANK = " " COLOUR_RESET = '\x1b[0m' COLOUR_RGB = '\x1b[38;2;%d;%d;%dm' COLOURS = {'BLACK': '\x1b[30m', 'RED': '\x1b[31m', 'GREEN': '\x1b[32m', 'YELLOW': '\x1b[33m', 'BLUE': '\x1b[34m', 'MAGENTA': '\x1b[35m', 'CYAN': 
'\x1b[36m', 'WHITE': '\x1b[37m'} def __init__(self, frac, default_len=10, charset=UTF, colour=None): if not 0 <= frac <= 1: warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2) frac = max(0, min(1, frac)) assert default_len > 0 self.frac = frac self.default_len = default_len self.charset = charset self.colour = colour @property def colour(self): return self._colour @colour.setter def colour(self, value): if not value: self._colour = None return try: if value.upper() in self.COLOURS: self._colour = self.COLOURS[value.upper()] elif value[0] == '#' and len(value) == 7: self._colour = self.COLOUR_RGB % tuple( int(i, 16) for i in (value[1:3], value[3:5], value[5:7])) else: raise KeyError except (KeyError, AttributeError): warn("Unknown colour (%s); valid choices: [hex (#00ff00), %s]" % ( value, ", ".join(self.COLOURS)), TqdmWarning, stacklevel=2) self._colour = None def __format__(self, format_spec): if format_spec: _type = format_spec[-1].lower() try: charset = {'a': self.ASCII, 'u': self.UTF, 'b': self.BLANK}[_type] except KeyError: charset = self.charset else: format_spec = format_spec[:-1] if format_spec: N_BARS = int(format_spec) if N_BARS < 0: N_BARS += self.default_len else: N_BARS = self.default_len else: charset = self.charset N_BARS = self.default_len nsyms = len(charset) - 1 bar_length, frac_bar_length = divmod(int(self.frac * N_BARS * nsyms), nsyms) res = charset[-1] * bar_length if bar_length < N_BARS: # whitespace padding res = res + charset[frac_bar_length] + charset[0] * (N_BARS - bar_length - 1) return self.colour + res + self.COLOUR_RESET if self.colour else res class EMA(object): """ Exponential moving average: smoothing to give progressively lower weights to older values. Parameters ---------- smoothing : float, optional Smoothing factor in range [0, 1], [default: 0.3]. Increase to give more weight to recent values. Ranges from 0 (yields old value) to 1 (yields new value). """ def __init__(self, smoothing=0.3): self.alpha = smoothing self.last = 0 self.calls = 0 def __call__(self, x=None): """ Parameters ---------- x : float New value to include in EMA. """ beta = 1 - self.alpha if x is not None: self.last = self.alpha * x + beta * self.last self.calls += 1 return self.last / (1 - beta ** self.calls) if self.calls else self.last class tqdm(Comparable): """ Decorate an iterable object, returning an iterator which acts exactly like the original iterable, but prints a dynamically updating progressbar every time a value is requested. Parameters ---------- iterable : iterable, optional Iterable to decorate with a progressbar. Leave blank to manually manage the updates. desc : str, optional Prefix for the progressbar. total : int or float, optional The number of expected iterations. If unspecified, len(iterable) is used if possible. If float("inf") or as a last resort, only basic progress statistics are displayed (no ETA, no progressbar). If `gui` is True and this parameter needs subsequent updating, specify an initial arbitrary large positive number, e.g. 9e9. leave : bool, optional If [default: True], keeps all traces of the progressbar upon termination of iteration. If `None`, will leave only if `position` is `0`. file : `io.TextIOWrapper` or `io.StringIO`, optional Specifies where to output the progress messages (default: sys.stderr). Uses `file.write(str)` and `file.flush()` methods. For encoding, see `write_bytes`. ncols : int, optional The width of the entire output message. If specified, dynamically resizes the progressbar to stay within this bound. 
If unspecified, attempts to use environment width. The fallback is a meter width of 10 and no limit for the counter and statistics. If 0, will not print any meter (only stats). mininterval : float, optional Minimum progress display update interval [default: 0.1] seconds. maxinterval : float, optional Maximum progress display update interval [default: 10] seconds. Automatically adjusts `miniters` to correspond to `mininterval` after long display update lag. Only works if `dynamic_miniters` or monitor thread is enabled. miniters : int or float, optional Minimum progress display update interval, in iterations. If 0 and `dynamic_miniters`, will automatically adjust to equal `mininterval` (more CPU efficient, good for tight loops). If > 0, will skip display of specified number of iterations. Tweak this and `mininterval` to get very efficient loops. If your progress is erratic with both fast and slow iterations (network, skipping items, etc) you should set miniters=1. ascii : bool or str, optional If unspecified or False, use unicode (smooth blocks) to fill the meter. The fallback is to use ASCII characters " 123456789#". disable : bool, optional Whether to disable the entire progressbar wrapper [default: False]. If set to None, disable on non-TTY. unit : str, optional String that will be used to define the unit of each iteration [default: it]. unit_scale : bool or int or float, optional If 1 or True, the number of iterations will be reduced/scaled automatically and a metric prefix following the International System of Units standard will be added (kilo, mega, etc.) [default: False]. If any other non-zero number, will scale `total` and `n`. dynamic_ncols : bool, optional If set, constantly alters `ncols` and `nrows` to the environment (allowing for window resizes) [default: False]. smoothing : float, optional Exponential moving average smoothing factor for speed estimates (ignored in GUI mode). Ranges from 0 (average speed) to 1 (current/instantaneous speed) [default: 0.3]. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' '{rate_fmt}{postfix}]' Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, rate, rate_fmt, rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, unit_divisor, remaining, remaining_s, eta. Note that a trailing ": " is automatically removed after {desc} if the latter is empty. initial : int or float, optional The initial counter value. Useful when restarting a progress bar [default: 0]. If using float, consider specifying `{n:.3f}` or similar in `bar_format`, or specifying `unit_scale`. position : int, optional Specify the line offset to print this bar (starting from 0) Automatic if unspecified. Useful to manage multiple bars at once (eg, from threads). postfix : dict or *, optional Specify additional stats to display at the end of the bar. Calls `set_postfix(**postfix)` if possible (dict). unit_divisor : float, optional [default: 1000], ignored unless `unit_scale` is True. write_bytes : bool, optional Whether to write bytes. If (default: False) will write unicode. lock_args : tuple, optional Passed to `refresh` for intermediate output (initialisation, iterating, and updating). nrows : int, optional The screen height. If specified, hides nested bars outside this bound. If unspecified, attempts to use environment height. 
The fallback is 20. colour : str, optional Bar colour (e.g. 'green', '#00ff00'). delay : float, optional Don't display until [default: 0] seconds have elapsed. gui : bool, optional WARNING: internal parameter - do not use. Use tqdm.gui.tqdm(...) instead. If set, will attempt to use matplotlib animations for a graphical output [default: False]. Returns ------- out : decorated iterator. """ monitor_interval = 10 # set to 0 to disable the thread monitor = None _instances = WeakSet() @staticmethod def format_sizeof(num, suffix='', divisor=1000): """ Formats a number (greater than unity) with SI Order of Magnitude prefixes. Parameters ---------- num : float Number ( >= 1) to format. suffix : str, optional Post-postfix [default: '']. divisor : float, optional Divisor between prefixes [default: 1000]. Returns ------- out : str Number with Order of Magnitude SI unit postfix. """ for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 999.5: if abs(num) < 99.95: if abs(num) < 9.995: return '{0:1.2f}'.format(num) + unit + suffix return '{0:2.1f}'.format(num) + unit + suffix return '{0:3.0f}'.format(num) + unit + suffix num /= divisor return '{0:3.1f}Y'.format(num) + suffix @staticmethod def format_interval(t): """ Formats a number of seconds as a clock time, [H:]MM:SS Parameters ---------- t : int Number of seconds. Returns ------- out : str [H:]MM:SS """ mins, s = divmod(int(t), 60) h, m = divmod(mins, 60) if h: return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s) else: return '{0:02d}:{1:02d}'.format(m, s) @staticmethod def format_num(n): """ Intelligent scientific notation (.3g). Parameters ---------- n : int or float or Numeric A Number. Returns ------- out : str Formatted number. """ f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-') n = str(n) return f if len(f) < len(n) else n @staticmethod def status_printer(file): """ Manage the printing and in-place updating of a line of characters. Note that if the string is longer than a line, then in-place updating may not work (it will print a new line at each refresh). """ fp = file fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover if fp in (sys.stderr, sys.stdout): getattr(sys.stderr, 'flush', lambda: None)() getattr(sys.stdout, 'flush', lambda: None)() def fp_write(s): fp.write(str(s)) fp_flush() last_len = [0] def print_status(s): len_s = disp_len(s) fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0))) last_len[0] = len_s return print_status @staticmethod def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it', unit_scale=False, rate=None, bar_format=None, postfix=None, unit_divisor=1000, initial=0, colour=None, **extra_kwargs): """ Return a string-based progress bar given some parameters Parameters ---------- n : int or float Number of finished iterations. total : int or float The expected total number of iterations. If meaningless (None), only basic progress statistics are displayed (no ETA). elapsed : float Number of seconds passed since start. ncols : int, optional The width of the entire output message. If specified, dynamically resizes `{bar}` to stay within this bound [default: None]. If `0`, will not print any bar (only stats). The fallback is `{bar:10}`. prefix : str, optional Prefix message (included in total width) [default: '']. Use as {desc} in bar_format string. ascii : bool, optional or str, optional If not set, use unicode (smooth blocks) to fill the meter [default: False]. The fallback is to use ASCII characters " 123456789#". 
unit : str, optional The iteration unit [default: 'it']. unit_scale : bool or int or float, optional If 1 or True, the number of iterations will be printed with an appropriate SI metric prefix (k = 10^3, M = 10^6, etc.) [default: False]. If any other non-zero number, will scale `total` and `n`. rate : float, optional Manual override for iteration rate. If [default: None], uses n/elapsed. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' '{rate_fmt}{postfix}]' Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, rate, rate_fmt, rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, unit_divisor, remaining, remaining_s, eta. Note that a trailing ": " is automatically removed after {desc} if the latter is empty. postfix : *, optional Similar to `prefix`, but placed at the end (e.g. for additional stats). Note: postfix is usually a string (not a dict) for this method, and will if possible be set to postfix = ', ' + postfix. However other types are supported (#382). unit_divisor : float, optional [default: 1000], ignored unless `unit_scale` is True. initial : int or float, optional The initial counter value [default: 0]. colour : str, optional Bar colour (e.g. 'green', '#00ff00'). Returns ------- out : Formatted meter and stats, ready to display. """ # sanity check: total if total and n >= (total + 0.5): # allow float imprecision (#849) total = None # apply custom scale if necessary if unit_scale and unit_scale not in (True, 1): if total: total *= unit_scale n *= unit_scale if rate: rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt unit_scale = False elapsed_str = tqdm.format_interval(elapsed) # if unspecified, attempt to use rate = average speed # (we allow manual override since predicting time is an arcane art) if rate is None and elapsed: rate = (n - initial) / elapsed inv_rate = 1 / rate if rate else None format_sizeof = tqdm.format_sizeof rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s' rate_inv_fmt = ( (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate)) if inv_rate else '?') + 's/' + unit rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt if unit_scale: n_fmt = format_sizeof(n, divisor=unit_divisor) total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?' else: n_fmt = str(n) total_fmt = str(total) if total is not None else '?' try: postfix = ', ' + postfix if postfix else '' except TypeError: pass remaining = (total - n) / rate if rate and total else 0 remaining_str = tqdm.format_interval(remaining) if rate else '?' 
try: eta_dt = (datetime.now() + timedelta(seconds=remaining) if rate and total else datetime.utcfromtimestamp(0)) except OverflowError: eta_dt = datetime.max # format the stats displayed to the left and right sides of the bar if prefix: # old prefix setup work around bool_prefix_colon_already = (prefix[-2:] == ": ") l_bar = prefix if bool_prefix_colon_already else prefix + ": " else: l_bar = '' r_bar = f'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]' # Custom bar formatting # Populate a dict with all available progress indicators format_dict = { # slight extension of self.format_dict 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt, 'elapsed': elapsed_str, 'elapsed_s': elapsed, 'ncols': ncols, 'desc': prefix or '', 'unit': unit, 'rate': inv_rate if inv_rate and inv_rate > 1 else rate, 'rate_fmt': rate_fmt, 'rate_noinv': rate, 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate, 'rate_inv_fmt': rate_inv_fmt, 'postfix': postfix, 'unit_divisor': unit_divisor, 'colour': colour, # plus more useful definitions 'remaining': remaining_str, 'remaining_s': remaining, 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt, **extra_kwargs} # total is known: we can predict some stats if total: # fractional and percentage progress frac = n / total percentage = frac * 100 l_bar += '{0:3.0f}%|'.format(percentage) if ncols == 0: return l_bar[:-1] + r_bar[1:] format_dict.update(l_bar=l_bar) if bar_format: format_dict.update(percentage=percentage) # auto-remove colon for empty `{desc}` if not prefix: bar_format = bar_format.replace("{desc}: ", '') else: bar_format = "{l_bar}{bar}{r_bar}" full_bar = FormatReplace() nobar = bar_format.format(bar=full_bar, **format_dict) if not full_bar.format_called: return nobar # no `{bar}`; nothing else to do # Formatting progress bar space available for bar's display full_bar = Bar(frac, max(1, ncols - disp_len(nobar)) if ncols else 10, charset=Bar.ASCII if ascii is True else ascii or Bar.UTF, colour=colour) if not _is_ascii(full_bar.charset) and _is_ascii(bar_format): bar_format = str(bar_format) res = bar_format.format(bar=full_bar, **format_dict) return disp_trim(res, ncols) if ncols else res elif bar_format: # user-specified bar_format but no total l_bar += '|' format_dict.update(l_bar=l_bar, percentage=0) full_bar = FormatReplace() nobar = bar_format.format(bar=full_bar, **format_dict) if not full_bar.format_called: return nobar full_bar = Bar(0, max(1, ncols - disp_len(nobar)) if ncols else 10, charset=Bar.BLANK, colour=colour) res = bar_format.format(bar=full_bar, **format_dict) return disp_trim(res, ncols) if ncols else res else: # no total: no progressbar, ETA, just progress stats return (f'{(prefix + ": ") if prefix else ""}' f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]') def __new__(cls, *_, **__): instance = object.__new__(cls) with cls.get_lock(): # also constructs lock if non-existent cls._instances.add(instance) # create monitoring thread if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()): try: cls.monitor = TMonitor(cls, cls.monitor_interval) except Exception as e: # pragma: nocover warn("tqdm:disabling monitor support" " (monitor_interval = 0) due to:\n" + str(e), TqdmMonitorWarning, stacklevel=2) cls.monitor_interval = 0 return instance @classmethod def _get_free_pos(cls, instance=None): """Skips specified instance.""" positions = {abs(inst.pos) for inst in cls._instances if inst is not instance and hasattr(inst, "pos")} return min(set(range(len(positions) + 1)).difference(positions)) 
@classmethod def _decr_instances(cls, instance): """ Remove from list and reposition another unfixed bar to fill the new gap. This means that by default (where all nested bars are unfixed), order is not maintained but screen flicker/blank space is minimised. (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.) """ with cls._lock: try: cls._instances.remove(instance) except KeyError: # if not instance.gui: # pragma: no cover # raise pass # py2: maybe magically removed already # else: if not instance.gui: last = (instance.nrows or 20) - 1 # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`) instances = list(filter( lambda i: hasattr(i, "pos") and last <= i.pos, cls._instances)) # set first found to current `pos` if instances: inst = min(instances, key=lambda i: i.pos) inst.clear(nolock=True) inst.pos = abs(instance.pos) @classmethod def write(cls, s, file=None, end="\n", nolock=False): """Print a message via tqdm (without overlap with bars).""" fp = file if file is not None else sys.stdout with cls.external_write_mode(file=file, nolock=nolock): # Write the message fp.write(s) fp.write(end) @classmethod @contextmanager def external_write_mode(cls, file=None, nolock=False): """ Disable tqdm within context and refresh tqdm when exits. Useful when writing to standard output stream """ fp = file if file is not None else sys.stdout try: if not nolock: cls.get_lock().acquire() # Clear all bars inst_cleared = [] for inst in getattr(cls, '_instances', []): # Clear instance if in the target output file # or if write output + tqdm output are both either # sys.stdout or sys.stderr (because both are mixed in terminal) if hasattr(inst, "start_t") and (inst.fp == fp or all( f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))): inst.clear(nolock=True) inst_cleared.append(inst) yield # Force refresh display of bars we cleared for inst in inst_cleared: inst.refresh(nolock=True) finally: if not nolock: cls._lock.release() @classmethod def set_lock(cls, lock): """Set the global lock.""" cls._lock = lock @classmethod def get_lock(cls): """Get the global lock. Construct it if it does not exist.""" if not hasattr(cls, '_lock'): cls._lock = TqdmDefaultWriteLock() return cls._lock @classmethod def pandas(cls, **tqdm_kwargs): """ Registers the current `tqdm` class with pandas.core. ( frame.DataFrame | series.Series | groupby.(generic.)DataFrameGroupBy | groupby.(generic.)SeriesGroupBy ).progress_apply A new instance will be created every time `progress_apply` is called, and each instance will automatically `close()` upon completion. 
Parameters ---------- tqdm_kwargs : arguments for the tqdm instance Examples -------- >>> import pandas as pd >>> import numpy as np >>> from tqdm import tqdm >>> from tqdm.gui import tqdm as tqdm_gui >>> >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6))) >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc >>> # Now you can use `progress_apply` instead of `apply` >>> df.groupby(0).progress_apply(lambda x: x**2) References ---------- <https://stackoverflow.com/questions/18603270/\ progress-indicator-during-pandas-operations-python> """ try: with catch_warnings(): simplefilter("ignore", category=FutureWarning) except ImportError: # pandas>=1.2.0 Panel = None Rolling, Expanding = None, None try: # pandas>=1.0.0 except ImportError: try: # pandas>=0.18.0 except ImportError: # pandas>=1.2.0 try: # pandas>=1.2.0 _Rolling_and_Expanding = Rolling, Expanding except ImportError: # pragma: no cover _Rolling_and_Expanding = None try: # pandas>=0.25.0 except ImportError: # pragma: no cover try: # pandas>=0.23.0 except ImportError: try: # pandas>=0.23.0 except ImportError: # pragma: no cover try: # pandas>=0.23.0 except ImportError: try: except ImportError: # pandas>=0.25.0 PanelGroupBy = None tqdm_kwargs = tqdm_kwargs.copy() deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)] def inner_generator(df_function='apply'): def inner(df, func, *args, **kwargs): """ Parameters ---------- df : (DataFrame|Series)[GroupBy] Data (may be grouped). func : function To be applied on the (grouped) data. **kwargs : optional Transmitted to `df.apply()`. """ # Precompute total iterations total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None)) if total is None: # not grouped if df_function == 'applymap': total = df.size elif isinstance(df, Series): total = len(df) elif (_Rolling_and_Expanding is None or not isinstance(df, _Rolling_and_Expanding)): # DataFrame or Panel axis = kwargs.get('axis', 0) if axis == 'index': axis = 0 elif axis == 'columns': axis = 1 # when axis=0, total is shape[axis1] total = df.size // df.shape[axis] # Init bar if deprecated_t[0] is not None: t = deprecated_t[0] deprecated_t[0] = None else: t = cls(total=total, **tqdm_kwargs) if len(args) > 0: # *args intentionally not supported (see #244, #299) TqdmDeprecationWarning( "Except func, normal arguments are intentionally" + " not supported by" + " `(DataFrame|Series|GroupBy).progress_apply`." + " Use keyword arguments instead.", fp_write=getattr(t.fp, 'write', sys.stderr.write)) try: # pandas>=1.3.0 except ImportError: is_builtin_func = df._is_builtin_func try: func = is_builtin_func(func) except TypeError: pass # Define bar updating wrapper def wrapper(*args, **kwargs): # update tbar correctly # it seems `pandas apply` calls `func` twice # on the first column/row to decide whether it can # take a fast or slow code path; so stop when t.total==t.n t.update(n=1 if not t.total or t.n < t.total else 0) return func(*args, **kwargs) # Apply the provided function (in **kwargs) # on the df using our wrapper (which provides bar updating) try: return getattr(df, df_function)(wrapper, **kwargs) finally: t.close() return inner # Monkeypatch pandas to provide easy methods # Enable custom tqdm progress in pandas! 
Series.progress_apply = inner_generator() SeriesGroupBy.progress_apply = inner_generator() Series.progress_map = inner_generator('map') SeriesGroupBy.progress_map = inner_generator('map') DataFrame.progress_apply = inner_generator() DataFrameGroupBy.progress_apply = inner_generator() DataFrame.progress_applymap = inner_generator('applymap') if Panel is not None: Panel.progress_apply = inner_generator() if PanelGroupBy is not None: PanelGroupBy.progress_apply = inner_generator() GroupBy.progress_apply = inner_generator() GroupBy.progress_aggregate = inner_generator('aggregate') GroupBy.progress_transform = inner_generator('transform') if Rolling is not None and Expanding is not None: Rolling.progress_apply = inner_generator() Expanding.progress_apply = inner_generator() elif _Rolling_and_Expanding is not None: _Rolling_and_Expanding.progress_apply = inner_generator() # override defaults via env vars @envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float, 'position': int, 'nrows': int}) def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0.0, gui=False, **kwargs): """see tqdm.tqdm for arguments""" if file is None: file = sys.stderr if write_bytes: # Despite coercing unicode into bytes, py2 sys.std* streams # should have bytes written to them.
file = SimpleTextIOWrapper(
5
2023-12-24 15:46:18+00:00
8k
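The pandas classmethod in the tqdm record above monkeypatches progress_apply / progress_map onto pandas DataFrame, Series and GroupBy objects. A short usage sketch, adapted from the docstring example inside that record (the DataFrame shape and the desc keyword are illustrative choices):

import numpy as np
import pandas as pd
from tqdm import tqdm

# Register progress_apply / progress_map on pandas objects
tqdm.pandas(desc="squaring")

df = pd.DataFrame(np.random.randint(0, 100, (10000, 6)))

# Same semantics as df.groupby(0).apply(...), but driving a tqdm bar
result = df.groupby(0).progress_apply(lambda x: x ** 2)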
willfinnigan/RetroBioCat_2
rbc2/expansion/expanders/chemistry_expanders.py
[ { "identifier": "Expansion_Config", "path": "rbc2/configs/expansion_config.py", "snippet": "class Expansion_Config():\n\n def __init__(self):\n\n # rule application\n self.allow_chiral_symmetry = False\n self.check_chiral_products = True\n self.combine_enantiomers = True\n self.allow_cyclic_reaction_outcomes = False\n self.clean_brackets = True\n\n # reaction parsing\n self.allow_backwards = False\n self.allow_duplicates = False\n self.duplicates_require_same_expander = True\n self.duplicates_require_same_domain = False\n self.duplicates_require_same_name = False\n self.merge_duplicate_metadata = True\n self.force_rdkit_smis = True\n\n # expanders general\n self.max_reactions = None # max reactions (not options)\n\n # reaction filtering and blocking\n self.use_max_mw_for_enzymes = False\n self.max_mw_to_use_enzymes = 300\n\n\n\n def update_from_dict(self, attr_dict):\n current_dict = self.to_dict()\n for key, value in attr_dict.items():\n if key in current_dict:\n setattr(self, key, value)\n return self\n\n def to_dict(self):\n return self.__dict__" }, { "identifier": "AizynthfinderActionGetter", "path": "rbc2/expansion/expanders/action_getters/aizynthfinder/aizynthfinder_actions.py", "snippet": "class AizynthfinderActionGetter():\n\n def __init__(self,\n template_column='retro_template',\n cutoff_cumulative=0.995,\n cutoff_number=50,\n log_level='WARNING'):\n\n self.logger = add_logger('AIZynthfinder_Actions', level=log_level)\n self.policy_model = None\n self.templates = None\n\n self.template_column = template_column\n self.cutoff_cumulative = cutoff_cumulative\n self.cutoff_number = cutoff_number\n\n if does_aizynthfinder_exist() == False:\n download_aizynthfinder_model()\n\n def load_model(self):\n if self.policy_model == None:\n policy_path = data_folder + '/uspto_model.hdf5'\n self.policy_model = load_keras_models.LocalKerasModel(policy_path)\n if self.templates == None:\n templates_path = data_folder + '/uspto_templates.hdf5'\n self.templates = pd.read_hdf(templates_path, \"table\")\n\n def get_actions(self, smi):\n reactions = []\n priors = []\n template_column = self.template_column\n\n mol = Chem.MolFromSmiles(smi)\n\n all_transforms_prop = self._predict(mol)\n\n probable_transforms_idx = self._cutoff_predictions(all_transforms_prop)\n\n possible_moves = self.templates.iloc[probable_transforms_idx]\n probs = all_transforms_prop[probable_transforms_idx]\n\n priors.extend(probs)\n for idx, (move_index, move) in enumerate(possible_moves.iterrows()):\n metadata = dict(move)\n del metadata[template_column]\n metadata[\"policy_probability\"] = round(float(probs[idx]), 5)\n metadata[\"template_code\"] = move_index\n\n reaction = {'smarts': move[template_column],\n 'metadata': metadata,\n 'prior': priors[idx]}\n\n reactions.append(reaction)\n\n return reactions\n\n def get_rxns(self, smile):\n if self.policy_model == None:\n self.load_model()\n\n reactions = self.get_actions(smile)\n rxns = {}\n metadata = {}\n\n for reaction in reactions:\n name = f\"Chem_{reaction['metadata']['classification']}\"\n num = 1\n extra_string = f\"__{num}\"\n while name+extra_string in rxns:\n extra_string = f\"__{num}\"\n num += 1\n name = name+extra_string\n smarts = reaction['smarts']\n if self._does_smarts_only_one_reactants(smarts):\n rxns[name] = [smarts]\n else:\n rxns[name] = []\n metadata[name] = reaction['metadata']\n return rxns, metadata\n\n def _predict(self, mol):\n fingerprint = fingerprints.get_mol_fingerprint(mol, 2, nBits=len(self.policy_model))\n fp_arr = fingerprint.reshape([1, 
len(self.policy_model)])\n return np.array(self.policy_model.predict(fp_arr)).flatten()\n\n @staticmethod\n def _does_smarts_only_one_reactants(smarts):\n if '>>' not in smarts:\n return False\n if '.' in smarts.split('>>')[0]:\n return False\n return True\n\n def _cutoff_predictions(self, predictions):\n \"\"\"\n Get the top transformations, by selecting those that have:\n * cumulative probability less than a threshold (cutoff_cumulative)\n * or at most N (cutoff_number)\n \"\"\"\n\n sortidx = np.argsort(predictions)[::-1]\n cumsum = np.cumsum(predictions[sortidx])\n if any(cumsum >= self.cutoff_cumulative):\n maxidx = np.argmin(cumsum < self.cutoff_cumulative)\n else:\n maxidx = len(cumsum)\n\n maxidx = min(maxidx, self.cutoff_number) or 1\n return sortidx[:maxidx]" }, { "identifier": "Askcos_Action_Getter", "path": "rbc2/expansion/expanders/action_getters/askcos/askcos_action_getter.py", "snippet": "class Askcos_Action_Getter():\n\n def __init__(self,\n cutoff_cumulative=0.995,\n cutoff_number=50,\n allow_multi_product_templates=False,\n log_level='WARNING'):\n self.logger = add_logger('AskcosActionGetter', level=log_level)\n\n self.cutoff_cumulative = cutoff_cumulative\n self.cutoff_number = cutoff_number\n self.allow_multi_product_templates = allow_multi_product_templates\n\n self.fp_length = 2048\n self.fp_radius = 2\n self.policy_model = None\n self.templates = None\n\n if does_askcos_exist() == False:\n download_askcos_model()\n\n def load_model(self):\n if self.policy_model is None:\n policy_path = data_folder + '/reaxys'\n self.policy_model = keras.models.load_model(policy_path)\n if self.templates is None:\n templates_path = data_folder + '/reaxys/retro.templates.json.gz'\n self.templates = pd.read_json(templates_path)\n\n def _does_template_have_multiple_products(self, smarts):\n if self.allow_multi_product_templates == True:\n return False\n\n products = smarts.split('>>')[0].split('.')\n if len(products) != 1:\n return True\n\n return False\n\n def get_rxns(self, smiles):\n self.load_model()\n\n fp = self._smiles_to_fp(smiles).reshape(1, -1)\n indices, scores = self._predictions_from_model(fp)\n indices, scores = self._filter_by_cumulative_proba(indices, scores)\n indices, scores = self._filter_by_max_number(indices, scores)\n possible_moves = self.templates.iloc[indices]\n\n smarts_dict, metadata_dict = {}, {}\n for i, (index, row) in enumerate(possible_moves.iterrows()):\n name = f\"AC_{i+1}\"\n smarts = row['reaction_smarts']\n if self._does_template_have_multiple_products(smarts):\n continue\n metadata = self._remove_unnecessary_metadata(dict(row))\n metadata['score'] = float(scores[i])\n smarts_dict[name] = [smarts]\n metadata_dict[name] = metadata\n\n self.logger.debug(f'{len(smarts_dict)} options retrieved for {smiles}')\n return smarts_dict, metadata_dict\n\n def _predictions_from_model(self, fp):\n scores = self.policy_model(fp).reshape(-1)\n scores = softmax(scores)\n indices = np.argsort(-scores)\n scores = scores[indices]\n return indices, scores\n\n def _filter_by_cumulative_proba(self, indices, scores):\n cum_scores = np.cumsum(scores)\n scores = scores[cum_scores <= self.cutoff_cumulative]\n indices = indices[cum_scores <= self.cutoff_cumulative]\n return indices, scores\n\n def _filter_by_max_number(self, indices, scores):\n indices = indices[:self.cutoff_number]\n scores = scores[:self.cutoff_number]\n return indices, scores\n\n def _remove_unnecessary_metadata(self, metadata):\n metadata.pop('index')\n metadata.pop('reaction_smarts')\n return metadata\n\n 
def _smiles_to_fp(self, smiles):\n mol = Chem.MolFromSmiles(smiles)\n if not mol:\n return np.zeros((self.fp_length,), dtype=np.float32)\n return np.array(\n AllChem.GetMorganFingerprintAsBitVect(mol, self.fp_radius, nBits=self.fp_length, useChirality=True), dtype=np.float32\n )" }, { "identifier": "RingBreaker_ActionGetter", "path": "rbc2/expansion/expanders/action_getters/ring_breaker/ringbreaker_actions.py", "snippet": "class RingBreaker_ActionGetter():\n\n\n def __init__(self,\n cutoff_cumulative=0.995,\n cutoff_number=10,\n log_level='WARNING'):\n\n self.logger = add_logger('RingBreaker_Actions', level=log_level)\n\n self.cutoff_cumulative = cutoff_cumulative\n self.cutoff_number = cutoff_number\n\n self.policy_model = None\n self.templates = None\n self.lib = None\n\n if does_ringbreaker_exist() == False:\n download_ringbreaker_model()\n\n def init_template_library(self):\n # Initalise template library\n\n templates_path = f\"{data_folder}/data/uspto_ringformations.csv\"\n lib = pd.read_csv(templates_path)\n lib = lib.drop([\"Unnamed: 0\", \"index\", \"selectivity\", \"outcomes\", \"ring_change\"], axis=1)\n template_labels = LabelEncoder()\n lib['template_code'] = template_labels.fit_transform(lib['template_hash'])\n lib = lib.drop([\"reaction_hash\", \"reactants\"], axis=1)\n\n self.lib = lib.set_index('template_code').T.to_dict('list')\n\n def init_model(self):\n # Initialise policy\n top10_acc = functools.partial(top_k_categorical_accuracy, k=10)\n top10_acc.__name__ = 'top10_acc'\n\n top50_acc = functools.partial(top_k_categorical_accuracy, k=50)\n top50_acc.__name__ = 'top50_acc'\n\n model_path = f\"{data_folder}/models/checkpoints/weights.hdf5\"\n self.policy = load_model(model_path, custom_objects={'top10_acc': top10_acc, 'top50_acc': top50_acc})\n\n def load_model(self):\n if self.policy_model is None:\n self.init_model()\n if self.lib is None:\n self.init_template_library()\n\n def _smiles_to_ecfp(self, product, size=2048):\n mol = Chem.MolFromSmiles(product)\n ecfp = GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)\n arr = np.zeros((0,), dtype=np.int8)\n cDataStructs.ConvertToNumpyArray(ecfp, arr)\n return arr.reshape([1, 2048])\n\n def _get_prediction(self, target):\n return self.policy(self._smiles_to_ecfp(target))\n\n def _get_templates(self, prediction, lib, topN=50):\n sort_pred = np.argsort(prediction)[::-1]\n predicted_templates = {}\n for i in range(1, topN + 1):\n pred_temp = lib[sort_pred[-1][-i]]\n predicted_templates[i] = {'template': pred_temp[-2], 'classification': pred_temp[-3], 'ID': pred_temp[0]}\n\n return predicted_templates\n\n def _filter_by_cumulative_predictions(self, actions):\n filtered_actions = []\n for a in actions:\n cumulative = a[\"cumulative_probability\"]\n prob = a['prior']\n if cumulative - prob <= self.cutoff_cumulative:\n filtered_actions.append(a)\n return filtered_actions\n\n def _get_actions(self, smi):\n\n cutoff = self.cutoff_number\n\n prediction = self._get_prediction(smi)\n predicted_templates = self._get_templates(prediction, self.lib, cutoff + 1)\n sort_pred = np.sort(prediction)[::-1]\n\n actions = []\n\n for i in range(1, cutoff + 1):\n template = predicted_templates[i]['template']\n\n\n metadata = {\"policy_probability\": round(float(sort_pred[-1][-i]),5),\n \"template_code\": predicted_templates[i]['ID'],\n 'classification': predicted_templates[i]['classification']}\n\n reaction = {'smarts': template,\n 'metadata': metadata,\n 'prior': sort_pred[-1][-i],\n \"cumulative_probability\": sum(sort_pred[-1][-i:])}\n\n 
actions.append(reaction)\n\n actions = self._filter_by_cumulative_predictions(actions)\n\n return actions\n\n def get_rxns(self, smile):\n if self.policy_model == None:\n self.load_model()\n\n reactions = self._get_actions(smile)\n rxns = {}\n metadata = {}\n\n for reaction in reactions:\n name = f\"Chem_RB_{reaction['metadata']['classification']}\"\n num = 1\n extra_string = f\"__{num}\"\n while name+extra_string in rxns:\n extra_string = f\"__{num}\"\n num += 1\n name = name+extra_string\n rxns[name] = [reaction['smarts']]\n metadata[name] = reaction['metadata']\n return rxns, metadata" }, { "identifier": "DefaultExpander", "path": "rbc2/expansion/default_expander_interface.py", "snippet": "class DefaultExpander(Expander):\n \"\"\"A default base class for expanders. Contains methods which should be overwritten by subclasses,\n and others that optionally can be overwritten\"\"\"\n\n def __init__(self,\n network: Optional[Network] = None,\n config: Optional[Expansion_Config] = None):\n\n self.network = network\n\n self.config = config\n if self.config is None:\n self.config = Expansion_Config()\n\n self.rule_applicator = RuleApplicator(config=self.config)\n self.option_evaluation_method = create_evaluate_option_method(self.rule_applicator,\n self.config,\n self.network,\n reaction_processing_function=self.reaction_processing_function)\n\n self.calls = 0\n\n # these should change\n self.action_getter = None\n self.rxn_type = ''\n self.rxn_domain = ''\n self.score_key = ''\n\n def precedent_evaluation_function(self, reaction: Reaction) -> bool:\n \"\"\" Overwrite this function to evaluate precedents for a reaction\"\"\"\n return True\n\n def reaction_processing_function(self, reactions: List[Reaction]) -> List[Reaction]:\n \"\"\" Overwrite this function to apply processing once reactions have been constructed. 
Eg to remove cofactors\"\"\"\n return reactions\n\n def get_options(self, smi: str) -> List[ReactionOption]:\n if self.network is None:\n return self._create_new_options(smi)\n\n if self.network.are_options_available(smi, self.rxn_type):\n options = self.network.get_reaction_options(smi, self.rxn_type)\n sorted_options = sort_options_by_score(options)\n return sorted_options\n else:\n return self._create_new_options(smi)\n\n def create_option(self,\n smi,\n name,\n smarts,\n template_metadata,\n score) -> ReactionOption:\n\n option = ReactionOption(smi,\n name,\n smarts,\n template_metadata,\n self.rxn_type,\n self.rxn_domain,\n score,\n self.option_evaluation_method,\n precedent_search_function=self.precedent_evaluation_function)\n return option\n\n def get_reactions(self, smi: str) -> List[Reaction]:\n\n # expand is generate options, then create reactions.\n options = self.get_options(smi)\n\n reactions = []\n for opt in options:\n new_reactions = opt.evaluate()\n reactions.extend(new_reactions)\n\n if self.config.max_reactions is not None:\n reactions = reactions[:self.config.max_reactions]\n\n return reactions\n\n def number_of_rule_applications(self) -> int:\n return self.rule_applicator.rule_applications\n\n def number_of_calls(self) -> int:\n return self.calls\n\n def _create_new_options(self, smi: str):\n self.calls += 1\n smarts_dict, metadata_dict = self.action_getter.get_rxns(smi)\n options = []\n for i, name in enumerate(smarts_dict):\n score = metadata_dict[name][self.score_key]\n\n option = self.create_option(smi=smi,\n name=name,\n smarts=smarts_dict[name],\n template_metadata={name: metadata_dict[name]},\n score=score)\n\n options.append(option)\n\n sorted_options = sort_options_by_score(options)\n\n if self.network is not None:\n self.network.bulk_add_options(smi, self.rxn_type, sorted_options)\n\n return sorted_options" }, { "identifier": "Network", "path": "rbc2/reaction_network_entities/network.py", "snippet": "class Network():\n \"\"\" Network is used to keep a record of the outcome of all expansions.\"\"\"\n\n def __init__(self, reactions: Sequence[Reaction] = ()):\n\n self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set)\n self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set)\n self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict))\n self.reactions: Set[Reaction] = set()\n\n if len(reactions) != 0:\n for rxn in reactions:\n self.add_reaction(rxn)\n\n def add_reaction(self, reaction: Reaction):\n self.reactions.add(reaction)\n self.smi_produced_by[reaction.product].add(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].add(reaction)\n\n def remove_reaction(self, reaction: Reaction):\n self.reactions.discard(reaction)\n self.smi_produced_by[reaction.product].discard(reaction)\n for smi in reaction.substrates:\n self.smi_substrate_of[smi].discard(reaction)\n\n def add_option(self, option: ReactionOption):\n self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option\n\n def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]):\n self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options}\n\n def remove_option(self, option: ReactionOption):\n self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None)\n\n def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]:\n options_for_smi = self.reaction_options.get(smi, {})\n 
options_for_rxn_type = options_for_smi.get(rxn_type, {})\n return list(options_for_rxn_type.values())\n\n def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool:\n return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False\n\n def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]:\n return self.smi_produced_by.get(smi, set())\n\n def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]:\n return self.smi_substrate_of.get(smi, set())\n\n def all_smis(self) -> Set[Smi]:\n all_smis = set(self.smi_produced_by.keys())\n all_smis.update(set(self.smi_substrate_of.keys()))\n return all_smis\n\n def all_reactions(self) -> List[Reaction]:\n return list(self.reactions)\n\n def all_reaction_options(self) -> List[ReactionOption]:\n all_options = []\n for smi, rxn_type_options in self.reaction_options.items():\n for rxn_type, options_dict in rxn_type_options.items():\n for option_id, option in options_dict.items():\n all_options.append(option)\n return all_options\n\n def save(self):\n \"\"\"Save the network to a dict\"\"\"\n data = {\"reactions\": reactions_to_dicts(self.all_reactions()),\n \"reaction_options\": [option_to_dict(opt) for opt in self.all_reaction_options()]}\n return data\n\n def load(self, data: dict, expanders: List[Expander]):\n \"\"\"\n Load the network from data dict\n ReactionOptions will only be loaded if the relevant expander is provided\n \"\"\"\n\n # check each expander is associated with this network\n for expander in expanders:\n if expander.network != self:\n raise Exception(\"Can not load reaction options when expander is not associated with the same network\")\n\n # load reactions\n reaction_unique_id_dict = {}\n for reaction_dict in data['reactions']:\n reaction = reaction_from_dict(reaction_dict)\n reaction_unique_id_dict[reaction.unique_id] = reaction\n self.add_reaction(reaction)\n\n # load reaction options\n expander_dict = {exp.rxn_type: exp for exp in expanders}\n for opt_dict in data['reaction_options']:\n rxn_type = opt_dict['rxn_type']\n expander = expander_dict.get(rxn_type, None)\n if expander is None:\n continue\n\n option = option_from_dict(opt_dict, expander)\n\n # add reactions from ids\n for unique_id in opt_dict.get('reaction_ids', []):\n reaction = reaction_unique_id_dict.get(unique_id, None)\n if reaction is None:\n continue\n option.reactions.append(reaction)\n\n self.add_option(option)\n\n\n def get_pa_route(self, start_smi, starting_material_evaluator: StartingMaterialEvaluatorInterface):\n def get_smi_produced_by(smi):\n return list(self.smi_produced_by[smi])\n return get_pa_route(start_smi, starting_material_evaluator, get_smi_produced_by)" } ]
from typing import Optional from rbc2.configs.expansion_config import Expansion_Config from rbc2.expansion.expanders.action_getters.aizynthfinder.aizynthfinder_actions import \ AizynthfinderActionGetter from rbc2.expansion.expanders.action_getters.askcos.askcos_action_getter import Askcos_Action_Getter from rbc2.expansion.expanders.action_getters.ring_breaker.ringbreaker_actions import \ RingBreaker_ActionGetter from rbc2.expansion.default_expander_interface import DefaultExpander from rbc2.reaction_network_entities.network import Network
5,785
class AIZynthfinderExpander(DefaultExpander): def __init__(self, network: Optional[Network] = None, config: Optional[Expansion_Config] = None, template_column='retro_template', cutoff_cumulative=0.995, cutoff_number=50): super().__init__(network=network, config=config) self.action_getter = AizynthfinderActionGetter(template_column=template_column, cutoff_cumulative=cutoff_cumulative, cutoff_number=cutoff_number) self.rxn_type = 'aizynthfinder' self.rxn_domain = 'chemistry' self.score_key = 'policy_probability' class RingBreakerPolicyExpander(DefaultExpander): def __init__(self, network: Optional[Network] = None, config: Optional[Expansion_Config] = None, cutoff_cumulative=0.995, cutoff_number=10): super().__init__(network=network, config=config)
class AIZynthfinderExpander(DefaultExpander): def __init__(self, network: Optional[Network] = None, config: Optional[Expansion_Config] = None, template_column='retro_template', cutoff_cumulative=0.995, cutoff_number=50): super().__init__(network=network, config=config) self.action_getter = AizynthfinderActionGetter(template_column=template_column, cutoff_cumulative=cutoff_cumulative, cutoff_number=cutoff_number) self.rxn_type = 'aizynthfinder' self.rxn_domain = 'chemistry' self.score_key = 'policy_probability' class RingBreakerPolicyExpander(DefaultExpander): def __init__(self, network: Optional[Network] = None, config: Optional[Expansion_Config] = None, cutoff_cumulative=0.995, cutoff_number=10): super().__init__(network=network, config=config)
self.action_getter = RingBreaker_ActionGetter(cutoff_number=cutoff_number,
3
2023-12-30 11:33:41+00:00
8k
gregorybchris/typogenetics
tests/test_typogenetics.py
[ { "identifier": "Base", "path": "typogenetics/typogenetics.py", "snippet": "class Base(StrEnum):\n C = auto()\n G = auto()\n T = auto()\n A = auto()\n\n @classmethod\n def from_str(cls, base_str: str) -> \"Base\":\n return {\n \"C\": cls.C,\n \"G\": cls.G,\n \"T\": cls.T,\n \"A\": cls.A,\n }[base_str]\n\n def __repr__(self) -> str:\n return self.value.upper()\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def is_type(self, base_type: BaseType) -> bool:\n if base_type == BaseType.PURINE:\n return self.is_purine()\n return self.is_pyrimidine()\n\n def is_purine(self) -> bool:\n return self in [Base.A, Base.G]\n\n def is_pyrimidine(self) -> bool:\n return self in [Base.C, Base.T]\n\n def get_complement(self) -> \"Base\":\n return {\n Base.C: Base.G,\n Base.G: Base.C,\n Base.T: Base.A,\n Base.A: Base.T,\n }[self]" }, { "identifier": "Enzyme", "path": "typogenetics/typogenetics.py", "snippet": "class Enzyme:\n amino_acids: List[AminoAcid]\n\n def iter_amino_acids(self) -> Iterator[AminoAcid]:\n yield from self.amino_acids\n\n @classmethod\n def from_str(cls, enzyme_str: str) -> \"Enzyme\":\n amino_acids = []\n for amino_acid_str in enzyme_str.split(\"-\"):\n amino_acid = AminoAcid(amino_acid_str)\n amino_acids.append(amino_acid)\n return cls(amino_acids)\n\n def __repr__(self) -> str:\n return \"-\".join([str(b) for b in self.amino_acids])\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def __getitem__(self, index: int) -> AminoAcid:\n return self.amino_acids[index]\n\n def __len__(self) -> int:\n return len(self.amino_acids)" }, { "identifier": "Folder", "path": "typogenetics/typogenetics.py", "snippet": "class Folder:\n \"\"\"\n | ins | dir |\n | --- | --- |\n | cut | s |\n | del | s |\n | swi | r |\n | mvr | s |\n | mvl | s |\n | cop | r |\n | off | l |\n | ina | s |\n | inc | r |\n | ing | r |\n | int | l |\n | rpy | r |\n | rpu | l |\n | lpy | l |\n | lpu | l |\n\n | first | last | base |\n | ----- | ---- | ---- |\n | R | R | A |\n | R | U | C |\n | R | D | G |\n | R | L | T |\n \"\"\"\n\n @classmethod\n def fold(cls, enzyme: Enzyme) -> Orientation:\n turning_number = 0\n for amino_acid in enzyme.iter_amino_acids():\n turn = cls.get_turn(amino_acid)\n turning_number += turn.to_int()\n return Orientation.from_turning_number(turning_number)\n\n @classmethod\n def get_turn(cls, amino_acid: AminoAcid) -> Turn:\n return {\n AminoAcid.CUT: Turn.S,\n AminoAcid.DEL: Turn.S,\n AminoAcid.SWI: Turn.R,\n AminoAcid.MVR: Turn.S,\n AminoAcid.MVL: Turn.S,\n AminoAcid.COP: Turn.R,\n AminoAcid.OFF: Turn.L,\n AminoAcid.INA: Turn.S,\n AminoAcid.INC: Turn.R,\n AminoAcid.ING: Turn.R,\n AminoAcid.INT: Turn.L,\n AminoAcid.RPY: Turn.R,\n AminoAcid.RPU: Turn.L,\n AminoAcid.LPY: Turn.L,\n AminoAcid.LPU: Turn.L,\n }[amino_acid]\n\n @classmethod\n def get_binding_site(cls, enzyme: Enzyme, strand: Strand) -> Optional[int]:\n orientation = cls.fold(enzyme)\n binding_affinity = cls.get_binding_affinity(orientation)\n for unit, base in enumerate(strand.iter_bases()):\n if base == binding_affinity:\n return unit\n return None\n\n @classmethod\n def get_binding_affinity(cls, orientation: Orientation) -> Base:\n return {\n Orientation.R: Base.A,\n Orientation.U: Base.C,\n Orientation.D: Base.G,\n Orientation.L: Base.T,\n }[orientation]" }, { "identifier": "Orientation", "path": "typogenetics/typogenetics.py", "snippet": "class Orientation(StrEnum):\n U = auto()\n D = auto()\n L = auto()\n R = auto()\n\n @classmethod\n def from_turning_number(cls, turning_number: int) -> \"Orientation\":\n return {\n 
0: cls.R,\n 1: cls.D,\n 2: cls.L,\n 3: cls.U,\n }[turning_number % 4]" }, { "identifier": "Rewriter", "path": "typogenetics/typogenetics.py", "snippet": "class Rewriter:\n \"\"\"\n | ins | action |\n | --- | ---------------------------------------------- |\n | cut | cut strand(s) |\n | del | delete a base from strand |\n | swi | switch enzyme to other strand |\n | mvr | move one unit to the right |\n | mvl | move one unit to the left |\n | cop | turn on Copy mode |\n | off | turn off Copy mode |\n | ina | insert A to the right of this unit |\n | inc | insert C to the right of this unit |\n | ing | insert G to the right of this unit |\n | int | insert T to the right of this unit |\n | rpy | search for the nearest pyrimidine to the right |\n | rpu | search for the nearest purine to the right |\n | lpy | search for the nearest pyrimidine to the left |\n | lpu | search for the nearest purine to the left |\n \"\"\"\n\n # pylint: disable=too-many-branches\n @classmethod\n def rewrite(cls, enzyme: Enzyme, strand: Strand) -> List[Strand]:\n copy_mode = False\n\n unit = Folder.get_binding_site(enzyme, strand)\n logger.debug(f\"Rewriting strand {strand} with enzyme {enzyme}, unit={unit}\")\n if unit is None:\n return [strand]\n\n pairs = [BasePair(base, None) for base in strand.iter_bases()]\n\n logger.debug(f\"Init @ {unit}, copy={copy_mode}\")\n logger.debug(cls.pairs_to_string(pairs))\n\n strands = []\n for amino_acid in enzyme.iter_amino_acids():\n logger.debug(f\"Applying {amino_acid} @ {unit}, copy={copy_mode}\")\n\n if amino_acid == AminoAcid.CUT:\n cut_pairs = pairs[unit + 1 :]\n strands += cls.strands_from_pairs(cut_pairs)\n pairs = pairs[: unit + 1]\n elif amino_acid == AminoAcid.DEL:\n pairs[unit].bind = None\n unit -= 1\n # NOTE: It's not clear from the specification which direction we should move\n # after a deletion, we here we choose left arbitrarily.\n if unit < 0:\n logger.debug(\"Reached end of strand\")\n break\n if pairs[unit].bind is None:\n logger.debug(\"Reached end of strand\")\n break\n elif amino_acid == AminoAcid.SWI:\n if pairs[unit].comp is None:\n logger.debug(\"Tried to switch to empty base pair complement\")\n break\n for pair in pairs:\n pair.swap()\n pairs = pairs[::-1]\n unit = len(pairs) - unit - 1\n elif amino_acid in [AminoAcid.MVR, AminoAcid.MVL]:\n unit += cls.amino_acid_to_direction(amino_acid)\n if unit < 0 or unit >= len(pairs):\n logger.debug(\"Reached end of strand\")\n break\n if pairs[unit].bind is None:\n logger.debug(\"Reached end of strand\")\n break\n if copy_mode:\n pairs[unit].add_comp()\n elif amino_acid == AminoAcid.COP:\n copy_mode = True\n pair = pairs[unit]\n assert pair.bind is not None\n pair.comp = pair.bind.get_complement()\n elif amino_acid == AminoAcid.OFF:\n copy_mode = False\n elif amino_acid in [AminoAcid.INA, AminoAcid.INC, AminoAcid.ING, AminoAcid.INT]:\n bind = cls.amino_acid_to_base(amino_acid)\n comp = bind.get_complement() if copy_mode else None\n pairs.insert(unit + 1, BasePair(bind, comp))\n elif amino_acid in [AminoAcid.RPY, AminoAcid.RPU, AminoAcid.LPY, AminoAcid.LPU]:\n end_of_strand = False\n while True:\n unit += cls.amino_acid_to_direction(amino_acid)\n if unit < 0 or unit >= len(pairs):\n end_of_strand = True\n break\n pair = pairs[unit]\n bind_base = pair.bind\n if bind_base is None:\n end_of_strand = True\n break\n if copy_mode:\n pair.add_comp()\n if bind_base.is_type(cls.amino_acid_to_base_type(amino_acid)):\n break\n if end_of_strand:\n logger.debug(\"Reached end of strand\")\n break\n\n 
logger.debug(cls.pairs_to_string(pairs))\n\n strands += cls.strands_from_pairs(pairs)\n return strands\n\n @classmethod\n def strands_from_pairs(cls, pairs: List[BasePair]) -> List[Strand]:\n strands = []\n bind_bases = []\n comp_bases = []\n\n for pair in pairs:\n if pair.bind is not None:\n bind_bases.append(pair.bind)\n elif len(bind_bases) > 0:\n strands.append(Strand(bind_bases))\n bind_bases = []\n\n if pair.comp is not None:\n comp_bases.append(pair.comp)\n elif len(comp_bases) > 0:\n strands.append(Strand(comp_bases[::-1]))\n comp_bases = []\n\n if len(bind_bases) > 0:\n strands.append(Strand(bind_bases))\n if len(comp_bases) > 0:\n strands.append(Strand(comp_bases[::-1]))\n\n return strands\n\n @classmethod\n def amino_acid_to_base(cls, amino_acid: AminoAcid) -> Base:\n return {\n AminoAcid.INA: Base.A,\n AminoAcid.INC: Base.C,\n AminoAcid.ING: Base.G,\n AminoAcid.INT: Base.T,\n }[amino_acid]\n\n @classmethod\n def amino_acid_to_base_type(cls, amino_acid: AminoAcid) -> BaseType:\n return {\n AminoAcid.RPY: BaseType.PYRIMIDINE,\n AminoAcid.RPU: BaseType.PURINE,\n AminoAcid.LPY: BaseType.PYRIMIDINE,\n AminoAcid.LPU: BaseType.PURINE,\n }[amino_acid]\n\n @classmethod\n def amino_acid_to_direction(cls, amino_acid: AminoAcid) -> int:\n return {\n AminoAcid.RPY: 1,\n AminoAcid.RPU: 1,\n AminoAcid.LPY: -1,\n AminoAcid.LPU: -1,\n AminoAcid.MVR: 1,\n AminoAcid.MVL: -1,\n }[amino_acid]\n\n @classmethod\n def pairs_to_string(cls, pairs: List[BasePair]) -> str:\n res = \"[ \"\n comp_map = {Base.A: \"∀\", Base.C: \"Ↄ\", Base.G: \"⅁\", Base.T: \"⊥\"}\n for pair in pairs:\n if pair.comp is None:\n res += \" \"\n else:\n res += str(comp_map[pair.comp]) + \" \"\n res += \"]\\n[ \"\n for pair in pairs:\n if pair.bind is None:\n res += \" \"\n else:\n res += str(pair.bind) + \" \"\n res += \"]\"\n return res" }, { "identifier": "Strand", "path": "typogenetics/typogenetics.py", "snippet": "class Strand:\n bases: List[Base]\n\n @classmethod\n def from_str(cls, strand_str: str) -> \"Strand\":\n bases = []\n for base_str in strand_str:\n if base_str == \" \":\n continue\n base = Base.from_str(base_str)\n bases.append(base)\n return cls(bases)\n\n def iter_bases(self) -> Iterator[Base]:\n yield from self.bases\n\n def iter_duplets(self) -> Iterator[Duplet]:\n unit = 0\n while True:\n if unit + 1 >= len(self):\n break\n\n yield (self[unit], self[unit + 1])\n\n unit += 2\n\n def __repr__(self) -> str:\n return \"\".join([str(b) for b in self.bases])\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def __getitem__(self, unit: int) -> Base:\n return self.bases[unit]\n\n def __len__(self) -> int:\n return len(self.bases)" }, { "identifier": "Translator", "path": "typogenetics/typogenetics.py", "snippet": "class Translator:\n \"\"\"\n | | A | C | G | T |\n | --- | --- | --- | --- | --- |\n | A | | cut | del | swi |\n | C | mvr | mvl | cop | off |\n | G | ina | inc | ing | int |\n | T | rpy | rpu | lpy | lpu |\n \"\"\"\n\n @classmethod\n def translate(cls, strand: Strand) -> List[Enzyme]:\n enzymes = []\n amino_acids: List[AminoAcid] = []\n for duplet in strand.iter_duplets():\n amino_acid = cls._translate_duplet(duplet)\n if amino_acid is None and len(amino_acids) > 0:\n enzyme = Enzyme(amino_acids)\n enzymes.append(enzyme)\n amino_acids = []\n elif amino_acid is not None:\n amino_acids.append(amino_acid)\n\n if len(amino_acids) > 0:\n enzyme = Enzyme(amino_acids)\n enzymes.append(enzyme)\n\n return enzymes\n\n @classmethod\n def _translate_duplet(cls, duplet: Duplet) -> Optional[AminoAcid]:\n return 
{\n (Base.A, Base.A): None,\n (Base.A, Base.C): AminoAcid.CUT,\n (Base.A, Base.G): AminoAcid.DEL,\n (Base.A, Base.T): AminoAcid.SWI,\n (Base.C, Base.A): AminoAcid.MVR,\n (Base.C, Base.C): AminoAcid.MVL,\n (Base.C, Base.G): AminoAcid.COP,\n (Base.C, Base.T): AminoAcid.OFF,\n (Base.G, Base.A): AminoAcid.INA,\n (Base.G, Base.C): AminoAcid.INC,\n (Base.G, Base.G): AminoAcid.ING,\n (Base.G, Base.T): AminoAcid.INT,\n (Base.T, Base.A): AminoAcid.RPY,\n (Base.T, Base.C): AminoAcid.RPU,\n (Base.T, Base.G): AminoAcid.LPY,\n (Base.T, Base.T): AminoAcid.LPU,\n }[duplet]" } ]
from typogenetics.typogenetics import Base, Enzyme, Folder, Orientation, Rewriter, Strand, Translator
4,263
class TestTypogenetics: def test_strand_from_str(self) -> None: assert Strand.from_str("CG GA TA CT AA AC CG A") == Strand( [ Base.C, Base.G, Base.G, Base.A, Base.T, Base.A, Base.C, Base.T, Base.A, Base.A, Base.A, Base.C, Base.C, Base.G, Base.A, ] ) def test_translator(self) -> None: strand = Strand.from_str("CG GA TA CT AA AC CG A") assert Translator.translate(strand) == [ Enzyme.from_str("cop-ina-rpy-off"), Enzyme.from_str("cut-cop"), ] def test_folder(self) -> None: enzyme = Enzyme.from_str("cop-ina-rpy-off") strand = Strand.from_str("CG GA TA CT AA AC CG A")
class TestTypogenetics: def test_strand_from_str(self) -> None: assert Strand.from_str("CG GA TA CT AA AC CG A") == Strand( [ Base.C, Base.G, Base.G, Base.A, Base.T, Base.A, Base.C, Base.T, Base.A, Base.A, Base.A, Base.C, Base.C, Base.G, Base.A, ] ) def test_translator(self) -> None: strand = Strand.from_str("CG GA TA CT AA AC CG A") assert Translator.translate(strand) == [ Enzyme.from_str("cop-ina-rpy-off"), Enzyme.from_str("cut-cop"), ] def test_folder(self) -> None: enzyme = Enzyme.from_str("cop-ina-rpy-off") strand = Strand.from_str("CG GA TA CT AA AC CG A")
assert Folder.fold(enzyme) == Orientation.D
2
2023-12-28 08:59:06+00:00
8k
DerwenAI/textgraphs
textgraphs/pipe.py
[ { "identifier": "SPACY_MODEL", "path": "textgraphs/defaults.py", "snippet": "SPACY_MODEL: str = \"en_core_web_sm\"" }, { "identifier": "Edge", "path": "textgraphs/elem.py", "snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\n dst_node: int\n kind: RelEnum\n rel: str\n prob: float\n count: int = 1" }, { "identifier": "Node", "path": "textgraphs/elem.py", "snippet": "class Node: # pylint: disable=R0902\n \"\"\"\nA data class representing one node, i.e., an extracted phrase.\n \"\"\"\n node_id: int\n key: str\n span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ]\n text: str\n pos: str\n kind: NodeEnum\n loc: typing.List[ typing.List[ int ] ] = field(default_factory = lambda: [])\n label: typing.Optional[ str ] = None\n length: int = 1\n sub_obj: bool = False\n count: int = 0\n neighbors: int = 0\n weight: float = 0.0\n entity: typing.List[ LinkedEntity ] = field(default_factory = lambda: [])\n annotated: bool = False\n\n\n def get_linked_label (\n self\n ) -> typing.Optional[ str ]:\n \"\"\"\nWhen this node has a linked entity, return that IRI.\nOtherwise return its `label` value.\n\n returns:\na label for the linked entity\n \"\"\"\n if len(self.entity) > 0:\n return self.entity[0].iri\n\n return self.label\n\n\n def get_name (\n self\n ) -> str:\n \"\"\"\nReturn a brief name for the graphical depiction of this Node.\n\n returns:\nbrief label to be used in a graph\n \"\"\"\n if self.kind == NodeEnum.IRI:\n return self.label # type: ignore\n if self.kind == NodeEnum.LEM:\n return self.key\n\n return self.text\n\n\n def get_stacked_count (\n self\n ) -> int:\n \"\"\"\nReturn a modified count, to redact verbs and linked entities from\nthe stack-rank partitions.\n\n returns:\ncount, used for re-ranking extracted entities\n \"\"\"\n if self.pos == \"VERB\" or self.kind == NodeEnum.IRI:\n return 0\n\n return self.count\n\n\n def get_pos (\n self\n ) -> typing.Tuple[ int, int ]:\n \"\"\"\nGenerate a position span for `OpenNRE`.\n\n returns:\na position span needed for `OpenNRE` relation extraction\n \"\"\"\n position: typing.Tuple[ int, int ] = ( self.span.idx, self.span.idx + len(self.text) - 1, )\n return position" }, { "identifier": "NodeEnum", "path": "textgraphs/elem.py", "snippet": "class NodeEnum (enum.IntEnum):\n \"\"\"\nEnumeration for the kinds of node categories\n \"\"\"\n DEP = 0 # `spaCy` parse dependency\n LEM = 1 # lemmatized token\n ENT = 2 # named entity\n CHU = 3 # noun chunk\n IRI = 4 # IRI for linked entity\n\n def __str__ (\n self\n ) -> str:\n \"\"\"\nCodec for representing as a string.\n\n returns:\ndecoded string representation of the enumerated value\n \"\"\"\n decoder: typing.List[ str ] = [\n \"dep\",\n \"lem\",\n \"ent\",\n \"chu\",\n \"iri\",\n ]\n\n return decoder[self.value]" }, { "identifier": "NounChunk", "path": "textgraphs/elem.py", "snippet": "class NounChunk: # pylint: disable=R0902\n \"\"\"\nA data class representing one noun chunk, i.e., a candidate as an extracted phrase.\n \"\"\"\n span: spacy.tokens.span.Span\n text: str\n length: int\n lemma_key: str\n unseen: bool\n sent_id: int\n start: int = 0" }, { "identifier": "SimpleGraph", "path": "textgraphs/graph.py", "snippet": "class SimpleGraph:\n \"\"\"\nAn in-memory graph used to build a `MultiDiGraph` in NetworkX.\n \"\"\"\n\n def __init__ (\n self\n ) -> None:\n \"\"\"\nConstructor.\n \"\"\"\n self.nodes: typing.Dict[ str, Node ] = OrderedDict()\n self.edges: typing.Dict[ str, Edge ] = {}\n self.lemma_graph: nx.MultiDiGraph = 
nx.MultiDiGraph()\n\n\n def reset (\n self\n ) -> None:\n \"\"\"\nRe-initialize the data structures, resetting all but the configuration.\n \"\"\"\n self.nodes = OrderedDict()\n self.edges = {}\n self.lemma_graph = nx.MultiDiGraph()\n\n\n def make_node ( # pylint: disable=R0913,R0914\n self,\n tokens: typing.List[ Node ],\n key: str,\n span: spacy.tokens.token.Token,\n kind: NodeEnum,\n text_id: int,\n para_id: int,\n sent_id: int,\n *,\n label: typing.Optional[ str ] = None,\n length: int = 1,\n linked: bool = True,\n ) -> Node:\n \"\"\"\nLookup and return a `Node` object.\nBy default, link matching keys into the same node.\nOtherwise instantiate a new node if it does not exist already.\n\n tokens:\nlist of parsed tokens\n\n key:\nlemma key (invariant)\n\n span:\ntoken span for the parsed entity\n\n kind:\nthe kind of this `Node` object\n\n text_id:\ntext (top-level document) identifier\n\n para_id:\nparagraph identitifer\n\n sent_id:\nsentence identifier\n\n label:\nnode label (for a new object)\n\n length:\nlength of token span\n\n linked:\nflag for whether this links to an entity\n\n returns:\nthe constructed `Node` object\n \"\"\"\n token_id: int = 0\n token_text: str = key\n token_pos: str = \"PROPN\"\n\n if span is not None:\n token_id = span.i\n token_text = span.text\n token_pos = span.pos_\n\n location: typing.List[ int ] = [ # type: ignore\n text_id,\n para_id,\n sent_id,\n token_id,\n ]\n\n if not linked:\n # construct a placeholder node (stopwords)\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n span.text,\n span.pos_,\n kind,\n loc = [ location ],\n length = length,\n )\n\n elif key in self.nodes:\n # link to previously constructed entity node\n self.nodes[key].loc.append(location)\n self.nodes[key].count += 1\n\n # construct a new node for entity or lemma\n else:\n self.nodes[key] = Node(\n len(self.nodes),\n key,\n span,\n token_text,\n token_pos,\n kind,\n loc = [ location ],\n label = label,\n length = length,\n count = 1,\n )\n\n node: Node = self.nodes.get(key) # type: ignore\n\n if kind not in [ NodeEnum.CHU, NodeEnum.IRI ]:\n tokens.append(node)\n\n return node # type: ignore\n\n\n def make_edge ( # pylint: disable=R0913\n self,\n src_node: Node,\n dst_node: Node,\n kind: RelEnum,\n rel: str,\n prob: float,\n *,\n debug: bool = False,\n ) -> typing.Optional[ Edge ]:\n \"\"\"\nLookup an edge, creating a new one if it does not exist already,\nand increment the count if it does.\n\n src_node:\nsource node in the triple\n\n dst_node:\ndestination node in the triple\n\n kind:\nthe kind of this `Edge` object\n\n rel:\nrelation label\n\n prob:\nprobability of this `Edge` within the graph\n\n debug:\ndebugging flag\n\n returns:\nthe constructed `Edge` object; this may be `None` if the input parameters indicate skipping the edge\n \"\"\"\n key: str = \".\".join([\n str(src_node.node_id),\n str(dst_node.node_id),\n rel.replace(\" \", \"_\"),\n str(kind.value),\n ])\n\n if debug:\n ic(key)\n\n if key in self.edges:\n self.edges[key].count += 1\n\n elif src_node.node_id != dst_node.node_id:\n # preclude cycles in the graph\n self.edges[key] = Edge(\n src_node.node_id,\n dst_node.node_id,\n kind,\n rel,\n prob,\n )\n\n if debug:\n ic(self.edges.get(key))\n\n return self.edges.get(key)\n\n\n def construct_lemma_graph (\n self,\n *,\n debug: bool = False,\n ) -> None:\n \"\"\"\nConstruct the base level of the _lemma graph_ from the collected\nelements. 
This gets represented in `NetworkX` as a directed graph\nwith parallel edges.\n\n debug:\ndebugging flag\n \"\"\"\n # add the nodes\n self.lemma_graph.add_nodes_from([\n node.node_id\n for node in self.nodes.values()\n ])\n\n # populate the minimum required node properties\n for node_key, node in self.nodes.items():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"title\"] = node_key\n nx_node[\"size\"] = node.count\n nx_node[\"value\"] = node.weight\n\n if debug:\n ic(nx_node)\n\n # add the edges and their properties\n self.lemma_graph.add_edges_from([\n (\n edge.src_node,\n edge.dst_node,\n {\n \"kind\": str(edge.kind),\n \"title\": edge.rel,\n \"weight\": float(edge.count),\n \"prob\": edge.prob,\n \"count\": edge.count,\n },\n )\n for edge_key, edge in self.edges.items()\n ])\n\n\n def dump_lemma_graph (\n self,\n ) -> str:\n \"\"\"\nDump the _lemma graph_ as a JSON string in _node-link_ format,\nsuitable for serialization and subsequent use in JavaScript,\nNeo4j, Graphistry, etc.\n\nMake sure to call beforehand: `TextGraphs.calc_phrase_ranks()`\n\n returns:\na JSON representation of the exported _lemma graph_\n \"\"\"\n # populate the optional node properties\n for node in self.nodes.values():\n nx_node = self.lemma_graph.nodes[node.node_id]\n nx_node[\"name\"] = node.text\n nx_node[\"kind\"] = str(node.kind)\n nx_node[\"iri\"] = node.label\n nx_node[\"subobj\"] = node.sub_obj\n nx_node[\"pos\"] = node.pos\n nx_node[\"loc\"] = str(node.loc)\n\n return json.dumps(\n nx.node_link_data(self.lemma_graph),\n sort_keys = True,\n indent = 2,\n separators = ( \",\", \":\" ),\n )" } ]
import abc import asyncio import functools import itertools import operator import traceback import typing import networkx as nx # pylint: disable=E0401 import spacy # pylint: disable=E0401 from icecream import ic # pylint: disable=E0401,W0611 from .defaults import SPACY_MODEL from .elem import Edge, Node, NodeEnum, NounChunk from .graph import SimpleGraph
4,434
""" for src, iri, dst in self.gen_triples(pipe, debug = debug): await queue.put(( src, iri, dst, )) class Pipeline: # pylint: disable=R0902,R0903 """ Manage parsing of a document, which is assumed to be paragraph-sized. """ def __init__ ( # pylint: disable=R0913 self, text_input: str, tok_pipe: spacy.Language, ner_pipe: spacy.Language, aux_pipe: spacy.Language, kg: KnowledgeGraph, # pylint: disable=C0103 infer_rels: typing.List[ InferRel ], ) -> None: """ Constructor. text_input: raw text to be parsed tok_pipe: the `spaCy.Language` pipeline used for tallying individual tokens ner_pipe: the `spaCy.Language` pipeline used for tallying named entities aux_pipe: the `spaCy.Language` pipeline used for auxiliary components (e.g., `DBPedia Spotlight`) kg: knowledge graph used for entity linking infer_rels: a list of components for inferring relations """ self.text: str = text_input # `tok_doc` provides a stream of individual tokens self.tok_doc: spacy.tokens.Doc = tok_pipe(self.text) # `ner_doc` provides the merged-entity spans from NER self.ner_doc: spacy.tokens.Doc = ner_pipe(self.text) # `aux_doc` e.g., span re-indexing for Spotlight entity linking self.aux_doc: spacy.tokens.Doc = aux_pipe(self.text) self.kg: KnowledgeGraph = kg # pylint: disable=C0103 self.infer_rels: typing.List[ InferRel ] = infer_rels # list of Node objects for each parsed token, in sequence self.tokens: typing.List[ Node ] = [] # set of Edge objects generated by this Pipeline self.edges: typing.List[ Edge ] = [] @classmethod def get_lemma_key ( cls, span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ], *, placeholder: bool = False, ) -> str: """ Compose a unique, invariant lemma key for the given span. span: span of tokens within the lemma placeholder: flag for whether to create a placeholder returns: a composed lemma key """ if isinstance(span, spacy.tokens.token.Token): terms: typing.List[ str ] = [ span.lemma_.strip().lower(), span.pos_, ] if placeholder: terms.insert(0, str(span.i)) else: terms = functools.reduce( operator.iconcat, [ [ token.lemma_.strip().lower(), token.pos_, ] for token in span ], [], ) return ".".join(terms) def get_ent_lemma_keys ( self, ) -> typing.Iterator[ typing.Tuple[ str, int ]]: """ Iterate through the fully qualified lemma keys for an extracted entity. yields: the lemma keys within an extracted entity """ for ent in self.tok_doc.ents: yield self.get_lemma_key(ent), len(ent) def link_noun_chunks ( self, nodes: dict, *, debug: bool = False,
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Leveraging a factory pattern for NLP pipelines. This class handles processing for one "chunk" of raw text input to analyze, which is typically a paragraph. In other words, objects in this class are expected to get recycled when processing moves on to the next paragraph, to ease memory requirements. see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md """ ###################################################################### ## class definitions class Component (abc.ABC): # pylint: disable=R0903 """ Abstract base class for a `spaCy` pipeline component. """ @abc.abstractmethod def augment_pipe ( self, factory: "PipelineFactory", ) -> None: """ Encapsulate a `spaCy` call to `add_pipe()` configuration. factory: a `PipelineFactory` used to configure components """ raise NotImplementedError class KnowledgeGraph (Component): """ Base class for a _knowledge graph_ interface. """ def augment_pipe ( self, factory: "PipelineFactory", ) -> None: """ Encapsulate a `spaCy` call to `add_pipe()` configuration. factory: a `PipelineFactory` used to configure components """ pass # pylint: disable=W0107 def remap_ner ( self, label: typing.Optional[ str ], ) -> typing.Optional[ str ]: """ Remap the OntoTypes4 values from NER output to more general-purpose IRIs. label: input NER label, an `OntoTypes4` value returns: an IRI for the named entity """ return label def normalize_prefix ( self, iri: str, *, debug: bool = False, # pylint: disable=W0613 ) -> str: """ Normalize the given IRI to use standard namespace prefixes. iri: input IRI, in fully-qualified domain representation debug: debugging flag returns: the compact IRI representation, using an RDF namespace prefix """ return iri def perform_entity_linking ( self, graph: SimpleGraph, pipe: "Pipeline", *, debug: bool = False, ) -> None: """ Perform _entity linking_ based on "spotlight" and other services. graph: source graph pipe: configured pipeline for the current document debug: debugging flag """ pass # pylint: disable=W0107 def resolve_rel_iri ( self, rel: str, *, lang: str = "en", # pylint: disable=W0613 debug: bool = False, # pylint: disable=W0613 ) -> typing.Optional[ str ]: """ Resolve a `rel` string from a _relation extraction_ model which has been trained on this knowledge graph. rel: relation label, generation these source from Wikidata for many RE projects lang: language identifier debug: debugging flag returns: a resolved IRI """ return rel class InferRel (abc.ABC): # pylint: disable=R0903 """ Abstract base class for a _relation extraction_ model wrapper. """ @abc.abstractmethod def gen_triples ( self, pipe: "Pipeline", *, debug: bool = False, ) -> typing.Iterator[typing.Tuple[ Node, str, Node ]]: """ Infer relations as triples through a generator _iteratively_. pipe: configured pipeline for the current document debug: debugging flag yields: generated triples """ raise NotImplementedError async def gen_triples_async ( self, pipe: "Pipeline", queue: asyncio.Queue, *, debug: bool = False, ) -> None: """ Infer relations as triples produced to a queue _concurrently_. pipe: configured pipeline for the current document queue: queue of inference tasks to be performed debug: debugging flag """ for src, iri, dst in self.gen_triples(pipe, debug = debug): await queue.put(( src, iri, dst, )) class Pipeline: # pylint: disable=R0902,R0903 """ Manage parsing of a document, which is assumed to be paragraph-sized. 
""" def __init__ ( # pylint: disable=R0913 self, text_input: str, tok_pipe: spacy.Language, ner_pipe: spacy.Language, aux_pipe: spacy.Language, kg: KnowledgeGraph, # pylint: disable=C0103 infer_rels: typing.List[ InferRel ], ) -> None: """ Constructor. text_input: raw text to be parsed tok_pipe: the `spaCy.Language` pipeline used for tallying individual tokens ner_pipe: the `spaCy.Language` pipeline used for tallying named entities aux_pipe: the `spaCy.Language` pipeline used for auxiliary components (e.g., `DBPedia Spotlight`) kg: knowledge graph used for entity linking infer_rels: a list of components for inferring relations """ self.text: str = text_input # `tok_doc` provides a stream of individual tokens self.tok_doc: spacy.tokens.Doc = tok_pipe(self.text) # `ner_doc` provides the merged-entity spans from NER self.ner_doc: spacy.tokens.Doc = ner_pipe(self.text) # `aux_doc` e.g., span re-indexing for Spotlight entity linking self.aux_doc: spacy.tokens.Doc = aux_pipe(self.text) self.kg: KnowledgeGraph = kg # pylint: disable=C0103 self.infer_rels: typing.List[ InferRel ] = infer_rels # list of Node objects for each parsed token, in sequence self.tokens: typing.List[ Node ] = [] # set of Edge objects generated by this Pipeline self.edges: typing.List[ Edge ] = [] @classmethod def get_lemma_key ( cls, span: typing.Union[ spacy.tokens.span.Span, spacy.tokens.token.Token ], *, placeholder: bool = False, ) -> str: """ Compose a unique, invariant lemma key for the given span. span: span of tokens within the lemma placeholder: flag for whether to create a placeholder returns: a composed lemma key """ if isinstance(span, spacy.tokens.token.Token): terms: typing.List[ str ] = [ span.lemma_.strip().lower(), span.pos_, ] if placeholder: terms.insert(0, str(span.i)) else: terms = functools.reduce( operator.iconcat, [ [ token.lemma_.strip().lower(), token.pos_, ] for token in span ], [], ) return ".".join(terms) def get_ent_lemma_keys ( self, ) -> typing.Iterator[ typing.Tuple[ str, int ]]: """ Iterate through the fully qualified lemma keys for an extracted entity. yields: the lemma keys within an extracted entity """ for ent in self.tok_doc.ents: yield self.get_lemma_key(ent), len(ent) def link_noun_chunks ( self, nodes: dict, *, debug: bool = False,
) -> typing.List[ NounChunk ]:
4
2023-12-25 11:42:53+00:00
8k
proger/nanokitchen
mixer_seq_simple.py
[ { "identifier": "Mamba", "path": "mamba_simple.py", "snippet": "class Mamba(nn.Module):\n def __init__(\n self,\n d_model,\n d_state=16,\n d_conv=4,\n expand=2,\n dt_rank=\"auto\",\n dt_min=0.001,\n dt_max=0.1,\n dt_init=\"random\",\n dt_scale=1.0,\n dt_init_floor=1e-4,\n conv_bias=True,\n bias=False,\n use_fast_path=False, # Fused kernel options\n layer_idx=None,\n device=None,\n dtype=None,\n ):\n factory_kwargs = {\"device\": device, \"dtype\": dtype}\n super().__init__()\n self.d_model = d_model\n self.d_state = d_state\n self.d_conv = d_conv\n self.expand = expand\n self.d_inner = int(self.expand * self.d_model)\n self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == \"auto\" else dt_rank\n self.use_fast_path = use_fast_path\n self.layer_idx = layer_idx\n\n self.in_proj = Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs)\n\n self.conv1d = nn.Conv1d(\n in_channels=self.d_inner,\n out_channels=self.d_inner,\n bias=conv_bias,\n kernel_size=d_conv,\n groups=self.d_inner,\n padding=d_conv - 1,\n **factory_kwargs,\n )\n\n self.activation = \"silu\"\n self.act = nn.SiLU()\n\n self.x_proj = nn.Linear(\n self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs\n )\n self.dt_proj = Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs)\n\n # Initialize special dt projection to preserve variance at initialization\n dt_init_std = self.dt_rank**-0.5 * dt_scale\n if dt_init == \"constant\":\n nn.init.constant_(self.dt_proj.weight, dt_init_std)\n elif dt_init == \"random\":\n nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std)\n else:\n raise NotImplementedError\n\n # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max\n dt = torch.exp(\n torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min))\n + math.log(dt_min)\n ).clamp(min=dt_init_floor)\n # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759\n inv_dt = dt + torch.log(-torch.expm1(-dt))\n with torch.no_grad():\n self.dt_proj.bias.copy_(inv_dt)\n # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit\n self.dt_proj.bias._no_reinit = True\n\n # S4D real initialization\n A = repeat(\n torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device),\n \"n -> d n\",\n d=self.d_inner,\n ).contiguous()\n A_log = torch.log(A) # Keep A_log in fp32\n self.A_log = nn.Parameter(A_log)\n self.A_log._no_weight_decay = True\n\n # D \"skip\" parameter\n self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32\n self.D._no_weight_decay = True\n\n self.out_proj = Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs)\n\n def forward(self, hidden_states, inference_params=None):\n \"\"\"\n hidden_states: (B, L, D)\n Returns: same shape as hidden_states\n \"\"\"\n batch, seqlen, dim = hidden_states.shape\n\n conv_state, ssm_state = None, None\n if inference_params is not None:\n conv_state, ssm_state = self._get_states_from_cache(inference_params, batch)\n if inference_params.seqlen_offset > 0:\n # The states are updated inplace\n out, _, _ = self.step(hidden_states, conv_state, ssm_state)\n return out\n\n # We do matmul and transpose BLH -> HBL at the same time\n xz = rearrange(\n self.in_proj(rearrange(hidden_states, \"b l d -> (b l) d\")),\n \"(b l) d -> b d l\",\n l=seqlen,\n )\n if self.in_proj.bias is not None:\n xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), \"d -> d 1\")\n\n A = -torch.exp(self.A_log.float()) # (d_inner, d_state)\n # In the 
backward pass we write dx and dz next to each other to avoid torch.cat\n if self.use_fast_path and inference_params is None: # Doesn't support outputting the states\n out = mamba_inner_fn(\n xz,\n self.conv1d.weight,\n self.conv1d.bias,\n self.x_proj.weight,\n self.dt_proj.weight,\n self.out_proj.weight,\n self.out_proj.bias,\n A,\n None, # input-dependent B\n None, # input-dependent C\n self.D.float(),\n delta_bias=self.dt_proj.bias.float(),\n delta_softplus=True,\n )\n else:\n x, z = xz.chunk(2, dim=1)\n # Compute short convolution\n if conv_state is not None:\n # If we just take x[:, :, -self.d_conv :], it will error if seqlen < self.d_conv\n # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise.\n conv_state.copy_(F.pad(x, (self.d_conv - x.shape[-1], 0))) # Update state (B D W)\n if causal_conv1d_fn is None:\n x = self.act(self.conv1d(x)[..., :seqlen])\n else:\n assert self.activation in [\"silu\", \"swish\"]\n x = causal_conv1d_fn(\n x=x,\n weight=rearrange(self.conv1d.weight, \"d 1 w -> d w\"),\n bias=self.conv1d.bias,\n activation=self.activation,\n )\n\n # We're careful here about the layout, to avoid extra transposes.\n # We want dt to have d as the slowest moving dimension\n # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.\n x_dbl = self.x_proj(rearrange(x, \"b d l -> (b l) d\")) # (bl d)\n dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1)\n #dt = self.dt_proj.weight @ dt.t()\n #dt = rearrange(dt, \"d (b l) -> b d l\", l=seqlen)\n dt = self.dt_proj(dt)\n dt = rearrange(dt, \"(b l) d -> b d l\", l=seqlen)\n B = rearrange(B, \"(b l) dstate -> b dstate l\", l=seqlen).contiguous()\n C = rearrange(C, \"(b l) dstate -> b dstate l\", l=seqlen).contiguous()\n assert self.activation in [\"silu\", \"swish\"]\n y = selective_scan_fn(\n x,\n dt,\n A,\n B,\n C,\n self.D.float(),\n z=z,\n delta_bias=self.dt_proj.bias.float(),\n delta_softplus=True,\n return_last_state=ssm_state is not None,\n )\n if ssm_state is not None:\n y, last_state = y\n ssm_state.copy_(last_state)\n y = rearrange(y, \"b d l -> b l d\")\n out = self.out_proj(y)\n return out\n\n def step(self, hidden_states, conv_state, ssm_state):\n dtype = hidden_states.dtype\n assert hidden_states.shape[1] == 1, \"Only support decoding with 1 token at a time for now\"\n xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D)\n x, z = xz.chunk(2, dim=-1) # (B D)\n\n # Conv step\n if causal_conv1d_update is None:\n conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W)\n conv_state[:, :, -1] = x\n x = torch.sum(conv_state * rearrange(self.conv1d.weight, \"d 1 w -> d w\"), dim=-1) # (B D)\n if self.conv1d.bias is not None:\n x = x + self.conv1d.bias\n x = self.act(x).to(dtype=dtype)\n else:\n x = causal_conv1d_update(\n x,\n conv_state,\n rearrange(self.conv1d.weight, \"d 1 w -> d w\"),\n self.conv1d.bias,\n self.activation,\n )\n\n x_db = self.x_proj(x) # (B dt_rank+2*d_state)\n dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)\n # Don't add dt_bias here\n dt = F.linear(dt, self.dt_proj.weight) # (B d_inner)\n A = -torch.exp(self.A_log.float()) # (d_inner, d_state)\n\n # SSM step\n if selective_state_update is None:\n # Discretize A and B\n dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype))\n dA = torch.exp(torch.einsum(\"bd,dn->bdn\", dt, A))\n dB = torch.einsum(\"bd,bn->bdn\", dt, B)\n ssm_state.copy_(ssm_state * dA + rearrange(x, \"b d -> b d 1\") * dB)\n y = 
torch.einsum(\"bdn,bn->bd\", ssm_state.to(dtype), C)\n y = y + self.D.to(dtype) * x\n y = y * self.act(z) # (B D)\n else:\n y = selective_state_update(\n ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True\n )\n\n out = self.out_proj(y)\n return out.unsqueeze(1), conv_state, ssm_state\n\n def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):\n device = self.out_proj.weight.device\n conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype\n conv_state = torch.zeros(\n batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype\n )\n ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype\n # ssm_dtype = torch.float32\n ssm_state = torch.zeros(\n batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype\n )\n return conv_state, ssm_state\n\n def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False):\n assert self.layer_idx is not None\n if self.layer_idx not in inference_params.key_value_memory_dict:\n batch_shape = (batch_size,)\n conv_state = torch.zeros(\n batch_size,\n self.d_model * self.expand,\n self.d_conv,\n device=self.conv1d.weight.device,\n dtype=self.conv1d.weight.dtype,\n )\n ssm_state = torch.zeros(\n batch_size,\n self.d_model * self.expand,\n self.d_state,\n device=self.dt_proj.weight.device,\n dtype=self.dt_proj.weight.dtype,\n # dtype=torch.float32,\n )\n inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state)\n else:\n conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx]\n # TODO: What if batch size changes between generation, and we reuse the same states?\n if initialize_states:\n conv_state.zero_()\n ssm_state.zero_()\n return conv_state, ssm_state" }, { "identifier": "Block", "path": "mamba_simple.py", "snippet": "class Block(nn.Module):\n def __init__(\n self, dim, mixer_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False\n ):\n \"\"\"\n Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection\"\n\n This Block has a slightly different structure compared to a regular\n prenorm Transformer block.\n The standard block is: LN -> MHA/MLP -> Add.\n [Ref: https://arxiv.org/abs/2002.04745]\n Here we have: Add -> LN -> Mixer, returning both\n the hidden_states (output of the mixer) and the residual.\n This is purely for performance reasons, as we can fuse add and LayerNorm.\n The residual needs to be provided (except for the very first block).\n \"\"\"\n super().__init__()\n self.residual_in_fp32 = residual_in_fp32\n self.fused_add_norm = fused_add_norm\n self.mixer = mixer_cls(dim)\n self.norm = norm_cls(dim)\n if self.fused_add_norm:\n assert RMSNorm is not None, \"RMSNorm import fails\"\n assert isinstance(\n self.norm, (nn.LayerNorm, RMSNorm)\n ), \"Only LayerNorm and RMSNorm are supported for fused_add_norm\"\n\n def forward(\n self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None\n ):\n r\"\"\"Pass the input through the encoder layer.\n\n Args:\n hidden_states: the sequence to the encoder layer (required).\n residual: hidden_states = Mixer(LN(residual))\n \"\"\"\n if not self.fused_add_norm:\n residual = (hidden_states + residual) if residual is not None else hidden_states\n hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype))\n if self.residual_in_fp32:\n residual = residual.to(torch.float32)\n else:\n fused_add_norm_fn = rms_norm_fn if isinstance(self.norm, RMSNorm) 
else layer_norm_fn\n hidden_states, residual = fused_add_norm_fn(\n hidden_states,\n self.norm.weight,\n self.norm.bias,\n residual=residual,\n prenorm=True,\n residual_in_fp32=self.residual_in_fp32,\n eps=self.norm.eps,\n )\n hidden_states = self.mixer(hidden_states, inference_params=inference_params)\n return hidden_states, residual\n\n def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):\n return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)" } ]
import math import json import os import torch import torch.nn as nn from functools import partial from collections import namedtuple from mamba_ssm.models.config_mamba import MambaConfig from mamba_ssm.utils.generation import GenerationMixin from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf from mamba_simple import Mamba, Block from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn
3,838
# Copyright (c) 2023, Albert Gu, Tri Dao.

try:
except ImportError:
    RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None


def create_block(
    d_model,
    ssm_cfg=None,
    norm_epsilon=1e-5,
    rms_norm=False,
    residual_in_fp32=False,
    fused_add_norm=False,
    layer_idx=None,
    device=None,
    dtype=None,
):
    if ssm_cfg is None:
        ssm_cfg = {}
    factory_kwargs = {"device": device, "dtype": dtype}
# Copyright (c) 2023, Albert Gu, Tri Dao.

try:
except ImportError:
    RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None


def create_block(
    d_model,
    ssm_cfg=None,
    norm_epsilon=1e-5,
    rms_norm=False,
    residual_in_fp32=False,
    fused_add_norm=False,
    layer_idx=None,
    device=None,
    dtype=None,
):
    if ssm_cfg is None:
        ssm_cfg = {}
    factory_kwargs = {"device": device, "dtype": dtype}
mixer_cls = partial(Mamba, layer_idx=layer_idx, **ssm_cfg, **factory_kwargs)
0
2023-12-27 12:13:00+00:00
8k
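To make the snippets in this record easier to follow, here is a minimal sketch of how the Block and Mamba classes shown above compose, assuming the mamba_simple module is importable and the CUDA selective-scan kernels are installed; the dimensions and layer index are illustrative and not taken from the record.

from functools import partial

import torch
import torch.nn as nn

from mamba_simple import Mamba, Block

d_model = 256                                    # illustrative model width
mixer_cls = partial(Mamba, layer_idx=0)          # Block instantiates this as mixer_cls(dim)
norm_cls = partial(nn.LayerNorm, eps=1e-5)
block = Block(d_model, mixer_cls, norm_cls=norm_cls).cuda()

hidden_states = torch.randn(2, 64, d_model, device="cuda")   # (batch, seqlen, d_model)
hidden_states, residual = block(hidden_states)                # Block returns (mixer output, residual)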
linancn/TianGong-AI-LangServe
src/agents/agent.py
[ { "identifier": "SearchInternet", "path": "src/tools/search_internet.py", "snippet": "class SearchInternet(BaseTool):\n name = \"search_internet_tool\"\n description = \"Search the internet for the up-to-date information.\"\n\n class InputSchema(BaseModel):\n query: str\n\n args_schema: Type[BaseModel] = InputSchema\n\n def _run(\n self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool synchronously.\"\"\"\n search = DuckDuckGoSearchResults()\n results = search.run(query)\n\n pattern = r\"\\[snippet: (.*?), title: (.*?), link: (.*?)\\]\"\n matches = re.findall(pattern, results)\n\n docs = [\n {\"snippet\": match[0], \"title\": match[1], \"link\": match[2]}\n for match in matches\n ]\n\n docs_list = []\n\n for doc in docs:\n docs_list.append(\n {\n \"content\": doc[\"snippet\"],\n \"source\": \"[{}]({})\".format(doc[\"title\"], doc[\"link\"]),\n }\n )\n\n return docs_list\n\n async def _arun(\n self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n search = DuckDuckGoSearchResults()\n results = search.run(query)\n\n pattern = r\"\\[snippet: (.*?), title: (.*?), link: (.*?)\\]\"\n matches = re.findall(pattern, results)\n\n docs = [\n {\"snippet\": match[0], \"title\": match[1], \"link\": match[2]}\n for match in matches\n ]\n\n docs_list = []\n\n for doc in docs:\n docs_list.append(\n {\n \"content\": doc[\"snippet\"],\n \"source\": \"[{}]({})\".format(doc[\"title\"], doc[\"link\"]),\n }\n )\n\n return docs_list" }, { "identifier": "SearchLCADB", "path": "src/tools/search_lca_db.py", "snippet": "class SearchLCADB(BaseTool):\n name = \"search_lca_tool\"\n description = \"Use original query to search in the Life Cycle Assessment Database.\"\n\n class InputSchema(BaseModel):\n query: str\n\n args_schema: Type[BaseModel] = InputSchema\n\n def _run(\n self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool synchronously.\"\"\"\n results = xata.data().search_branch(\n branch_name=xata_branch, payload={\"query\": query}\n )\n\n docs = results[\"records\"]\n\n return docs\n\n async def _arun(\n self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n results = xata.data().search_branch(\n branch_name=xata_branch, payload={\"query\": query}\n )\n\n docs = results[\"records\"]\n\n return docs" }, { "identifier": "SearchVectorDB", "path": "src/tools/search_vector_db.py", "snippet": "class SearchVectorDB(BaseTool):\n name = \"search_vectordb_tool\"\n description = \"Use original query to semantic search in academic or professional vector database.\"\n\n llm_model = os.getenv(\"OPENAI_MODEL\")\n openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n langchain_verbose = os.getenv(\"LANGCHAIN_VERBOSE\", \"False\") == \"True\"\n pinecone_api_key = os.getenv(\"PINECONE_API_KEY\")\n pinecone_environment = os.getenv(\"PINECONE_ENVIRONMENT\")\n pinecone_index = os.getenv(\"PINECONE_INDEX\")\n\n class InputSchema(BaseModel):\n query: str\n\n args_schema: Type[BaseModel] = InputSchema\n\n def vector_database_query_func_calling_chain(self):\n func_calling_json_schema = {\n \"title\": \"get_querys_and_filters_to_search_vector_database\",\n \"description\": \"Extract the queries and filters for a vector database semantic search\",\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"title\": \"Query\",\n \"description\": \"The queries extracted for a 
vector database semantic search\",\n \"type\": \"string\",\n },\n \"source\": {\n \"title\": \"Source Filter\",\n \"description\": \"Journal Name or Source extracted for a vector database semantic search, MUST be in upper case\",\n \"type\": \"string\",\n \"enum\": [\n \"AGRICULTURE, ECOSYSTEMS & ENVIRONMENT\",\n \"ANNUAL REVIEW OF ECOLOGY, EVOLUTION, AND SYSTEMATICS\",\n \"ANNUAL REVIEW OF ENVIRONMENT AND RESOURCES\",\n \"APPLIED CATALYSIS B: ENVIRONMENTAL\",\n \"BIOGEOSCIENCES\",\n \"BIOLOGICAL CONSERVATION\",\n \"BIOTECHNOLOGY ADVANCES\",\n \"CONSERVATION BIOLOGY\",\n \"CONSERVATION LETTERS\",\n \"CRITICAL REVIEWS IN ENVIRONMENTAL SCIENCE AND TECHNOLOGY\",\n \"DIVERSITY AND DISTRIBUTIONS\",\n \"ECOGRAPHY\",\n \"ECOLOGICAL APPLICATIONS\",\n \"ECOLOGICAL ECONOMICS\",\n \"ECOLOGICAL MONOGRAPHS\",\n \"ECOLOGY\",\n \"ECOLOGY LETTERS\",\n \"ECONOMIC SYSTEMS RESEARCH\",\n \"ECOSYSTEM HEALTH AND SUSTAINABILITY\",\n \"ECOSYSTEM SERVICES\",\n \"ECOSYSTEMS\",\n \"ENERGY & ENVIRONMENTAL SCIENCE\",\n \"ENVIRONMENT INTERNATIONAL\",\n \"ENVIRONMENTAL CHEMISTRY LETTERS\",\n \"ENVIRONMENTAL HEALTH PERSPECTIVES\",\n \"ENVIRONMENTAL POLLUTION\",\n \"ENVIRONMENTAL SCIENCE & TECHNOLOGY\",\n \"ENVIRONMENTAL SCIENCE & TECHNOLOGY LETTERS\",\n \"ENVIRONMENTAL SCIENCE AND ECOTECHNOLOGY\",\n \"ENVIRONMENTAL SCIENCE AND POLLUTION RESEARCH\",\n \"EVOLUTION\",\n \"FOREST ECOSYSTEMS\",\n \"FRONTIERS IN ECOLOGY AND THE ENVIRONMENT\",\n \"FRONTIERS OF ENVIRONMENTAL SCIENCE & ENGINEERING\",\n \"FUNCTIONAL ECOLOGY\",\n \"GLOBAL CHANGE BIOLOGY\",\n \"GLOBAL ECOLOGY AND BIOGEOGRAPHY\",\n \"GLOBAL ENVIRONMENTAL CHANGE\",\n \"INTERNATIONAL SOIL AND WATER CONSERVATION RESEARCH\",\n \"JOURNAL OF ANIMAL ECOLOGY\",\n \"JOURNAL OF APPLIED ECOLOGY\",\n \"JOURNAL OF BIOGEOGRAPHY\",\n \"JOURNAL OF CLEANER PRODUCTION\",\n \"JOURNAL OF ECOLOGY\",\n \"JOURNAL OF ENVIRONMENTAL INFORMATICS\",\n \"JOURNAL OF ENVIRONMENTAL MANAGEMENT\",\n \"JOURNAL OF HAZARDOUS MATERIALS\",\n \"JOURNAL OF INDUSTRIAL ECOLOGY\",\n \"JOURNAL OF PLANT ECOLOGY\",\n \"LANDSCAPE AND URBAN PLANNING\",\n \"LANDSCAPE ECOLOGY\",\n \"METHODS IN ECOLOGY AND EVOLUTION\",\n \"MICROBIOME\",\n \"MOLECULAR ECOLOGY\",\n \"NATURE\",\n \"NATURE CLIMATE CHANGE\",\n \"NATURE COMMUNICATIONS\",\n \"NATURE ECOLOGY & EVOLUTION\",\n \"NATURE ENERGY\",\n \"NATURE REVIEWS EARTH & ENVIRONMENT\",\n \"NATURE SUSTAINABILITY\",\n \"ONE EARTH\",\n \"PEOPLE AND NATURE\",\n \"PROCEEDINGS OF THE NATIONAL ACADEMY OF SCIENCES\",\n \"PROCEEDINGS OF THE ROYAL SOCIETY B: BIOLOGICAL SCIENCES\",\n \"RENEWABLE AND SUSTAINABLE ENERGY REVIEWS\",\n \"RESOURCES, CONSERVATION AND RECYCLING\",\n \"REVIEWS IN ENVIRONMENTAL SCIENCE AND BIO/TECHNOLOGY\",\n \"SCIENCE\",\n \"SCIENCE ADVANCES\",\n \"SCIENCE OF THE TOTAL ENVIRONMENT\",\n \"SCIENTIFIC DATA\",\n \"SUSTAINABLE CITIES AND SOCIETY\",\n \"SUSTAINABLE MATERIALS AND TECHNOLOGIES\",\n \"SUSTAINABLE PRODUCTION AND CONSUMPTION\",\n \"THE AMERICAN NATURALIST\",\n \"THE INTERNATIONAL JOURNAL OF LIFE CYCLE ASSESSMENT\",\n \"THE ISME JOURNAL\",\n \"THE LANCET PLANETARY HEALTH\",\n \"TRENDS IN ECOLOGY & EVOLUTION\",\n \"WASTE MANAGEMENT\",\n \"WATER RESEARCH\",\n ],\n },\n \"created_at\": {\n \"title\": \"Date Filter\",\n \"description\": 'Date extracted for a vector database semantic search, in MongoDB\\'s query and projection operators, in format like {\"$gte\": 1609459200.0, \"$lte\": 1640908800.0}',\n \"type\": \"string\",\n },\n },\n \"required\": [\"query\"],\n }\n\n prompt_func_calling_msgs = [\n SystemMessage(\n content=\"You are a world class algorithm 
for extracting the queries and filters for a vector database semantic search. Make sure to answer in the correct structured format\"\n ),\n HumanMessagePromptTemplate.from_template(\"{input}\"),\n ]\n\n prompt_func_calling = ChatPromptTemplate(messages=prompt_func_calling_msgs)\n\n llm_func_calling = ChatOpenAI(\n api_key=self.openai_api_key,\n model_name=self.llm_model,\n temperature=0,\n streaming=False,\n )\n\n query_func_calling_chain = create_structured_output_chain(\n output_schema=func_calling_json_schema,\n llm=llm_func_calling,\n prompt=prompt_func_calling,\n verbose=self.langchain_verbose,\n )\n\n return query_func_calling_chain\n\n def _run(\n self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool synchronously.\"\"\"\n\n embeddings = OpenAIEmbeddings(api_key=self.openai_api_key)\n pinecone.init(\n api_key=self.pinecone_api_key,\n environment=self.pinecone_environment,\n )\n vectorstore = Pinecone.from_existing_index(\n index_name=self.pinecone_index,\n embedding=embeddings,\n )\n\n query_response = self.vector_database_query_func_calling_chain().run(query)\n\n query = query_response.get(\"query\")\n\n try:\n created_at = json.loads(query_response.get(\"created_at\", None))\n except TypeError:\n created_at = None\n\n source = query_response.get(\"source\", None)\n\n filters = {}\n if created_at:\n filters[\"created_at\"] = created_at\n if source:\n filters[\"source\"] = source\n\n if filters:\n docs = vectorstore.similarity_search(query, k=16, filter=filters)\n else:\n docs = vectorstore.similarity_search(query, k=16)\n\n docs_list = []\n for doc in docs:\n date = datetime.datetime.fromtimestamp(doc.metadata[\"created_at\"])\n formatted_date = date.strftime(\"%Y-%m\") # Format date as 'YYYY-MM'\n source_entry = \"[{}. {}. {}. {}.]({})\".format(\n doc.metadata[\"source_id\"],\n doc.metadata[\"source\"],\n doc.metadata[\"author\"],\n formatted_date,\n doc.metadata[\"url\"],\n )\n docs_list.append({\"content\": doc.page_content, \"source\": source_entry})\n\n return docs_list\n\n async def _arun(\n self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n\n embeddings = OpenAIEmbeddings(api_key=self.openai_api_key)\n pinecone.init(\n api_key=self.pinecone_api_key,\n environment=self.pinecone_environment,\n )\n vectorstore = Pinecone.from_existing_index(\n index_name=self.pinecone_index,\n embedding=embeddings,\n )\n\n query_response = self.vector_database_query_func_calling_chain().run(query)\n\n query = query_response.get(\"query\")\n\n try:\n created_at = json.loads(query_response.get(\"created_at\", None))\n except TypeError:\n created_at = None\n\n source = query_response.get(\"source\", None)\n\n filters = {}\n if created_at:\n filters[\"created_at\"] = created_at\n if source:\n filters[\"source\"] = source\n\n if filters:\n docs = vectorstore.similarity_search(query, k=16, filter=filters)\n else:\n docs = vectorstore.similarity_search(query, k=16)\n\n docs_list = []\n for doc in docs:\n date = datetime.datetime.fromtimestamp(doc.metadata[\"created_at\"])\n formatted_date = date.strftime(\"%Y-%m\") # Format date as 'YYYY-MM'\n source_entry = \"[{}. {}. {}. 
{}.]({})\".format(\n doc.metadata[\"source_id\"],\n doc.metadata[\"source\"],\n doc.metadata[\"author\"],\n formatted_date,\n doc.metadata[\"url\"],\n )\n docs_list.append({\"content\": doc.page_content, \"source\": source_entry})\n\n return docs_list" }, { "identifier": "SearchESG", "path": "src/tools/search_esg.py", "snippet": "class SearchESG(BaseTool):\n name = \"search_ESG_tool\"\n description = \"Search for the ESG information.\"\n\n def _run(\n self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool synchronously.\"\"\"\n\n function_desc = \"Generate the query and filters for a semantic search\"\n function_para = {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"description\": \"The queries extracted for a Xata database semantic search\",\n \"type\": \"string\",\n },\n \"corporate\": {\n \"description\": \"Corporate Name or Corporate extracted for a Xata database semantic search, MUST be in short name\",\n \"type\": \"string\",\n \"enum\": [\n \"Apple\",\n \"Tesla\",\n \"Toyota\",\n \"BYD\",\n ],\n },\n \"created_at\": {\n \"description\": 'Date extracted for a Xata database semantic search, in MongoDB\\'s query and projection operators, in format like {\"$gte\": 1609459200.0, \"$lte\": 1640908800.0}',\n \"type\": \"string\",\n },\n },\n \"required\": [\"query\"],\n }\n\n prompt = ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"You are a world class algorithm for extracting the queries and filters for a Xata database semantic search. Make sure to answer in the correct structured format\",\n ),\n (\"human\", \"{input}\"),\n ]\n )\n\n model_name = \"gpt-4\"\n\n query_response = function_calling(\n function_desc, function_para, prompt, model_name, query\n )\n\n query_response = json.loads(query_response)\n query = query_response.get(\"query\")\n\n try:\n corporate = query_response.get(\"corporate\", None)\n except TypeError:\n corporate = None\n\n filters = {}\n\n if corporate:\n search_reportid = xata.data().query(\n \"ESG_Reports\",\n {\n \"columns\": [\"id\"],\n \"filter\": {\n \"companyShortName\": corporate,\n },\n },\n )\n report_ids = [item[\"id\"] for item in search_reportid[\"records\"]]\n filters = {\"reportId\": {\"$any\": report_ids}}\n\n response = client.embeddings.create(input=query, model=\"text-embedding-ada-002\")\n vector = response.data[0].embedding\n\n result = xata.data().vector_search(\n \"ESG_Embeddings\", # reference table\n {\n \"queryVector\": vector,\n \"column\": \"vector\",\n \"size\": 10,\n \"filter\": filters,\n },\n )\n\n result_list = [item[\"text\"] for item in result[\"records\"]]\n\n return result_list\n\n async def _arun(\n self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None\n ) -> str:\n \"\"\"Use the tool asynchronously.\"\"\"\n\n function_desc = \"Generate the query and filters for a semantic search\"\n function_para = {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"description\": \"The queries extracted for a Xata database semantic search\",\n \"type\": \"string\",\n },\n \"corporate\": {\n \"description\": \"Corporate Name or Corporate extracted for a Xata database semantic search, MUST be in short name\",\n \"type\": \"string\",\n \"enum\": [\n \"Apple\",\n \"Tesla\",\n \"Toyota\",\n \"BYD\",\n ],\n },\n \"created_at\": {\n \"description\": 'Date extracted for a Xata database semantic search, in MongoDB\\'s query and projection operators, in format like {\"$gte\": 1609459200.0, \"$lte\": 1640908800.0}',\n \"type\": 
\"string\",\n },\n },\n \"required\": [\"query\"],\n }\n\n prompt = ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"You are a world class algorithm for extracting the queries and filters for a Xata database semantic search. Make sure to answer in the correct structured format\",\n ),\n (\"human\", \"{input}\"),\n ]\n )\n\n model_name = \"gpt-4\"\n\n query_response = function_calling(\n function_desc, function_para, prompt, model_name, query\n )\n\n query_response = json.loads(query_response)\n query = query_response.get(\"query\")\n\n try:\n corporate = query_response.get(\"corporate\", None)\n except TypeError:\n corporate = None\n\n filters = {}\n\n if corporate:\n search_reportid = xata.data().query(\n \"ESG_Reports\",\n {\n \"columns\": [\"id\"],\n \"filter\": {\n \"companyShortName\": corporate,\n },\n },\n )\n report_ids = [item[\"id\"] for item in search_reportid[\"records\"]]\n filters = {\"reportId\": {\"$any\": report_ids}}\n\n response = client.embeddings.create(input=query, model=\"text-embedding-ada-002\")\n vector = response.data[0].embedding\n\n result = xata.data().vector_search(\n \"ESG_Embeddings\", # reference table\n {\n \"queryVector\": vector,\n \"column\": \"vector\",\n \"size\": 10,\n \"filter\": filters,\n },\n )\n\n result_list = [item[\"text\"] for item in result[\"records\"]]\n\n return result_list" } ]
import os from dotenv import load_dotenv from langchain.agents import AgentExecutor from langchain.agents.format_scratchpad.openai_tools import ( format_to_openai_tool_messages, ) from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser from langchain.chat_models import ChatOpenAI from langchain.memory import XataChatMessageHistory from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain.tools.render import format_tool_to_openai_tool from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory from src.tools.search_internet import SearchInternet from src.tools.search_lca_db import SearchLCADB from src.tools.search_vector_db import SearchVectorDB from src.tools.search_esg import SearchESG
4,874
load_dotenv()


def init_chat_history(session_id: str) -> BaseChatMessageHistory:
    xata_api_key = os.getenv("XATA_API_KEY")
    xata_db_url = os.getenv("XATA_DB_URL")
    xata_table_name = os.getenv("XATA_TABLE_NAME")
    return XataChatMessageHistory(
        session_id=session_id,
        api_key=xata_api_key,
        db_url=xata_db_url,
        table_name=xata_table_name,
    )


def openai_agent():
    # lc_tools = [SearchInternet(), SearchVectorDB(), SearchLCADB(), SearchESG()]
load_dotenv()


def init_chat_history(session_id: str) -> BaseChatMessageHistory:
    xata_api_key = os.getenv("XATA_API_KEY")
    xata_db_url = os.getenv("XATA_DB_URL")
    xata_table_name = os.getenv("XATA_TABLE_NAME")
    return XataChatMessageHistory(
        session_id=session_id,
        api_key=xata_api_key,
        db_url=xata_db_url,
        table_name=xata_table_name,
    )


def openai_agent():
    # lc_tools = [SearchInternet(), SearchVectorDB(), SearchLCADB(), SearchESG()]
lc_tools = [SearchESG(), SearchInternet()]
0
2023-12-25 06:34:52+00:00
8k
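The cropped code in this record stops right after the tool list is built, so the rest of openai_agent() is not shown. As a rough orientation only, the standard OpenAI-tools agent wiring suggested by the imports would look roughly like the sketch below; the model name, prompt text, and variable names are guesses, not the repository's actual code.

llm = ChatOpenAI(model_name="gpt-4", temperature=0)
llm_with_tools = llm.bind(tools=[format_tool_to_openai_tool(t) for t in lc_tools])

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),            # illustrative system prompt
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

agent = (
    {
        "input": lambda x: x["input"],
        "chat_history": lambda x: x["chat_history"],
        "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
    }
    | prompt
    | llm_with_tools
    | OpenAIToolsAgentOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=lc_tools, verbose=True)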
0xn0ne/sensitive-helper
sensitive-helper.py
[ { "identifier": "compress", "path": "utils/compress.py", "snippet": "def zip_info(file_path: pathlib.Path) -> Dict[str, Any]:\ndef uncompress_zip(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None, is_error: bool = True\n) -> Union[pathlib.Path, Any]:\ndef is_tar(file_path: pathlib.Path):\ndef uncompress_tar(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_gz(file_path: pathlib.Path):\ndef uncompress_gz(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_7z(file_path: pathlib.Path):\ndef uncompress_7z(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_rar(file_path: pathlib.Path):\ndef uncompress_rar(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None\n) -> Union[pathlib.Path, Any]:\ndef is_bz(file_path: pathlib.Path):\ndef uncompress(\n file_path: Union[pathlib.Path, str],\n extract_dir: Union[pathlib.Path, str] = None,\n is_error: bool = True,\n is_recursive: bool = False,\n max_level=64,\n) -> Union[pathlib.Path, Any]:" }, { "identifier": "configurator", "path": "utils/configurator.py", "snippet": "_G_CFG = {}\ndef maps_merge(*maps: Dict) -> Dict:\ndef map_merge(dst: Dict, src: Dict):\n def __init__(self, template: Dict = None):\n def get(self, keys: AnyStr, _defult: Any = None, sep: AnyStr = '.'):\n def set(self, keys: AnyStr, value: Any, sep: AnyStr = '.'):\n def loads(self, content: str, fmt: str = 'json', reload: bool = False):\n def dumps(self, fmt: str = 'json'):\n def exists(self, key: AnyStr):\n def gen_pretty(self, objs: Iterable = None, depth: int = 3, filters: List[str] = None):\n def gen_value(value, depth_next, parent_next):\n def recursion_pretty(obj_data, depth_curr, parent: str = ''):\n def __str__(self):\n def __init__(self, filepath: str = 'configs.json', template: Dict = None):\n def load(self, strict: bool = False, quiet: bool = False) -> Union[Exception, Any]:\n def save(self, exist_ok: bool = True):\ndef new(\n name: str = '__DEFAULT__', base_class: Union[Any, FileConfigurator] = FileConfigurator, *args, **kwargs\n) -> FileConfigurator:\nclass BaseConfigurator:\nclass FileConfigurator(BaseConfigurator):" }, { "identifier": "office", "path": "utils/office.py", "snippet": "def docx_handler(file_path: Union[pathlib.Path, str]) -> pathlib.Path:\ndef xlsx_handler(file_path: Union[pathlib.Path, str]):\ndef pptx_handler():" }, { "identifier": "process", "path": "utils/process.py", "snippet": "class ProcessPoolHelper(concurrent.futures.ProcessPoolExecutor):\n def __init__(self, max_workers=None, mp_context=None, initializer=None, initargs=()):\n def submit_super(self, fn, /, *args, **kwargs) -> concurrent.futures.Future:\n def result_yield(self, timeout: float = None) -> Generator[Any, None, None]:\ndef __test_performance_func(min: int = 500, max: int = 600):\ndef __test_return_func(min: int = 500, max: int = 600):\ndef __test_return_dict_func(min: int = 500, max: int = 600):" } ]
import base64 import binascii import csv import json import pathlib import re import time import pandas import tqdm import argparse from typing import Any, AnyStr, Dict, List, Union from utils import compress, configurator, office, process
4,252
return ret def to_csv(data: Union[Dict[str, Any], List[Dict[str, Any]]], filename: str = 'output.csv'): """ 输入数据应为:cache = {'a': [1, 0, 9], 'b': [3, 7, 6]} """ dataframe = pandas.DataFrame(data) dataframe.to_csv(filename, quoting=csv.QUOTE_MINIMAL) # 考虑字典数据、列表数据、函数数据 FUZZY_UNIVERSAL_STRING = r'["\'`]?\s*[=:(\{\[]\s*["\'`][\x20-\x7F]{,128}?[\'"`]' # PATH_COMMON_STRING = r'users?|windows?|program files(\(x\d{2,3}\))?|s?bin|etc|usr|boot|dev|home|proc|opt|sys|srv|var' __DEFAULT_CONFIG = { 'target_path': '', 'config_path': 'config.yaml', 'output_format': 'csv', 'process_number': 5, 'exclude_files': [r'\.DS_Store'], 'row_split': '[\x00-\x1F\x7F]+', 'rules': { 'AKSK': [r'LTAI\w+'], 'JSON WEB TOKEN(JWT)': [r'ey[0-9a-zA-Z/+]{4,}={,2}\.[0-9a-zA-Z/+]{6,}={,2}\.[A-Za-z0-9-_]+'], 'FUZZY MATCH': { 'flags': 'I', 'regexp': [ r'(APP|ACCESS|USER|PASS|OSS|ECS|CVM|AWS)[\w]{,8}(NAME|ID|KEY|NUM|ENC|CODE|SEC|WORD)[\w]{,16}%s' % FUZZY_UNIVERSAL_STRING, # 考虑驼峰写法,下划线写法,MAP键值下面单词后必须接大写字母、下划线、中划线,否侧可能出现如: r'(USR|PWD|COOKIE)[_\-A-Z][\w]{,16}%s' % FUZZY_UNIVERSAL_STRING, r'(SECRET|SIGN|TOKEN)[\w]{,16}%s' % FUZZY_UNIVERSAL_STRING, ], }, 'BASE64': [r'[0-9a-zA-Z/+]{8,}={,2}'], 'URL': { 'regexp': [r'(ftp|https?):\/\/[%.\w\-]+([\w\-\.,@?^=%&amp;:/~\+#]*[\w\-\@?^=%&amp;/~\+#])?'], 're_filters': [ r'(adobe|amap|android|apache|bing|digicert|eclipse|freecodecamp|github|githubusercontent|gnu|godaddy|google|googlesource|youtube|youtu|jd' r'|npmjs|microsoft|openxmlformats|outlook|mozilla|openssl|oracle|qq|spring|sun|umang|w3|wikipedia|xml)\.(' r'org|com|cn|net|edu|io|be)', r'(ali|baidu|cdn|example|ssh|ssl)[\w-]*\.(org|com|cn|net|edu|io)', ], }, 'EMAIL': [r'[a-zA-Z0-9][-+.\w]{1,127}@([a-zA-Z0-9][-a-zA-Z0-9]{0,63}.){,3}(org|com|cn|net|edu|mail)'], 'PHONE': [r'(13[0-9]|14[5-9]|15[0-3,5-9]|16[6]|17[0-8]|18[0-9]|19[8,9])\d{8}'], 'FILE PATH': { 'flags': 'I|X', 'regexp': [ r'([a-z]:\\)?([\\/])(users?|windows?|program files(\(x\d{2,3}\))?|s?bin|etc|usr|boot|dev|home|proc|opt' r'|sys|srv|var)(\2[.\w!#\(~\[\{][.\w!#&\(\)+=~\[\]\{\}\s]{2,63}){1,16}' ], 're_filters': [ # r'[\\/].*sdk.*', # r'[\\/](alibaba|aliyun|annotation|apache|chromium|collections|eclipse|facebook|functions|github|google' # r'|internal|jetbrains|oppo|reactnative|reflect|sdklib|sequences|taobao|tencent|unionpay|view|vivo' # r'|webkit|xiaomi)', ], }, }, 'is_re_all': False, 'is_silent': False, } cfg = {} if __name__ == '__main__': parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=''' ███████╗███████╗███╗ ██╗███████╗██╗████████╗██╗██╗ ██╗███████╗ ██╔════╝██╔════╝████╗ ██║██╔════╝██║╚══██╔══╝██║██║ ██║██╔════╝ ███████╗█████╗ ██╔██╗ ██║███████╗██║ ██║ ██║██║ ██║█████╗ ╚════██║██╔══╝ ██║╚██╗██║╚════██║██║ ██║ ██║╚██╗ ██╔╝██╔══╝ ███████║███████╗██║ ╚████║███████║██║ ██║ ██║ ╚████╔╝ ███████╗ ╚══════╝╚══════╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═══╝ ╚══════╝ v0.1.4 by 0xn0ne, https://github.com/0xn0ne/sensitive-helper ''', ) parser.add_argument( '-t', '--target-path', required=True, help='search for file paths or folder paths for sensitive cache (eg. 
~/download/folder).', ) parser.add_argument('-p', '--process-number', default=5, type=int, help='number of program processes (default: 5).') parser.add_argument( '-c', '--config-path', default='configs.yaml', help='path to the yaml configuration file (default: configs.yaml).', ) parser.add_argument('-o', '--output-format', help='output file format, available formats json, csv (default: csv).') parser.add_argument( '-e', '--exclude-files', nargs='+', help='excluded files, using regular matching (eg. \\.DS_Store .*bin .*doc).' ) parser.add_argument( '-a', '--is-re-all', action='store_true', help='hit a single regular expression per file or match all regular expressions to exit the match loop.', ) parser.add_argument( '-s', '--is-silent', action='store_true', help='silent mode: when turned on, no hit data will be output on the console. use a progress bar instead.', ) args = parser.parse_args() print(parser.description) nargs = dict(args.__dict__) for key in args.__dict__: if nargs[key] is None: del nargs[key]
#!/bin/python3 # _*_ coding:utf-8 _*_ # # sensitive-helper.py # 本地文件敏感信息搜索工具 def log_run_times(func): def wrapper(*args, **kwargs): s_time = time.time() ret = func(*args, **kwargs) total_time = time.time() - s_time if total_time <= 1: return ret with open('run_times.log', 'a') as _f: _f.write('total time(s): {}, args: {}\n'.format(time.time() - s_time, args[0][:127])) return ret return wrapper def string_to_reg_flags(flags: str): flags_int = 0 for flag in flags.split('|'): flags_int |= getattr(re, flag) return flags_int def is_filter_base64(result: AnyStr): if len(result) % 4 != 0: return True, '' try: # 编码错误的全都丢掉,不丢掉也看不懂 ret_extend = base64.b64decode(result).decode('utf-8') if not re.search(r'^[\u0020-\u007F\u2010-\u202f\u3000-\u301f\u4e00-\u9fa5\uff00-\uffef]+$', ret_extend): return True, '' # \u0020-\u007F:英文可视字符集 # \u2010-\u202f:中文部分符号集 # \u3000-\u301f:中文部分符号集 # \u4e00-\u9fa5:中文常见文字集 # \u2e80-\u9fff:中文文字及中文异形文字集 # \uff00-\uffef:中文部分符号集 except UnicodeDecodeError: return True, '' except binascii.Error: return True, '' return False, ret_extend def is_filter_jwt(result: AnyStr): times = 0 res_split = result.split(b'.') while times < 2: if len(res_split[times]) % 4 != 0: return True, '' times += 1 return False, '' def is_filter_result(result: AnyStr, filters: List[AnyStr], flags: int): if not filters: return False, '' for fil in filters: if re.search(fil, result, flags): return True, '' return False, '' # @log_run_times def search_content( file_object: Union[pathlib.Path, bytes], rules: Dict[str, List[str]], split: bytes = b'[\x00-\x1F\x7F]+', is_re_all: bool = False, ) -> List[Dict[str, str]]: ret = [] row_contents = [file_object] if isinstance(file_object, pathlib.Path): row_contents = re.split(split, file_object.read_bytes()) for row_one in row_contents: # 按控制字符进行分割行 if len(row_one) < 12: # 单行内容少于8个字符,丢掉 continue for rule_name in rules: rule = rules[rule_name] flags = 0 filters = None if isinstance(rule, Dict): if 'flags' in rule: flags = string_to_reg_flags(rule['flags']) if 're_filters' in rule: filters = rule['re_filters'] rule = rule['regexp'] for regexp in rule: r_result = re.search(regexp, row_one, flags) if not r_result: continue try: result_byte = r_result.group() result_text = result_byte.decode('utf-8') except UnicodeDecodeError: continue is_filter, extend = is_filter_result(result_byte, filters, flags) if rule_name == 'BASE64': is_filter, extend = is_filter_base64(result_byte) if rule_name == 'JSON WEB TOKEN(JWT)': is_filter, extend = is_filter_jwt(result_byte) if is_filter: continue ret.append( { 'file': file_object.__str__(), 'group': rule_name, 'regexp': regexp.decode('utf-8'), 'match': result_text, 'extend': extend, } ) if not is_re_all: # 如果关闭了匹配所有正则组数据且已发现有用数据,则退出循环 return ret return ret def gen_file_list(src_path: str, exclude_files: List[str]) -> List[pathlib.Path]: tar_path = pathlib.Path(src_path) ret = [] if tar_path.is_file(): ret.append(tar_path) else: for filepath in tar_path.glob('**/*'): is_skip = False if filepath.is_dir(): continue filename = filepath.name for r_exclude in exclude_files: # 文件名正则匹配,在排除名单中则排除文件 if re.match(r_exclude, filename): is_skip = True break if is_skip: continue if filename.endswith('.docx') and not filename.startswith('~$'): office.docx_handler(filepath) elif filename.endswith('.xlsx') and not filename.startswith('~$'): office.xlsx_handler(filepath) else: compress.uncompress(filepath, is_error=False, is_recursive=True) ret.append(filepath) return ret def run(): pool = process.ProcessPoolHelper(max_workers=cfg.get('process_number')) 
print('[*] file loading...') filelist = gen_file_list(cfg.get('target_path'), cfg.get('exclude_files')) if not filelist: print('[!] the file path is empty. please check whether the path is correct.\n') return filelist = sorted(filelist, key=lambda x: x.stat().st_size, reverse=True) ret = [] result_filter_list = [] groups = cfg.get('rules') for filepath in filelist: pool.submit_super(search_content, filepath, groups, cfg.get('row_split'), cfg.get('is_re_all')) print('[*] analyzing...\n') result_gen = pool.result_yield() if cfg.get('is_silent'): result_gen = tqdm.tqdm( pool.result_yield(), total=len(filelist), mininterval=1, ncols=80, bar_format='{n_fmt}/{total_fmt} [{bar}] {elapsed}<{remaining},{rate_fmt}{postfix}', ) for results in result_gen: if not results: continue for result in results: union_data = [result['file'], result['match']] # 相同文件,相同匹配字符串去重 if union_data in result_filter_list: continue result_filter_list.append([result['file'], result['match']]) ret.append(result) if not cfg.get('is_silent'): print('[+] group: {}, match: {}, file: {}'.format(result['group'], result['match'], result['file'])) output_format = cfg.get('output_format') filename = 'results_{}.csv'.format(time.strftime("%H%M%S", time.localtime())) if output_format == 'json': filename = 'results.json' with open(filename, 'w', encoding='utf-8') as _f: _f.write(json.dumps(ret)) else: to_csv(ret, filename) print('[*] total file number:', len(filelist)) print('[+] output to:', pathlib.Path(filename).absolute()) return ret def to_csv(data: Union[Dict[str, Any], List[Dict[str, Any]]], filename: str = 'output.csv'): """ 输入数据应为:cache = {'a': [1, 0, 9], 'b': [3, 7, 6]} """ dataframe = pandas.DataFrame(data) dataframe.to_csv(filename, quoting=csv.QUOTE_MINIMAL) # 考虑字典数据、列表数据、函数数据 FUZZY_UNIVERSAL_STRING = r'["\'`]?\s*[=:(\{\[]\s*["\'`][\x20-\x7F]{,128}?[\'"`]' # PATH_COMMON_STRING = r'users?|windows?|program files(\(x\d{2,3}\))?|s?bin|etc|usr|boot|dev|home|proc|opt|sys|srv|var' __DEFAULT_CONFIG = { 'target_path': '', 'config_path': 'config.yaml', 'output_format': 'csv', 'process_number': 5, 'exclude_files': [r'\.DS_Store'], 'row_split': '[\x00-\x1F\x7F]+', 'rules': { 'AKSK': [r'LTAI\w+'], 'JSON WEB TOKEN(JWT)': [r'ey[0-9a-zA-Z/+]{4,}={,2}\.[0-9a-zA-Z/+]{6,}={,2}\.[A-Za-z0-9-_]+'], 'FUZZY MATCH': { 'flags': 'I', 'regexp': [ r'(APP|ACCESS|USER|PASS|OSS|ECS|CVM|AWS)[\w]{,8}(NAME|ID|KEY|NUM|ENC|CODE|SEC|WORD)[\w]{,16}%s' % FUZZY_UNIVERSAL_STRING, # 考虑驼峰写法,下划线写法,MAP键值下面单词后必须接大写字母、下划线、中划线,否侧可能出现如: r'(USR|PWD|COOKIE)[_\-A-Z][\w]{,16}%s' % FUZZY_UNIVERSAL_STRING, r'(SECRET|SIGN|TOKEN)[\w]{,16}%s' % FUZZY_UNIVERSAL_STRING, ], }, 'BASE64': [r'[0-9a-zA-Z/+]{8,}={,2}'], 'URL': { 'regexp': [r'(ftp|https?):\/\/[%.\w\-]+([\w\-\.,@?^=%&amp;:/~\+#]*[\w\-\@?^=%&amp;/~\+#])?'], 're_filters': [ r'(adobe|amap|android|apache|bing|digicert|eclipse|freecodecamp|github|githubusercontent|gnu|godaddy|google|googlesource|youtube|youtu|jd' r'|npmjs|microsoft|openxmlformats|outlook|mozilla|openssl|oracle|qq|spring|sun|umang|w3|wikipedia|xml)\.(' r'org|com|cn|net|edu|io|be)', r'(ali|baidu|cdn|example|ssh|ssl)[\w-]*\.(org|com|cn|net|edu|io)', ], }, 'EMAIL': [r'[a-zA-Z0-9][-+.\w]{1,127}@([a-zA-Z0-9][-a-zA-Z0-9]{0,63}.){,3}(org|com|cn|net|edu|mail)'], 'PHONE': [r'(13[0-9]|14[5-9]|15[0-3,5-9]|16[6]|17[0-8]|18[0-9]|19[8,9])\d{8}'], 'FILE PATH': { 'flags': 'I|X', 'regexp': [ r'([a-z]:\\)?([\\/])(users?|windows?|program files(\(x\d{2,3}\))?|s?bin|etc|usr|boot|dev|home|proc|opt' r'|sys|srv|var)(\2[.\w!#\(~\[\{][.\w!#&\(\)+=~\[\]\{\}\s]{2,63}){1,16}' ], 're_filters': 
[ # r'[\\/].*sdk.*', # r'[\\/](alibaba|aliyun|annotation|apache|chromium|collections|eclipse|facebook|functions|github|google' # r'|internal|jetbrains|oppo|reactnative|reflect|sdklib|sequences|taobao|tencent|unionpay|view|vivo' # r'|webkit|xiaomi)', ], }, }, 'is_re_all': False, 'is_silent': False, } cfg = {} if __name__ == '__main__': parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=''' ███████╗███████╗███╗ ██╗███████╗██╗████████╗██╗██╗ ██╗███████╗ ██╔════╝██╔════╝████╗ ██║██╔════╝██║╚══██╔══╝██║██║ ██║██╔════╝ ███████╗█████╗ ██╔██╗ ██║███████╗██║ ██║ ██║██║ ██║█████╗ ╚════██║██╔══╝ ██║╚██╗██║╚════██║██║ ██║ ██║╚██╗ ██╔╝██╔══╝ ███████║███████╗██║ ╚████║███████║██║ ██║ ██║ ╚████╔╝ ███████╗ ╚══════╝╚══════╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═══╝ ╚══════╝ v0.1.4 by 0xn0ne, https://github.com/0xn0ne/sensitive-helper ''', ) parser.add_argument( '-t', '--target-path', required=True, help='search for file paths or folder paths for sensitive cache (eg. ~/download/folder).', ) parser.add_argument('-p', '--process-number', default=5, type=int, help='number of program processes (default: 5).') parser.add_argument( '-c', '--config-path', default='configs.yaml', help='path to the yaml configuration file (default: configs.yaml).', ) parser.add_argument('-o', '--output-format', help='output file format, available formats json, csv (default: csv).') parser.add_argument( '-e', '--exclude-files', nargs='+', help='excluded files, using regular matching (eg. \\.DS_Store .*bin .*doc).' ) parser.add_argument( '-a', '--is-re-all', action='store_true', help='hit a single regular expression per file or match all regular expressions to exit the match loop.', ) parser.add_argument( '-s', '--is-silent', action='store_true', help='silent mode: when turned on, no hit data will be output on the console. use a progress bar instead.', ) args = parser.parse_args() print(parser.description) nargs = dict(args.__dict__) for key in args.__dict__: if nargs[key] is None: del nargs[key]
cfg = configurator.new(filepath=args.config_path, template=__DEFAULT_CONFIG)
1
2023-12-26 03:30:39+00:00
8k
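As a quick illustration of the scanning core in this record, the following is a minimal sketch that feeds an in-memory byte string and a single bytes-pattern rule to the search_content function shown above; the rule and payload are made up for the example.

rules = {"EMAIL": [rb"[a-zA-Z0-9][-+.\w]{1,127}@[\w.-]+\.(com|org|net)"]}   # illustrative rule set
payload = b"debug dump\x00contact=alice@example.com\x00end of record"

for hit in search_content(payload, rules):
    print(hit["group"], hit["match"])   # -> EMAIL alice@example.com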
lvyufeng/uie_mindspore
uie_predictor.py
[ { "identifier": "ErnieMTokenizerFast", "path": "tokenizer.py", "snippet": "class ErnieMTokenizerFast(PreTrainedTokenizerFast):\n r\"\"\"\n Construct a \"fast\" ERNIE-M tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n Args:\n vocab_file (`str`):\n File containing the vocabulary.\n sentencepiece_model_file (`str`):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n do_lower_case (`bool`, *optional*, defaults to `True`):\n Whether or not to lowercase the input when tokenizing.\n unk_token (`str`, *optional*, defaults to `\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (`str`, *optional*, defaults to `\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n pad_token (`str`, *optional*, defaults to `\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n cls_token (`str`, *optional*, defaults to `\"[CLS]\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n mask_token (`str`, *optional*, defaults to `\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n clean_text (`bool`, *optional*, defaults to `True`):\n Whether or not to clean the text before tokenization by removing any control characters and replacing all\n whitespaces by the classic one.\n tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):\n Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this\n issue](https://github.com/huggingface/transformers/issues/328)).\n strip_accents (`bool`, *optional*):\n Whether or not to strip all accents. 
If this option is not specified, then it will be determined by the\n value for `lowercase` (as in the original ERNIE-M).\n wordpieces_prefix (`str`, *optional*, defaults to `\"##\"`):\n The prefix for subwords.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n slow_tokenizer_class = ErnieMTokenizer\n\n def __init__(\n self,\n vocab_file=None,\n sentencepiece_model_file=None,\n tokenizer_file=None,\n do_lower_case=True,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=True,\n strip_accents=None,\n **kwargs\n ):\n super().__init__(\n vocab_file,\n sentencepiece_model_file,\n tokenizer_file=tokenizer_file,\n do_lower_case=do_lower_case,\n unk_token=unk_token,\n sep_token=sep_token,\n pad_token=pad_token,\n cls_token=cls_token,\n mask_token=mask_token,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n **kwargs,\n )\n\n normalizer_state = json.loads(\n self.backend_tokenizer.normalizer.__getstate__())\n if (\n normalizer_state.get(\"lowercase\", do_lower_case) != do_lower_case\n or normalizer_state.get(\"strip_accents\", strip_accents) != strip_accents\n or normalizer_state.get(\"handle_chinese_chars\", tokenize_chinese_chars) != tokenize_chinese_chars\n ):\n normalizer_class = getattr(\n normalizers, normalizer_state.pop(\"type\"))\n normalizer_state[\"lowercase\"] = do_lower_case\n normalizer_state[\"strip_accents\"] = strip_accents\n normalizer_state[\"handle_chinese_chars\"] = tokenize_chinese_chars\n self.backend_tokenizer.normalizer = normalizer_class(\n **normalizer_state)\n\n self.do_lower_case = do_lower_case\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A ERNIE-M sequence has the following format:\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n Args:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n Returns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n \"\"\"\n output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n\n if token_ids_1:\n output += [self.sep_token_id] + token_ids_1 + [self.sep_token_id]\n\n return output\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A ERNIE-M sequence\n pair mask has the following format:\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n\n if token_ids_1 is None:\n return (len(token_ids_0) + 2) * [0]\n return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n files = self._tokenizer.model.save(\n save_directory, name=filename_prefix)\n return tuple(files)\n\n @property\n def added_tokens_encoder(self) -> Dict[str, int]:\n \"\"\"\n Returns the sorted mapping from string to index. The added tokens encoder is cached for performance\n optimisation in `self._added_tokens_encoder` for the slow tokenizers.\n \"\"\"\n # return {k.content: v for v, k in sorted(self._tokenizer.get_vocab().items(), key=lambda item: item[0])}\n return self._tokenizer.get_vocab()" }, { "identifier": "logger", "path": "utils.py", "snippet": "def set_seed(seed):\ndef get_span(start_ids, end_ids, with_prob=False):\ndef get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):\n def __init__(self):\n def compute(self, start_probs, end_probs, gold_start_ids, gold_end_ids):\n def update(self, num_correct_spans, num_infer_spans, num_label_spans):\n def eval_span(self, predict_start_ids, predict_end_ids, label_start_ids,\n label_end_ids):\n def accumulate(self):\n def reset(self):\n def name(self):\ndef convert_example(example, tokenizer, max_seq_len):\ndef map_offset(ori_offset, offset_mapping):\ndef reader(data_path, max_seq_len=512):\ndef unify_prompt_name(prompt):\n def __init__(self, name: str = None):\n def disable(self):\n def enable(self):\n def is_enable(self) -> bool:\n def __call__(self, log_level: str, msg: str):\n def use_terminator(self, terminator: str):\n def processing(self, msg: str, interval: float = 0.1):\n def _printer():\ndef get_id_and_prob(spans, offset_map):\ndef cut_chinese_sent(para):\ndef dbc2sbc(s):\n def __init__(self, patience=7, verbose=False, delta=0, save_dir='checkpoint/early_stopping', trace_func=print):\n def __call__(self, val_loss, model):\n def save_checkpoint(self, val_loss, model):\ndef get_relation_type_dict(relation_data):\n def compare(a, b):\ndef add_entity_negative_example(examples, texts, prompts, label_set,\n negative_ratio):\ndef add_relation_negative_example(redundants, text, num_positive, ratio):\ndef add_full_negative_example(examples, texts, relation_prompts, predicate_set,\n subject_goldens):\ndef generate_cls_example(text, labels, prompt_prefix, options):\ndef convert_cls_examples(raw_examples,\n prompt_prefix=\"情感倾向\",\n options=[\"正向\", \"负向\"]):\ndef convert_ext_examples(raw_examples,\n negative_ratio,\n prompt_prefix=\"情感倾向\",\n options=[\"正向\", \"负向\"],\n separator=\"##\",\n is_train=True):\n def _sep_cls_label(label, separator):\ndef get_path_from_url(url,\n root_dir,\n check_exist=True,\n decompress=True):\n def is_url(path):\n def _map_path(url, root_dir):\n def _get_download(url, fullname):\n def _download(url, path):\n def _uncompress_file_zip(filepath):\n def _is_a_single_file(file_list):\n def _is_a_single_dir(file_list):\n def _uncompress_file_tar(filepath, 
mode=\"r:*\"):\n def _decompress(fname):\nclass SpanEvaluator:\nclass Logger(object):\nclass EarlyStopping:\nBAR_FORMAT = f'{{desc}}: {Fore.GREEN}{{percentage:3.0f}}%{Fore.RESET} {Fore.BLUE}{{bar}}{Fore.RESET} {Fore.GREEN}{{n_fmt}}/{{total_fmt}} {Fore.RED}{{rate_fmt}}{{postfix}}{Fore.RESET} eta {Fore.CYAN}{{remaining}}{Fore.RESET}'\nBAR_FORMAT_NO_TIME = f'{{desc}}: {Fore.GREEN}{{percentage:3.0f}}%{Fore.RESET} {Fore.BLUE}{{bar}}{Fore.RESET} {Fore.GREEN}{{n_fmt}}/{{total_fmt}}{Fore.RESET}'\nBAR_TYPE = [\n \"░▝▗▖▘▚▞▛▙█\",\n \"░▖▘▝▗▚▞█\",\n \" ▖▘▝▗▚▞█\",\n \"░▒█\",\n \" >=\",\n \" ▏▎▍▌▋▊▉█\"\n \"░▏▎▍▌▋▊▉█\"\n]\n DOWNLOAD_RETRY_LIMIT = 3" } ]
import re import numpy as np import math import argparse import mindspore from mindnlp.transformers import UIE, UIEM from tokenizer import ErnieMTokenizerFast from utils import logger, get_bool_ids_greater_than, get_span, get_id_and_prob, cut_chinese_sent, dbc2sbc from mindnlp.transformers import BertTokenizerFast
5,206
if cnt_org not in input_mapping.keys(): input_mapping[cnt_org] = [cnt_short] else: input_mapping[cnt_org].append(cnt_short) cnt_short += 1 else: temp_text_list = [ sen[i:i + max_text_len] for i in range(0, lens, max_text_len) ] short_input_texts.extend(temp_text_list) short_idx = cnt_short cnt_short += math.ceil(lens / max_text_len) temp_text_id = [ short_idx + i for i in range(cnt_short - short_idx) ] if cnt_org not in input_mapping.keys(): input_mapping[cnt_org] = temp_text_id else: input_mapping[cnt_org].extend(temp_text_id) cnt_org += 1 return short_input_texts, input_mapping def _single_stage_predict(self, inputs): input_texts = [] prompts = [] for i in range(len(inputs)): input_texts.append(inputs[i]["text"]) prompts.append(inputs[i]["prompt"]) # max predict length should exclude the length of prompt and summary tokens max_predict_len = self._max_seq_len - len(max(prompts)) - 3 short_input_texts, self.input_mapping = self._auto_splitter( input_texts, max_predict_len, split_sentence=self._split_sentence) short_texts_prompts = [] for k, v in self.input_mapping.items(): short_texts_prompts.extend([prompts[k] for i in range(len(v))]) short_inputs = [{ "text": short_input_texts[i], "prompt": short_texts_prompts[i] } for i in range(len(short_input_texts))] sentence_ids = [] probs = [] input_ids = [] token_type_ids = [] attention_mask = [] offset_maps = [] if self._multilingual: padding_type = "max_length" else: padding_type = "longest" encoded_inputs = self._tokenizer( text=short_texts_prompts, text_pair=short_input_texts, stride=2, truncation=True, max_length=self._max_seq_len, padding=padding_type, add_special_tokens=True, return_token_type_ids=True, return_attention_mask=True, return_offsets_mapping=True, return_tensors="np") start_prob_concat, end_prob_concat = [], [] for batch_start in range(0, len(short_input_texts), self._batch_size): input_ids = encoded_inputs["input_ids"][batch_start:batch_start+self._batch_size] token_type_ids = encoded_inputs["token_type_ids"][batch_start:batch_start+self._batch_size] attention_mask = encoded_inputs["attention_mask"][batch_start:batch_start+self._batch_size] offset_maps = encoded_inputs["offset_mapping"][batch_start:batch_start+self._batch_size] if self._multilingual: input_ids = np.array( input_ids, dtype="int64") attention_mask = np.array( attention_mask, dtype="int64") position_ids = (np.cumsum(np.ones_like(input_ids), axis=1) - np.ones_like(input_ids))*attention_mask input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids } else: input_dict = { "input_ids": np.array( input_ids, dtype="int64"), "token_type_ids": np.array( token_type_ids, dtype="int64"), "attention_mask": np.array( attention_mask, dtype="int64") } outputs = self.inference_backend.infer(input_dict) start_prob, end_prob = outputs[0], outputs[1] start_prob_concat.append(start_prob) end_prob_concat.append(end_prob) start_prob_concat = np.concatenate(start_prob_concat) end_prob_concat = np.concatenate(end_prob_concat) start_ids_list = get_bool_ids_greater_than( start_prob_concat, limit=self._position_prob, return_prob=True) end_ids_list = get_bool_ids_greater_than( end_prob_concat, limit=self._position_prob, return_prob=True) input_ids = input_dict['input_ids'] sentence_ids = [] probs = [] for start_ids, end_ids, ids, offset_map in zip(start_ids_list, end_ids_list, input_ids.tolist(), offset_maps): for i in reversed(range(len(ids))): if ids[i] != 0: ids = ids[:i] break span_list = get_span(start_ids, end_ids, with_prob=True)
class MindSporeInferBackend: def __init__(self, model_path_prefix, multilingual=False, use_fp16=False): logger.info(">>> [MindSporeInferBackend] Creating Engine ...") if multilingual: self.model = UIEM.from_pretrained(model_path_prefix) else: self.model = UIE.from_pretrained(model_path_prefix) self.model.set_train(False) if use_fp16: logger.info( ">>> [MindSporeInferBackend] Use FP16 to inference ...") self.model = self.model.half() logger.info(">>> [MindSporeInferBackend] Engine Created ...") def infer(self, input_dict): for input_name, input_value in input_dict.items(): input_value = mindspore.Tensor(input_value) input_dict[input_name] = input_value outputs = self.model(**input_dict) start_prob, end_prob = outputs[0], outputs[1] start_prob = start_prob.asnumpy() end_prob = end_prob.asnumpy() return start_prob, end_prob class UIEPredictor(object): def __init__(self, model, schema, task_path=None, schema_lang="zh", engine='mindspore', position_prob=0.5, max_seq_len=512, batch_size=64, split_sentence=False, use_fp16=False): if model in ['uie-m-base', 'uie-m-large']: self._multilingual = True else: self._multilingual = False self._model = model self._engine = engine self._task_path = task_path self._position_prob = position_prob self._max_seq_len = max_seq_len self._batch_size = batch_size self._split_sentence = split_sentence self._use_fp16 = use_fp16 self._schema_tree = None self._is_en = True if model in ['uie-base-en' ] or schema_lang == 'en' else False self.set_schema(schema) self._prepare_predictor() def _prepare_predictor(self): assert self._engine in ['mindspore'], "engine must be mindspore!" if self._task_path is None: self._task_path = self._model if self._multilingual: self._tokenizer = ErnieMTokenizerFast.from_pretrained( self._task_path) else: self._tokenizer = BertTokenizerFast.from_pretrained( self._task_path) if self._engine == 'mindspore': self.inference_backend = MindSporeInferBackend( self._task_path, multilingual=self._multilingual, use_fp16=self._use_fp16) def set_schema(self, schema): if isinstance(schema, dict) or isinstance(schema, str): schema = [schema] self._schema_tree = self._build_tree(schema) def __call__(self, inputs): texts = inputs if isinstance(texts, str): texts = [texts] results = self._multi_stage_predict(texts) return results def _multi_stage_predict(self, datas): """ Traversal the schema tree and do multi-stage prediction. 
Args: datas (list): a list of strings Returns: list: a list of predictions, where the list's length equals to the length of `datas` """ results = [{} for _ in range(len(datas))] # input check to early return if len(datas) < 1 or self._schema_tree is None: return results # copy to stay `self._schema_tree` unchanged schema_list = self._schema_tree.children[:] while len(schema_list) > 0: node = schema_list.pop(0) examples = [] input_map = {} cnt = 0 idx = 0 if not node.prefix: for data in datas: examples.append({ "text": data, "prompt": dbc2sbc(node.name) }) input_map[cnt] = [idx] idx += 1 cnt += 1 else: for pre, data in zip(node.prefix, datas): if len(pre) == 0: input_map[cnt] = [] else: for p in pre: if self._is_en: if re.search(r'\[.*?\]$', node.name): prompt_prefix = node.name[:node.name.find( "[", 1)].strip() cls_options = re.search( r'\[.*?\]$', node.name).group() # Sentiment classification of xxx [positive, negative] prompt = prompt_prefix + p + " " + cls_options else: prompt = node.name + p else: prompt = p + node.name examples.append({ "text": data, "prompt": dbc2sbc(prompt) }) input_map[cnt] = [i + idx for i in range(len(pre))] idx += len(pre) cnt += 1 if len(examples) == 0: result_list = [] else: result_list = self._single_stage_predict(examples) if not node.parent_relations: relations = [[] for i in range(len(datas))] for k, v in input_map.items(): for idx in v: if len(result_list[idx]) == 0: continue if node.name not in results[k].keys(): results[k][node.name] = result_list[idx] else: results[k][node.name].extend(result_list[idx]) if node.name in results[k].keys(): relations[k].extend(results[k][node.name]) else: relations = node.parent_relations for k, v in input_map.items(): for i in range(len(v)): if len(result_list[v[i]]) == 0: continue if "relations" not in relations[k][i].keys(): relations[k][i]["relations"] = { node.name: result_list[v[i]] } elif node.name not in relations[k][i]["relations"].keys( ): relations[k][i]["relations"][ node.name] = result_list[v[i]] else: relations[k][i]["relations"][node.name].extend( result_list[v[i]]) new_relations = [[] for i in range(len(datas))] for i in range(len(relations)): for j in range(len(relations[i])): if "relations" in relations[i][j].keys( ) and node.name in relations[i][j]["relations"].keys(): for k in range( len(relations[i][j]["relations"][ node.name])): new_relations[i].append(relations[i][j][ "relations"][node.name][k]) relations = new_relations prefix = [[] for _ in range(len(datas))] for k, v in input_map.items(): for idx in v: for i in range(len(result_list[idx])): if self._is_en: prefix[k].append(" of " + result_list[idx][i]["text"]) else: prefix[k].append(result_list[idx][i]["text"] + "的") for child in node.children: child.prefix = prefix child.parent_relations = relations schema_list.append(child) return results def _convert_ids_to_results(self, examples, sentence_ids, probs): """ Convert ids to raw text in a single stage. 
""" results = [] for example, sentence_id, prob in zip(examples, sentence_ids, probs): if len(sentence_id) == 0: results.append([]) continue result_list = [] text = example["text"] prompt = example["prompt"] for i in range(len(sentence_id)): start, end = sentence_id[i] if start < 0 and end >= 0: continue if end < 0: start += (len(prompt) + 1) end += (len(prompt) + 1) result = {"text": prompt[start:end], "probability": prob[i]} result_list.append(result) else: result = { "text": text[start:end], "start": start, "end": end, "probability": prob[i] } result_list.append(result) results.append(result_list) return results def _auto_splitter(self, input_texts, max_text_len, split_sentence=False): ''' Split the raw texts automatically for model inference. Args: input_texts (List[str]): input raw texts. max_text_len (int): cutting length. split_sentence (bool): If True, sentence-level split will be performed. return: short_input_texts (List[str]): the short input texts for model inference. input_mapping (dict): mapping between raw text and short input texts. ''' input_mapping = {} short_input_texts = [] cnt_org = 0 cnt_short = 0 for text in input_texts: if not split_sentence: sens = [text] else: sens = cut_chinese_sent(text) for sen in sens: lens = len(sen) if lens <= max_text_len: short_input_texts.append(sen) if cnt_org not in input_mapping.keys(): input_mapping[cnt_org] = [cnt_short] else: input_mapping[cnt_org].append(cnt_short) cnt_short += 1 else: temp_text_list = [ sen[i:i + max_text_len] for i in range(0, lens, max_text_len) ] short_input_texts.extend(temp_text_list) short_idx = cnt_short cnt_short += math.ceil(lens / max_text_len) temp_text_id = [ short_idx + i for i in range(cnt_short - short_idx) ] if cnt_org not in input_mapping.keys(): input_mapping[cnt_org] = temp_text_id else: input_mapping[cnt_org].extend(temp_text_id) cnt_org += 1 return short_input_texts, input_mapping def _single_stage_predict(self, inputs): input_texts = [] prompts = [] for i in range(len(inputs)): input_texts.append(inputs[i]["text"]) prompts.append(inputs[i]["prompt"]) # max predict length should exclude the length of prompt and summary tokens max_predict_len = self._max_seq_len - len(max(prompts)) - 3 short_input_texts, self.input_mapping = self._auto_splitter( input_texts, max_predict_len, split_sentence=self._split_sentence) short_texts_prompts = [] for k, v in self.input_mapping.items(): short_texts_prompts.extend([prompts[k] for i in range(len(v))]) short_inputs = [{ "text": short_input_texts[i], "prompt": short_texts_prompts[i] } for i in range(len(short_input_texts))] sentence_ids = [] probs = [] input_ids = [] token_type_ids = [] attention_mask = [] offset_maps = [] if self._multilingual: padding_type = "max_length" else: padding_type = "longest" encoded_inputs = self._tokenizer( text=short_texts_prompts, text_pair=short_input_texts, stride=2, truncation=True, max_length=self._max_seq_len, padding=padding_type, add_special_tokens=True, return_token_type_ids=True, return_attention_mask=True, return_offsets_mapping=True, return_tensors="np") start_prob_concat, end_prob_concat = [], [] for batch_start in range(0, len(short_input_texts), self._batch_size): input_ids = encoded_inputs["input_ids"][batch_start:batch_start+self._batch_size] token_type_ids = encoded_inputs["token_type_ids"][batch_start:batch_start+self._batch_size] attention_mask = encoded_inputs["attention_mask"][batch_start:batch_start+self._batch_size] offset_maps = encoded_inputs["offset_mapping"][batch_start:batch_start+self._batch_size] if 
self._multilingual: input_ids = np.array( input_ids, dtype="int64") attention_mask = np.array( attention_mask, dtype="int64") position_ids = (np.cumsum(np.ones_like(input_ids), axis=1) - np.ones_like(input_ids))*attention_mask input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "position_ids": position_ids } else: input_dict = { "input_ids": np.array( input_ids, dtype="int64"), "token_type_ids": np.array( token_type_ids, dtype="int64"), "attention_mask": np.array( attention_mask, dtype="int64") } outputs = self.inference_backend.infer(input_dict) start_prob, end_prob = outputs[0], outputs[1] start_prob_concat.append(start_prob) end_prob_concat.append(end_prob) start_prob_concat = np.concatenate(start_prob_concat) end_prob_concat = np.concatenate(end_prob_concat) start_ids_list = get_bool_ids_greater_than( start_prob_concat, limit=self._position_prob, return_prob=True) end_ids_list = get_bool_ids_greater_than( end_prob_concat, limit=self._position_prob, return_prob=True) input_ids = input_dict['input_ids'] sentence_ids = [] probs = [] for start_ids, end_ids, ids, offset_map in zip(start_ids_list, end_ids_list, input_ids.tolist(), offset_maps): for i in reversed(range(len(ids))): if ids[i] != 0: ids = ids[:i] break span_list = get_span(start_ids, end_ids, with_prob=True)
sentence_id, prob = get_id_and_prob(span_list, offset_map.tolist())
1
2023-12-25 11:02:24+00:00
8k
SAITPublic/BiRF
test.py
[ { "identifier": "NGPradianceField", "path": "lib/models/ngp.py", "snippet": "class NGPradianceField(torch.nn.Module):\n def __init__(\n self,\n aabb: Union[torch.Tensor, List[float]],\n num_dim: int = 3,\n use_viewdirs: bool = True,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n geo_feat_dim: int = 15,\n max_deg: int = 2,\n n_features_per_level: int = 2,\n ) -> None:\n super().__init__()\n if not isinstance(aabb, torch.Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.num_dim = num_dim\n self.use_viewdirs = use_viewdirs\n self.density_activation = density_activation\n self.geo_feat_dim = geo_feat_dim\n\n if self.use_viewdirs:\n self.direction_encoding = tcnn.Encoding(\n n_input_dims=num_dim,\n encoding_config={\n \"otype\": \"Composite\",\n \"nested\": [\n {\n \"n_dims_to_encode\": 3,\n \"otype\": \"SphericalHarmonics\",\n \"degree\": 4,\n },\n ],\n },\n )\n self.mlp_base = NetworkWithInputEncoding(\n n_input_dims=num_dim,\n n_output_dims=1 + self.geo_feat_dim,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": 128,\n \"n_hidden_layers\": 1,\n },\n n_features_per_level=n_features_per_level,\n # log2_hashmap_size=19,\n )\n if self.geo_feat_dim > 0:\n self.mlp_head = tcnn.Network(\n n_input_dims=(\n (\n self.direction_encoding.n_output_dims\n if self.use_viewdirs\n else 0\n )\n + self.geo_feat_dim\n ),\n n_output_dims=3,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"Sigmoid\",\n \"n_neurons\": 128,\n \"n_hidden_layers\": 2,\n },\n )\n\n def query_density(self, x, return_feat: bool = False):\n aabb_min, aabb_max = torch.split(self.aabb, self.num_dim, dim=-1)\n x = (x - aabb_min) / (aabb_max - aabb_min)\n selector = ((x > 0.0) & (x < 1.0)).all(dim=-1)\n x = (\n self.mlp_base(x.view(-1, self.num_dim))\n .view(list(x.shape[:-1]) + [1 + self.geo_feat_dim])\n .to(x)\n )\n density_before_activation, base_mlp_out = torch.split(\n x, [1, self.geo_feat_dim], dim=-1\n )\n density = (\n self.density_activation(density_before_activation)\n * selector[..., None]\n )\n if return_feat:\n return density, base_mlp_out\n else:\n return density\n\n def _query_rgb(self, x, dir, embedding):\n aabb_min, aabb_max = torch.split(self.aabb, self.num_dim, dim=-1)\n x = (x - aabb_min) / (aabb_max - aabb_min)\n \n # tcnn requires directions in the range [0, 1]\n if self.use_viewdirs:\n dir = (dir + 1.0) / 2.0\n d = self.direction_encoding(dir.view(-1, dir.shape[-1]))\n h = torch.cat([d, embedding.view(-1, self.geo_feat_dim)], dim=-1)\n else:\n h = embedding.view(-1, self.geo_feat_dim)\n \n rgb = (\n self.mlp_head(h)\n .view(list(embedding.shape[:-1]) + [3])\n .to(embedding)\n )\n return rgb\n\n def forward(\n self,\n positions: torch.Tensor,\n directions: torch.Tensor = None,\n ):\n if self.use_viewdirs and (directions is not None):\n assert (\n positions.shape == directions.shape\n ), f\"{positions.shape} v.s. 
{directions.shape}\"\n density, embedding = self.query_density(positions, return_feat=True)\n self.sparsity = torch.log(1.0 + density ** 2 / 0.5).mean()\n rgb = self._query_rgb(positions, directions, embedding=embedding)\n return rgb, density" }, { "identifier": "render_image", "path": "lib/utils.py", "snippet": "def render_image(\n # scene\n radiance_field: torch.nn.Module,\n occupancy_grid: OccupancyGrid,\n rays: Rays,\n scene_aabb: torch.Tensor,\n # rendering options\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n render_step_size: float = 1e-3,\n render_bkgd: Optional[torch.Tensor] = None,\n cone_angle: float = 0.0,\n alpha_thre: float = 0.0,\n # test options\n test_chunk_size: int = 8192,\n):\n \"\"\"Render the pixels of an image.\"\"\"\n rays_shape = rays.origins.shape\n if len(rays_shape) == 3:\n height, width, _ = rays_shape\n num_rays = height * width\n rays = namedtuple_map(\n lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays\n )\n else:\n num_rays, _ = rays_shape\n\n def sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = chunk_rays.origins[ray_indices]\n t_dirs = chunk_rays.viewdirs[ray_indices]\n positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0\n return radiance_field.query_density(positions)\n\n def rgb_sigma_fn(t_starts, t_ends, ray_indices):\n t_origins = chunk_rays.origins[ray_indices]\n t_dirs = chunk_rays.viewdirs[ray_indices]\n positions = t_origins + t_dirs * (t_starts + t_ends) / 2.0\n return radiance_field(positions, t_dirs)\n\n results = []\n chunk = (\n torch.iinfo(torch.int32).max\n if radiance_field.training\n else test_chunk_size\n )\n for i in range(0, num_rays, chunk):\n chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays)\n ray_indices, t_starts, t_ends = ray_marching(\n chunk_rays.origins,\n chunk_rays.viewdirs,\n scene_aabb=scene_aabb,\n grid=occupancy_grid,\n sigma_fn=sigma_fn,\n near_plane=near_plane,\n far_plane=far_plane,\n render_step_size=render_step_size,\n stratified=radiance_field.training,\n cone_angle=cone_angle,\n alpha_thre=alpha_thre,\n )\n rgb, opacity, depth = rendering(\n t_starts,\n t_ends,\n ray_indices,\n n_rays=chunk_rays.origins.shape[0],\n rgb_sigma_fn=rgb_sigma_fn,\n render_bkgd=render_bkgd,\n )\n chunk_results = [rgb, opacity, depth, len(t_starts)]\n results.append(chunk_results)\n colors, opacities, depths, n_rendering_samples = [\n torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r\n for r in zip(*results)\n ]\n return (\n colors.view((*rays_shape[:-1], -1)),\n opacities.view((*rays_shape[:-1], -1)),\n depths.view((*rays_shape[:-1], -1)),\n sum(n_rendering_samples),\n )" }, { "identifier": "set_random_seed", "path": "lib/utils.py", "snippet": "def set_random_seed(seed, deterministic=False):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "load_dataset", "path": "lib/utils.py", "snippet": "def load_dataset(\n scene: str,\n data_root_fp: str,\n split: str,\n num_rays: Optional[int],\n dataset_kwargs: Dict,\n device: str,\n):\n if scene in [\"chair\", \"drums\", \"ficus\", \"hotdog\", \"lego\", \"materials\", \"mic\", \"ship\"]:\n from lib.datasets.nerf_synthetic import SubjectLoader\n data_root_fp = 'data/nerf_synthetic/'\n elif scene in [\"Bike\", \"Lifestyle\", \"Palace\", \"Robot\", \"Spaceship\", \"Steamtrain\", \"Toad\", \"Wineholder\"]:\n from lib.datasets.nsvf import 
SubjectLoader\n data_root_fp = 'data/Synthetic_NSVF/'\n elif scene in [\"Barn\", \"Caterpillar\", \"Family\", \"Ignatius\", \"Truck\"]:\n from lib.datasets.tanksandtemple import SubjectLoader\n data_root_fp = 'data/TanksAndTemple/'\n\n dataset = SubjectLoader(\n subject_id=scene,\n root_fp=data_root_fp,\n split=split,\n num_rays=num_rays,\n **dataset_kwargs,\n )\n\n dataset.images = dataset.images.to(device)\n dataset.camtoworlds = dataset.camtoworlds.to(device)\n dataset.K = dataset.K.to(device)\n\n return dataset, data_root_fp" }, { "identifier": "load_occgrid", "path": "lib/utils.py", "snippet": "def load_occgrid(occupancy_grid, save_path, device, res=128):\n data = np.load(f\"{save_path}/occgrid.npz\")['data']\n binary = np.unpackbits(data).reshape(res, res, res)\n binary = torch.tensor(binary).type(torch.bool).to(device)\n occupancy_grid._binary = binary\n \n return occupancy_grid" }, { "identifier": "load_model", "path": "lib/utils.py", "snippet": "def load_model(radiance_field, save_path, device):\n radiance_field.load_state_dict(torch.load(f\"{save_path}/network.ckpt\"), strict=False)\n encoding_params = np.load(f\"{save_path}/encoding.npz\")\n \n params_keys = [key for key in radiance_field.state_dict().keys() if key.startswith('mlp_base.encoding')]\n model_params = {}\n for key in params_keys:\n num = radiance_field.state_dict()[key].shape[0]\n params = np.unpackbits(encoding_params[key]).astype(np.float16)[:num]\n model_params[key] = torch.tensor(2 * params - 1).to(device)\n\n radiance_field.load_state_dict(model_params, strict=False)\n\n return radiance_field" } ]
import argparse
import math
import os
import time
import json
import gin
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from typing import *
from datetime import datetime
from torchmetrics import StructuralSimilarityIndexMeasure
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from lib.models.ngp import NGPradianceField
from lib.utils import render_image, set_random_seed, load_dataset, load_occgrid, load_model
from nerfacc import ContractionType, OccupancyGrid
4,832
if os.path.exists(os.path.join(f"{data_root_fp}", str(scene), "bbox.txt")): aabb = list(np.loadtxt(os.path.join(f"{data_root_fp}", str(scene), "bbox.txt"))[:6]) contraction_type = ContractionType.AABB scene_aabb = torch.tensor(aabb, dtype=torch.float32, device=device) near_plane = None far_plane = None render_step_size = ( (scene_aabb[3:] - scene_aabb[:3]).max() * math.sqrt(3) / render_n_samples ).item() alpha_thre = 0 # setup the radiance field we want to train. grad_scaler = torch.cuda.amp.GradScaler(2**10) radiance_field = NGPradianceField( aabb=aabb, n_features_per_level=n_features, ).to(device) radiance_field = load_model(radiance_field, save_path, device=device) occupancy_grid = OccupancyGrid( roi_aabb=aabb, resolution=grid_resolution, contraction_type=contraction_type, ).to(device) occupancy_grid = load_occgrid(occupancy_grid, save_path, device=device, res=grid_resolution) # metrics SSIM = StructuralSimilarityIndexMeasure(data_range=1.0).to(device) LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='vgg').to(device) radiance_field = radiance_field.half() if render_per_frame > 0: os.makedirs(f"{save_path}/imgs", exist_ok=True) # evaluation init = time.time() radiance_field.eval() psnr_list, ssim_list, lpips_list = [], [], [] with torch.no_grad(): for j in tqdm.tqdm(range(len(test_dataset))): data = test_dataset[j] render_bkgd = data["color_bkgd"] rays = data["rays"] pixels = data["pixels"] # rendering rgb, acc, depth, _ = render_image( radiance_field, occupancy_grid, rays, scene_aabb, # rendering options near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, render_bkgd=render_bkgd, cone_angle=cone_angle, alpha_thre=alpha_thre, # test options test_chunk_size=test_chunk_size, ) if render_per_frame > 0 and j % render_per_frame == 0: imageio.imwrite( f"{save_path}/imgs/{j:03d}.png", (rgb.cpu().numpy() * 255).astype(np.uint8), ) mse = F.mse_loss(rgb, pixels) psnr = -10.0 * torch.log(mse) / np.log(10.0) rgb = rgb.permute(-1, 0, 1)[None, ...] pixels = pixels.permute(-1, 0, 1)[None, ...] 
ssim = SSIM(rgb, pixels) lpips = LPIPS(rgb, pixels) psnr_list.append(psnr.item()) ssim_list.append(ssim.item()) lpips_list.append(lpips.item()) psnr_avg = sum(psnr_list) / len(psnr_list) ssim_avg = sum(ssim_list) / len(ssim_list) lpips_avg = sum(lpips_list) / len(lpips_list) print(f"Evaluation PSNR: {round(psnr_avg, 2):.2f}") print(f"Evaluation SSIM: {round(ssim_avg, 3):.3f}") print(f"Evaluation LPIPS: {round(lpips_avg, 3):.3f}") test_time = time.time() - init render_speed = len(test_dataset) / test_time encoding_size = os.path.getsize(f"{save_path}/encoding.npz") network_size = os.path.getsize(f"{save_path}/network.ckpt") occgrid_size = os.path.getsize(f"{save_path}/occgrid.npz") total_size = encoding_size + network_size + occgrid_size print(f"Evaluation encoding size: {round((encoding_size / 2 ** 20), 2):.2f} MB") print(f"Evaluation network size: {round((network_size / 2 ** 20), 2):.2f} MB") print(f"Evaluation occgrid size: {round((occgrid_size / 2 ** 20), 2):.2f} MB") print(f"Evaluation total size: {round((total_size / 2 ** 20), 2):.2f} MB") results["psnr"] = round(psnr_avg, 2) results["ssim"] = round(ssim_avg, 3) results["lpips"] = round(lpips_avg, 3) results["test_time"] = round(test_time, 2) results["render_speed"] = round(render_speed, 2) results['size'] = round(total_size / 2 ** 20, 2) with open(f"{save_path}/results.json", 'w') as f: json.dump(results, f) with open(os.path.join(save_path, "config.gin"), "w") as f: f.write(gin.operative_config_str()) print("Evaluation done") return if __name__ == "__main__": device = "cuda:0" args = parse_args()
""" "Copyright (C) 2021 Samsung Electronics Co. LTD This software is a property of Samsung Electronics. No part of this software, either material or conceptual may be copied or distributed, transmitted, transcribed, stored in a retrieval system, or translated into any human or computer language in any form by any means, electronic, mechanical, manual or otherwise, or disclosed to third parties without the express written permission of Samsung Electronics. (Use of the Software is restricted to non-commercial, personal or academic, research purpose only)" """ """ Modified from NerfAcc (https://github.com/KAIR-BAIR/nerfacc) Copyright (c) 2022 Ruilong Li, UC Berkeley. """ class ExtendAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): items = getattr(namespace, self.dest) or [] items.extend(values) setattr(namespace, self.dest, items) def parse_args(): parser = argparse.ArgumentParser() parser.register('action', 'extend', ExtendAction) parser.add_argument( "configs", action="append", help="path to config files", ) parser.add_argument( "--bind", nargs='+', action="extend", help="param to bind", ) parser.add_argument( "--scene", type=str, required=True, choices=[ # nerf synthetic "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", # nsvf synthetic "Bike", "Lifestyle", "Palace", "Robot", "Spaceship", "Steamtrain", "Toad", "Wineholder", # nsvf TankAndTemple "Barn", "Caterpillar", "Family", "Ignatius", "Truck", ], help="which scene to use", ) parser.add_argument( "--n_features", type=int, default=2, help="number of features" ) parser.add_argument( "--seed", type=int, default=0, help="random seed number" ) parser.add_argument( "--ckpt_dir", type=str, default=None, help="path for checkpoint directory" ) return parser.parse_args() @gin.configurable def main( scene: str, ckpt_dir: str, n_features: int=2, seed: int = 2023, log_dir: str = "./logs", prefix: Optional[str] = None, postfix: Optional[str] = None, max_steps: int = 20000, render_n_samples: int = 1024, test_chunk_size: int = 16384, aabb: List[float] = [-1.5, -1.5, -1.5, 1.5, 1.5, 1.5], data_root_fp: str = "data/nerf_synthetic/", train_split: str = "train", cone_angle: float = 0.0, sparsity_weight: float = 2e-5, render_per_frame: int = -1, ): # log save_path = f"{log_dir}/{scene}" if ckpt_dir == None else ckpt_dir if prefix is not None: save_path = f"{prefix}_{save_path}" if postfix is not None: save_path = f"{save_path}_{postfix}" save_path = f"{save_path}_{n_features}" print(f'Evaluation for pretrained model in "{save_path}"') results = {} # setup the dataset test_dataset_kwargs = {} target_sample_batch_size = 1 << 18 grid_resolution = 128 test_dataset, data_root_fp = load_dataset( scene=scene, data_root_fp=data_root_fp, split="test", num_rays=None, dataset_kwargs=test_dataset_kwargs, device=device, ) if os.path.exists(os.path.join(f"{data_root_fp}", str(scene), "bbox.txt")): aabb = list(np.loadtxt(os.path.join(f"{data_root_fp}", str(scene), "bbox.txt"))[:6]) contraction_type = ContractionType.AABB scene_aabb = torch.tensor(aabb, dtype=torch.float32, device=device) near_plane = None far_plane = None render_step_size = ( (scene_aabb[3:] - scene_aabb[:3]).max() * math.sqrt(3) / render_n_samples ).item() alpha_thre = 0 # setup the radiance field we want to train. 
grad_scaler = torch.cuda.amp.GradScaler(2**10) radiance_field = NGPradianceField( aabb=aabb, n_features_per_level=n_features, ).to(device) radiance_field = load_model(radiance_field, save_path, device=device) occupancy_grid = OccupancyGrid( roi_aabb=aabb, resolution=grid_resolution, contraction_type=contraction_type, ).to(device) occupancy_grid = load_occgrid(occupancy_grid, save_path, device=device, res=grid_resolution) # metrics SSIM = StructuralSimilarityIndexMeasure(data_range=1.0).to(device) LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='vgg').to(device) radiance_field = radiance_field.half() if render_per_frame > 0: os.makedirs(f"{save_path}/imgs", exist_ok=True) # evaluation init = time.time() radiance_field.eval() psnr_list, ssim_list, lpips_list = [], [], [] with torch.no_grad(): for j in tqdm.tqdm(range(len(test_dataset))): data = test_dataset[j] render_bkgd = data["color_bkgd"] rays = data["rays"] pixels = data["pixels"] # rendering rgb, acc, depth, _ = render_image( radiance_field, occupancy_grid, rays, scene_aabb, # rendering options near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, render_bkgd=render_bkgd, cone_angle=cone_angle, alpha_thre=alpha_thre, # test options test_chunk_size=test_chunk_size, ) if render_per_frame > 0 and j % render_per_frame == 0: imageio.imwrite( f"{save_path}/imgs/{j:03d}.png", (rgb.cpu().numpy() * 255).astype(np.uint8), ) mse = F.mse_loss(rgb, pixels) psnr = -10.0 * torch.log(mse) / np.log(10.0) rgb = rgb.permute(-1, 0, 1)[None, ...] pixels = pixels.permute(-1, 0, 1)[None, ...] ssim = SSIM(rgb, pixels) lpips = LPIPS(rgb, pixels) psnr_list.append(psnr.item()) ssim_list.append(ssim.item()) lpips_list.append(lpips.item()) psnr_avg = sum(psnr_list) / len(psnr_list) ssim_avg = sum(ssim_list) / len(ssim_list) lpips_avg = sum(lpips_list) / len(lpips_list) print(f"Evaluation PSNR: {round(psnr_avg, 2):.2f}") print(f"Evaluation SSIM: {round(ssim_avg, 3):.3f}") print(f"Evaluation LPIPS: {round(lpips_avg, 3):.3f}") test_time = time.time() - init render_speed = len(test_dataset) / test_time encoding_size = os.path.getsize(f"{save_path}/encoding.npz") network_size = os.path.getsize(f"{save_path}/network.ckpt") occgrid_size = os.path.getsize(f"{save_path}/occgrid.npz") total_size = encoding_size + network_size + occgrid_size print(f"Evaluation encoding size: {round((encoding_size / 2 ** 20), 2):.2f} MB") print(f"Evaluation network size: {round((network_size / 2 ** 20), 2):.2f} MB") print(f"Evaluation occgrid size: {round((occgrid_size / 2 ** 20), 2):.2f} MB") print(f"Evaluation total size: {round((total_size / 2 ** 20), 2):.2f} MB") results["psnr"] = round(psnr_avg, 2) results["ssim"] = round(ssim_avg, 3) results["lpips"] = round(lpips_avg, 3) results["test_time"] = round(test_time, 2) results["render_speed"] = round(render_speed, 2) results['size'] = round(total_size / 2 ** 20, 2) with open(f"{save_path}/results.json", 'w') as f: json.dump(results, f) with open(os.path.join(save_path, "config.gin"), "w") as f: f.write(gin.operative_config_str()) print("Evaluation done") return if __name__ == "__main__": device = "cuda:0" args = parse_args()
set_random_seed(args.seed)
2
2023-12-28 02:08:29+00:00
8k
pkariz/grin-explorer
backend/api/bootstrap.py
[ { "identifier": "check_for_reorg", "path": "backend/api/helpers.py", "snippet": "def check_for_reorg(new_block, update_progress_fn, missing_heights, start_height):\n \"\"\"\n Checks if new_block is part of a reorg. Return tuple (reorg, set<heights>)\n where reorg is Reorg instance or None, set<heights> is a set of heights of\n blocks that were fetched in during this reorg checking process.\n \"\"\"\n # import here to avoid cyclic import\n from .bootstrap import fetch_and_store_block\n blockchain = new_block.blockchain\n fetched_heights = set()\n reorged_blocks = []\n reorg = None\n # find reorged blocks backward\n cur_block = new_block\n while True:\n prev_block = blockchain.blocks\\\n .filter(height=cur_block.height - 1, reorg__isnull=True)\\\n .first()\n if prev_block:\n if cur_block.prev_hash == prev_block.hash:\n break\n reorged_blocks.append(prev_block)\n # fetch the new block at this height\n cur_block = fetch_and_store_block(\n blockchain, prev_block.height)\n # mark height as reorged so that we don't go through it again\n # when looping through 'missing_heights'\n if prev_block.height in missing_heights:\n fetched_heights.add(prev_block.height)\n update_progress_fn(fetched_heights)\n else:\n try:\n # we still need to check for a case where current block\n # is at height X, we are missing height X-1 in our DB\n # but have height X-2 where X-2 in our DB has been\n # reorged\n if cur_block.height - 1 < start_height:\n break\n cur_block = fetch_and_store_block(\n blockchain, cur_block.height - 1)\n if cur_block:\n # mark height as reorged so that we don't go through it\n # again when looping through 'missing_heights'\n if cur_block.height in missing_heights:\n fetched_heights.add(cur_block.height)\n update_progress_fn(fetched_heights)\n # we assume the reorg is not bigger than 1000 blocks,\n # so we break if needed (this check_for_reorg is much\n # slower at fetching because it does a db lookup in each\n # loop, that's why we want to leave it if possible)\n if len(fetched_heights) > 1000 and not reorged_blocks:\n break\n except NodeBlockNotFoundException:\n # the node probably dropped that height so that's where we stop\n logger.info(\n 'check reorg backward block not found',\n extra={'height': cur_block.height - 1},\n )\n break\n # reverse reorged_blocks so that we have them ascending by height\n reorged_blocks.reverse()\n # store the first block in the new \"main\" chain\n start_main_block = cur_block\n # find reorged blocks forward\n cur_block = new_block\n # we know that we have fetched all the later blocks because we fetch\n # missing blocks in order (descending by height)\n next_block = blockchain.blocks\\\n .filter(height=cur_block.height + 1, reorg__isnull=True)\\\n .first()\n while next_block:\n if next_block.prev_hash == cur_block.hash:\n break\n # next_block has been reorged\n reorged_blocks.append(next_block)\n # fetch the new block at this height\n cur_block = fetch_and_store_block(blockchain, next_block.height)\n # mark height as reorged so that we don't go through it again when\n # looping through 'missing_heights'\n if cur_block.height in missing_heights:\n fetched_heights.add(cur_block.height)\n update_progress_fn(fetched_heights)\n next_block = blockchain.blocks\\\n .filter(height=cur_block.height + 1, reorg__isnull=True)\\\n .first()\n if reorged_blocks:\n reorg = Reorg.objects.create(\n blockchain=blockchain,\n start_reorg_block=reorged_blocks[0],\n end_reorg_block=reorged_blocks[-1],\n start_main_block=start_main_block,\n )\n return reorg, fetched_heights" }, { 
"identifier": "get_missing_heights_repr", "path": "backend/api/helpers.py", "snippet": "def get_missing_heights_repr(missing_heights):\n if not missing_heights:\n return None\n xs = sorted(list(missing_heights))\n # idea: find indexes of gaps, then you get [x1...x2],[x3...x4] and if x1 == x2 then\n # only one element is missing (x1), otherwise all elements between 'x1...x2' are missing\n res = []\n cur = []\n for i, x in enumerate(xs):\n if not cur:\n cur.append(x)\n if i == len(xs) - 1 or i == 0 and (len(xs) == 1 or xs[1] != x+1):\n cur.append(x)\n res.append(cur)\n cur = []\n continue\n if x != xs[i-1] + 1:\n cur.append(cur[0])\n res.append(cur)\n cur = [x]\n if i == len(xs) - 1:\n cur.append(cur[0])\n res.append(cur)\n elif i == len(xs) - 1 or xs[i+1] != x + 1:\n cur.append(x)\n res.append(cur)\n cur = []\n return list(map(\n lambda x: str(x[0]) if x[0] == x[1] else f'{x[0]}...{x[1]}',\n res\n ))" }, { "identifier": "get_prefetched_header_and_block_data", "path": "backend/api/helpers.py", "snippet": "def get_prefetched_header_and_block_data(node, height):\n if node.slug not in node_cache or height not in node_cache[node.slug]:\n node_api = NodeV2API(node)\n fetched_blocks = node_api.get_blocks(max(0, height - 999), height)['blocks']\n # we also clear existing cache for this node because\n # it's likely not going to be used anymore\n node_cache[node.slug] = {\n block['header']['height']: block\n for block in fetched_blocks\n }\n if height not in node_cache[node.slug]:\n raise NodeBlockNotFoundException()\n return node_cache[node.slug][height]" }, { "identifier": "Block", "path": "backend/api/models.py", "snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')" }, { 
"identifier": "BlockHeader", "path": "backend/api/models.py", "snippet": "class BlockHeader(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # same as with 'Block', we want to keep 'same' headers separate if they're\n # a part of a different chain.\n blockchain = models.ForeignKey(\n Blockchain, related_name='headers', on_delete=models.CASCADE)\n version = models.IntegerField()\n kernel_root = models.CharField(max_length=64)\n output_root = models.CharField(max_length=64)\n range_proof_root = models.CharField(max_length=64)\n kernel_mmr_size = models.IntegerField()\n output_mmr_size = models.IntegerField()\n nonce = models.TextField()\n edge_bits = models.IntegerField()\n # cuckoo_solution could be an ArrayField(models.BigIntegerField) but that\n # would make syncing a few times slower\n cuckoo_solution = models.TextField(db_index=True) # ArrayField(models.BigIntegerField())\n secondary_scaling = models.IntegerField()\n # sum of the target difficulties, not the sum of the actual block difficulties\n total_difficulty = models.BigIntegerField()\n total_kernel_offset = models.CharField(max_length=64)" }, { "identifier": "Output", "path": "backend/api/models.py", "snippet": "class Output(TimeStampedModel):\n \"\"\"\n The same output can be included in two different blocks if it's a part of a\n reorg. In this case there will be two identical Output instances, except for\n the referenced block.\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n\n OUTPUT_TYPE = (\n (\"Transaction\", \"Transaction\"),\n (\"Coinbase\", \"Coinbase\"),\n )\n\n block = models.ForeignKey(\n Block,\n related_name='outputs',\n on_delete=models.CASCADE,\n )\n\n output_type = models.TextField(\n choices=OUTPUT_TYPE\n )\n\n # pedersen commitment as hex\n commitment = models.CharField(\n max_length=66,\n db_index=True,\n )\n\n # on reorged blocks 'spent' is set based on the reorged chain, not main\n spent = models.BooleanField()\n\n # range proof as hex\n proof = models.TextField()\n\n # range proof hash as hex\n proof_hash = models.CharField(max_length=64)\n\n # coinbase transactions have merkle_proof None\n merkle_proof = models.TextField(null=True)\n\n mmr_index = models.IntegerField()\n\n def __str__(self):\n return (\n f'{self.commitment}({self.id}), spent: {self.spent}, '\n f'inputs: {self.inputs.all()}'\n )" }, { "identifier": "Kernel", "path": "backend/api/models.py", "snippet": "class Kernel(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n\n block = models.ForeignKey(\n Block,\n related_name='kernels',\n on_delete=models.CASCADE,\n )\n\n # plain, coinbase, heightlocked, norecentduplicate\n features = models.TextField()\n\n fee = models.BigIntegerField()\n\n fee_shift = models.IntegerField()\n\n lock_height = models.IntegerField()\n\n excess = models.CharField(max_length=66, db_index=True)\n\n excess_sig = models.CharField(max_length=142)\n\n def __str__(self):\n return f'{self.excess}'" }, { "identifier": "Input", "path": "backend/api/models.py", "snippet": "class Input(TimeStampedModel):\n \"\"\"\n The same input commitment can be included in two different blocks if it's a\n part of a reorg. 
In this case there will be two identical Input instances,\n except for the referenced block and possibly also with a different output.\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n block = models.ForeignKey(\n Block,\n related_name='inputs',\n on_delete=models.CASCADE,\n )\n # pedersen commitment as hex\n commitment = models.CharField(max_length=66, db_index=True)\n\n # output which corresponds to this input being spent\n output = models.ForeignKey(\n Output,\n blank=True,\n null=True,\n related_name='inputs',\n on_delete=models.CASCADE,\n )\n\n def __str__(self):\n return f'{self.commitment}({self.id})'" }, { "identifier": "NodeV2API", "path": "backend/api/node.py", "snippet": "class NodeV2API:\n def __init__(self, node):\n self.foreign_api_url = node.api_url\n self.foreign_api_user = node.api_username\n self.foreign_api_password = node.api_password\n self._cached_blocks = {}\n \n\n def post(self, method, params):\n payload = {\n 'jsonrpc': '2.0',\n 'id': 1,\n 'method': method,\n 'params': params\n }\n\n response = requests.post(\n self.foreign_api_url,\n json=payload, \n auth=(self.foreign_api_user, self.foreign_api_password),\n # long read timeout because of node's compaction process\n timeout=(5, 60)\n )\n\n if response.status_code >= 300 or response.status_code < 200:\n # Requests-level error\n raise NodeError(\n method, params, response.status_code, response.reason)\n response_json = response.json()\n\n # https://github.com/mimblewimble/grin-rfcs/blob/master/text/0007-node-api-v2.md#errors\n if \"error\" in response_json:\n # One version of a node error\n raise NodeError(\n method, params,\n response_json[\"error\"][\"code\"],\n response_json[\"error\"][\"message\"]\n )\n if \"Err\" in response_json:\n # Another version of a node error\n raise NodeError(\n method, params, None, response_json[\"result\"][\"Err\"])\n return response_json\n\n def get_tip(self):\n resp = self.post('get_tip', [])\n return resp[\"result\"][\"Ok\"]\n \n def get_kernel(self, excess, min_height=None, max_height=None):\n resp = self.post('get_kernel', [excess, min_height, max_height])\n return resp[\"result\"][\"Ok\"]\n\n def get_header(self, height=None, hash=None, commit=None):\n resp = self.post('get_header', [height, hash, commit])\n return resp[\"result\"][\"Ok\"]\n\n def get_block(self, height=None, hash=None, commit=None):\n resp = self.post('get_block', [height, hash, commit])\n res = resp['result']\n try:\n return resp[\"result\"][\"Ok\"]\n except KeyError:\n if 'Err' in resp['result'] and resp['result']['Err'] == 'NotFound':\n logger.warning(\n 'NodeBlockNotFoundException',\n extra={ 'height': height, 'hash': hash },\n )\n raise NodeBlockNotFoundException()\n log_data = json.dumps(resp)\n logger.error('NodeUnknownException', extra={ 'result': log_data })\n raise NodeUnknownException()\n\n def get_blocks(self, start_height, end_height, limit=1000, proofs=True):\n if start_height < 0:\n raise Exception('Starting height must >= 0.')\n if not 1 <= limit <= 1000:\n raise Exception('Limit must be between 1 and 1000.')\n resp = self.post('get_blocks', [start_height, end_height, limit, proofs])\n res = resp['result']\n try:\n return resp[\"result\"][\"Ok\"]\n except KeyError:\n if 'Err' in resp['result'] and resp['result']['Err'] == 'NotFound':\n logger.warning(\n 'NodeBlocksFetchException',\n extra={ 'start_height': start_height, 'end_height': end_height },\n )\n raise NodeBlocksFetchException()\n log_data = json.dumps(resp)\n logger.error('NodeUnknownException', extra={ 'result': log_data })\n 
raise NodeUnknownException()" }, { "identifier": "NodeBlockNotFoundException", "path": "backend/api/node.py", "snippet": "class NodeBlockNotFoundException(Exception):\n pass" }, { "identifier": "UpdateBlockchainProgressError", "path": "backend/api/exceptions.py", "snippet": "class UpdateBlockchainProgressError(Exception):\n pass" } ]
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils.dateparse import parse_datetime
from .helpers import check_for_reorg, get_missing_heights_repr, get_prefetched_header_and_block_data
from .models import Block, BlockHeader, Output, Kernel, Input
from .node import NodeV2API, NodeBlockNotFoundException
from .exceptions import UpdateBlockchainProgressError
import decimal
import math
import logging
4,987
logger = logging.getLogger(__name__) def _get_percent_loaded(nr_missing_blocks, nr_all_blocks, decimal_places): missing_percent = Decimal('0') if nr_all_blocks: # we can calculate, otherwise we would get division by zero missing_percent = Decimal(str(nr_missing_blocks)) / Decimal(str(nr_all_blocks)) * Decimal('100') return (Decimal('100') - missing_percent).quantize( Decimal('1') / Decimal('10')**decimal_places, rounding=decimal.ROUND_DOWN ) def update_load_progress( blockchain, missing, total, step, modulo, decimal_places, verbose=False, source='default', ): if step % modulo == 0: logger.info('bc: {}, missing: {}, total: {}, step: {}, modulo: {}, decimal_places: {}, source: {}'.format( blockchain.slug, missing, total, step, modulo, decimal_places, source)) # update Blockchain's load_progress blockchain.load_progress = _get_percent_loaded( missing, total, decimal_places ) blockchain.save() if verbose: # option to get output in the shell if you bootstrap from there print('Blockchain: {} - load_progress: {}%'.format( blockchain.slug, blockchain.load_progress) ) def fetch_and_store_block(blockchain, block_height, prefetch=True): # initialize node api node_api = NodeV2API(blockchain.node) if block_height < 0: # no such block height raise NodeBlockNotFoundException() if prefetch: block_data = get_prefetched_header_and_block_data(blockchain.node, block_height) else: block_data = node_api.get_block(height=block_height) header_data = block_data['header'] timestamp = parse_datetime(header_data['timestamp']) hash = header_data['hash'] # create header instance cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution'])) with transaction.atomic(): header, header_created = BlockHeader.objects.get_or_create( blockchain=blockchain, cuckoo_solution=cuckoo_solution, kernel_root=header_data['kernel_root'], defaults={ 'version': header_data['version'], 'output_root': header_data['output_root'], 'range_proof_root': header_data['range_proof_root'], 'kernel_mmr_size': header_data['kernel_mmr_size'], 'output_mmr_size': header_data['output_mmr_size'], 'nonce': str(header_data['nonce']), 'edge_bits': header_data['edge_bits'], 'secondary_scaling': header_data['secondary_scaling'], 'total_difficulty': header_data['total_difficulty'], 'total_kernel_offset': header_data['total_kernel_offset'], } ) # create block instance try: block, block_created = Block.objects.get_or_create( blockchain=blockchain, hash=hash, height=block_height, timestamp=timestamp, header=header, prev_hash=block_data['header']['previous'], reorg=None, nr_inputs=len(block_data['inputs']), nr_outputs=len(block_data['outputs']), nr_kernels=len(block_data['kernels']), ) except IntegrityError as e: # race condition so it's a duplicate. 
We can skip creation process # and just return the block that we already have return Block.objects.get(blockchain=blockchain, hash=hash) if not block_created: # we have already fetched all the data since it's done in an atomic # transaction, so skip unnecessary work return block # bulk create kernels kernels = [] for kernel_data in block_data['kernels']: kernels.append( Kernel( block=block, features=kernel_data['features'], fee=kernel_data['fee'], fee_shift=kernel_data['fee_shift'], lock_height=kernel_data['lock_height'], excess=kernel_data['excess'], excess_sig=kernel_data['excess_sig'], ) ) Kernel.objects.bulk_create(kernels) inputs = [] # create input instances outputs_data = Output.objects\ .filter( commitment__in=block_data['inputs'], block__reorg__isnull=True, block__blockchain=block.blockchain, )\ .values('id', 'commitment') outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data } for input_data in block_data['inputs']: inputs.append(
Decimal = decimal.Decimal logger = logging.getLogger(__name__) def _get_percent_loaded(nr_missing_blocks, nr_all_blocks, decimal_places): missing_percent = Decimal('0') if nr_all_blocks: # we can calculate, otherwise we would get division by zero missing_percent = Decimal(str(nr_missing_blocks)) / Decimal(str(nr_all_blocks)) * Decimal('100') return (Decimal('100') - missing_percent).quantize( Decimal('1') / Decimal('10')**decimal_places, rounding=decimal.ROUND_DOWN ) def update_load_progress( blockchain, missing, total, step, modulo, decimal_places, verbose=False, source='default', ): if step % modulo == 0: logger.info('bc: {}, missing: {}, total: {}, step: {}, modulo: {}, decimal_places: {}, source: {}'.format( blockchain.slug, missing, total, step, modulo, decimal_places, source)) # update Blockchain's load_progress blockchain.load_progress = _get_percent_loaded( missing, total, decimal_places ) blockchain.save() if verbose: # option to get output in the shell if you bootstrap from there print('Blockchain: {} - load_progress: {}%'.format( blockchain.slug, blockchain.load_progress) ) def fetch_and_store_block(blockchain, block_height, prefetch=True): # initialize node api node_api = NodeV2API(blockchain.node) if block_height < 0: # no such block height raise NodeBlockNotFoundException() if prefetch: block_data = get_prefetched_header_and_block_data(blockchain.node, block_height) else: block_data = node_api.get_block(height=block_height) header_data = block_data['header'] timestamp = parse_datetime(header_data['timestamp']) hash = header_data['hash'] # create header instance cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution'])) with transaction.atomic(): header, header_created = BlockHeader.objects.get_or_create( blockchain=blockchain, cuckoo_solution=cuckoo_solution, kernel_root=header_data['kernel_root'], defaults={ 'version': header_data['version'], 'output_root': header_data['output_root'], 'range_proof_root': header_data['range_proof_root'], 'kernel_mmr_size': header_data['kernel_mmr_size'], 'output_mmr_size': header_data['output_mmr_size'], 'nonce': str(header_data['nonce']), 'edge_bits': header_data['edge_bits'], 'secondary_scaling': header_data['secondary_scaling'], 'total_difficulty': header_data['total_difficulty'], 'total_kernel_offset': header_data['total_kernel_offset'], } ) # create block instance try: block, block_created = Block.objects.get_or_create( blockchain=blockchain, hash=hash, height=block_height, timestamp=timestamp, header=header, prev_hash=block_data['header']['previous'], reorg=None, nr_inputs=len(block_data['inputs']), nr_outputs=len(block_data['outputs']), nr_kernels=len(block_data['kernels']), ) except IntegrityError as e: # race condition so it's a duplicate. 
We can skip creation process # and just return the block that we already have return Block.objects.get(blockchain=blockchain, hash=hash) if not block_created: # we have already fetched all the data since it's done in an atomic # transaction, so skip unnecessary work return block # bulk create kernels kernels = [] for kernel_data in block_data['kernels']: kernels.append( Kernel( block=block, features=kernel_data['features'], fee=kernel_data['fee'], fee_shift=kernel_data['fee_shift'], lock_height=kernel_data['lock_height'], excess=kernel_data['excess'], excess_sig=kernel_data['excess_sig'], ) ) Kernel.objects.bulk_create(kernels) inputs = [] # create input instances outputs_data = Output.objects\ .filter( commitment__in=block_data['inputs'], block__reorg__isnull=True, block__blockchain=block.blockchain, )\ .values('id', 'commitment') outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data } for input_data in block_data['inputs']: inputs.append(
Input(
7
2023-12-24 22:15:11+00:00
8k
Rubics-Xuan/Med-DANet
train.py
[ { "identifier": "UNet_crop", "path": "models/backup_models/UNet/Med_DANet_V2.py", "snippet": "class UNet_crop(nn.Module):\n def __init__(self, input_channels, num_classes, mode_train=True, decision_net_predict=False, **kwargs):\n super().__init__()\n\n self.mode_train = mode_train\n self.decision_net_predict = decision_net_predict\n\n self.decision_network = ShuffleNetV2(channels=4, input_size=128, num_class=3, model_size='0.5x')\n self.actions_network = ShuffleNetV2(channels=4, input_size=128, num_class=2, model_size='0.5x')\n self.CNN = Unet(in_channels=input_channels, base_channels=16, num_classes=num_classes)\n\n self.conv1 = lastdecoder(in_channels=16, out_channels=num_classes)\n self.conv2 = lastdecoder(in_channels=16, out_channels=num_classes)\n\n self.sig = nn.Sigmoid()\n self.crop_size = 96\n\n def forward(self, x, crop=True, random_train=False, decide = False, quantify_loss = False,epoch=None):\n\n if self.mode_train:\n\n if crop == False:\n\n output, wholeoutbit, weighted_bits, bits_outs = self.CNN(x,epoch)\n output = self.conv2(output) \n return output\n \n if crop:\n if random_train:\n crop_index = random.randint(0,1)\n if crop_index == 0:\n crop_size = self.crop_size\n actions = self.actions_network(x)\n actions = self.sig(actions)\n\n crop_input = get_patches_frame(x, actions, crop_size, 128).detach()\n crop, wholeoutbit, weighted_bits, bits_outs = self.CNN(crop_input,epoch)\n # b, c, h, w = x.shape\n b, _, h, w = x.shape\n c = crop.size(1)\n crop_output = torch.zeros((b, c, h, w), dtype=x.dtype, device=x.device)\n crop_output[:, :, 0:crop_size, 0:crop_size] = crop\n theta = torch.zeros(b, 2, 3)\n identity = torch.eye(2).reshape(1, 2, 2).repeat(b, 1, 1)\n theta[:, 0:2, 0:2] = identity\n actions = actions.reshape(x.size(0), 2, 1)\n offset = actions * (h - crop_size)\n theta[:, 0:2, 2:3] = - 2 * offset / h\n grid = F.affine_grid(theta.float(), torch.Size((b, c, h, w)), align_corners=False).cuda()\n crop_output = F.grid_sample(crop_output, grid)\n crop_output = self.conv1(crop_output)\n return crop_output\n if crop_index == 1:\n output, wholeoutbit, weighted_bits, bits_outs = self.CNN(x,epoch)\n output = self.conv2(output) \n return output\n\n if not random_train:\n crop_size = self.crop_size\n actions = self.actions_network(x) \n actions = self.sig(actions)\n crop1_input = get_patches_frame(x, actions, crop_size, 128).detach()\n crop1, crop1_wholeoutbit, crop1_weighted_bits, crop1_bits_outs = self.CNN(crop1_input,epoch)\n whole_output, whole_wholeoutbit, whole_weighted_bits, whole_bits_outs = self.CNN(x,epoch)\n b, _, h, w = x.shape\n c = crop1.size(1)\n\n # b, c, h, w = x.shape\n crop1_output = torch.zeros((b, c, h, w), dtype=x.dtype, device=x.device)\n crop1_output[:, :, 0:crop_size, 0:crop_size] = crop1\n theta1 = torch.zeros(b, 2, 3)\n identity = torch.eye(2).reshape(1, 2, 2).repeat(b, 1, 1)\n theta1[:, 0:2, 0:2] = identity\n actions = actions.reshape(x.size(0), 2, 1)\n offset1 = actions * (h - crop_size)\n theta1[:, 0:2, 2:3] = - 2 * offset1 / h\n grid1 = F.affine_grid(theta1.float(), torch.Size((b, c, h, w)), align_corners=False).cuda()\n crop1_output = F.grid_sample(crop1_output, grid1)\n crop1_output = self.conv1(crop1_output)\n whole_output = self.conv2(whole_output)\n if decide == False:\n if quantify_loss == True:\n\n bits_weights = torch.ones((b,10), dtype=x.dtype, device=x.device)\n bits_weights[:, 0:4] = crop1_bits_outs / 32\n bits_weights[:, 5:9] = whole_bits_outs / 32\n GFLOPs = 
torch.FloatTensor([53.085,95.551,95.553,169.871,185.94,94.371,169.87,169.868,314.644,319.247]).reshape(10, 1).to(x.device)\n GFLOPs_output = torch.mm(bits_weights, GFLOPs).reshape(b,1) / 1000\n return crop1_output, whole_output, GFLOPs_output\n if quantify_loss == False:\n return crop1_output, whole_output\n else:\n choice_max = self.decision_network(x)\n choice = gumbel_softmax_sampling(choice_max)\n shape = choice.size()\n _, ind = choice.max(dim=-1)\n choice_hard = torch.zeros_like(choice).view(-1, shape[-1])\n choice_hard.scatter_(1, ind.view(-1, 1), 1)\n choice_hard = choice_hard.view(*shape)\n\n choice_hard = (choice_hard - choice).detach() + choice\n bits_weights = torch.ones((b,10), dtype=x.dtype, device=x.device)\n bits_weights[:, 0:4] = crop1_bits_outs / 32\n bits_weights[:, 5:9] = whole_bits_outs / 32\n GFLOPs = torch.FloatTensor([53.085,95.551,95.553,169.871,185.94,94.371,169.87,169.868,314.644,319.247]).reshape(10, 1).to(x.device)\n crop1_GFLOPs = torch.mm(bits_weights[:, 0:5], GFLOPs[0:5, :]).reshape(b,1) / 1000\n whole_GFLOPs = torch.mm(bits_weights[:, 5:10], GFLOPs[5:10, :]).reshape(b,1) / 1000\n GFLOPS = torch.zeros((b,1), dtype=x.dtype, device=x.device)\n GFLOPs = torch.cat((GFLOPS, crop1_GFLOPs, whole_GFLOPs),dim=1)\n GFLOPs_output = (choice_hard * GFLOPs).sum(-1).reshape(x.size(0),1)\n decision_output = torch.zeros((b, 4, 128, 128), dtype=x.dtype, device=crop1_output.device)\n decision_output = torch.stack((decision_output, crop1_output, whole_output), 0).permute(1,0,2,3,4)\n choice_hard = choice_hard.reshape(b,3,1,1,1).repeat(1,1,4,h,w)\n decision_output = torch.mul(choice_hard, decision_output).sum(1)\n\n return crop1_output, whole_output, decision_output, GFLOPs_output, choice_max.argmax(1)\n\n\n\n else:\n if self.decision_net_predict:\n choice = self.decision_network(x).argmax(1)\n if choice == 0:\n output = torch.zeros((x.size(0), 4, 128, 128), dtype=x.dtype)\n GFLOPs_output = torch.zeros(1,1).sum()\n elif choice == 2:\n output, wholeoutbit, weighted_bits, bits_outs = self.CNN(x,epoch)\n output = self.conv2(output)\n\n bits_weights = torch.ones((1,5), dtype=x.dtype, device=x.device)\n bits_weights[:, 0:4] = bits_outs / 32\n whole_GFLOPs = torch.FloatTensor([94.371,169.87,169.868,314.644,319.247]).reshape(5, 1).to(x.device)\n GFLOPs_output = torch.mm(bits_weights, whole_GFLOPs).sum() / 1000\n else:\n crop_size = self.crop_size\n\n actions = self.actions_network(x)\n actions = self.sig(actions)\n b, _, h, w = x.shape\n x = get_patches_frame(x, actions, crop_size, 128)\n output_crop, wholeoutbit, weighted_bits, bits_outs = self.CNN(x,epoch)\n c = output_crop.size(1)\n output = torch.zeros((b, c, h, w), dtype=output_crop.dtype, device=output_crop.device)\n output[:, :, 0:crop_size, 0:crop_size] = output_crop\n theta = torch.zeros(1, 2, 3)\n identity = torch.eye(2).reshape(1, 2, 2)\n theta[:, 0:2, 0:2] = identity\n theta[:, 0:2, 2:3] = - (actions.reshape(1, 2, 1) * (h - crop_size)) * 2 / h\n grid = F.affine_grid(theta.float(), torch.Size((1, c, h, w)), align_corners=False).cuda()\n output = F.grid_sample(output, grid)\n output = self.conv1(output)\n\n bits_weights = torch.ones((1,5), dtype=x.dtype, device=x.device)\n bits_weights[:, 0:4] = bits_outs / 32\n crop1_GFLOPs = torch.FloatTensor([53.085,95.551,95.553,169.871,185.94]).reshape(5, 1).to(x.device)\n\n GFLOPs_output = torch.mm(bits_weights, crop1_GFLOPs).sum() / 1000\n return output, GFLOPs_output" }, { "identifier": "criterions", "path": "models/criterions.py", "snippet": "def expand_target(x, 
n_class,mode='softmax'):\r\ndef flatten(tensor):\r\ndef Dice(output, target, eps=1e-5):\r\ndef Dice2(output, target, decision, eps=1e-5):\r\ndef softmax_dice(output, target):\r\ndef softmax_dice2(output, target):\r\ndef softmax_dice3(output, GFLOPs_output, target):\r\ndef softmax_dice4(output, decision, GFLOPs_output, target):\r\ndef sigmoid_dice(output, target):\r\ndef Generalized_dice(output, target, eps=1e-5, weight_type='square'):\r\ndef Dual_focal_loss(output, target):\r\n C = tensor.size(1)\r" }, { "identifier": "BraTS", "path": "data/BraTS.py", "snippet": "class BraTS(Dataset):\r\n def __init__(self, list_file, root='', mode='train'):\r\n self.lines = []\r\n paths, names = [], []\r\n with open(list_file) as f:\r\n for line in f:\r\n line = line.strip()\r\n name = line.split('/')[-1]\r\n names.append(name)\r\n path = os.path.join(root, line, name + '_')\r\n paths.append(path)\r\n self.lines.append(line)\r\n self.mode = mode\r\n self.names = names\r\n self.paths = paths\r\n\r\n def __getitem__(self, item):\r\n path = self.paths[item]\r\n if self.mode == 'train':\r\n image, label = pkload(path + 'data_f32b0.pkl')\r\n sample = {'image': image, 'label': label}\r\n sample = transform(sample)\r\n return sample['image'], sample['label']\r\n elif self.mode == 'valid':\r\n image, label = pkload(path + 'data_f32b0.pkl')\r\n sample = {'image': image, 'label': label}\r\n sample = transform_valid(sample)\r\n return sample['image'], sample['label']\r\n else:\r\n image = pkload(path + 'data_f32b0.pkl')\r\n image = np.pad(image, ((0, 0), (0, 0), (0, 5), (0, 0)), mode='constant')\r\n\r\n image = np.ascontiguousarray(image.transpose(3, 0, 1, 2))\r\n image = torch.from_numpy(image).float()\r\n return image\r\n\r\n def __len__(self):\r\n return len(self.names)\r\n\r\n def collate(self, batch):\r\n return [torch.cat(v) for v in zip(*batch)]\r" }, { "identifier": "all_reduce_tensor", "path": "utils/tools.py", "snippet": "def all_reduce_tensor(tensor, op=dist.ReduceOp.SUM, world_size=1):\n tensor = tensor.clone()\n dist.all_reduce(tensor, op)\n tensor.div_(world_size)\n return tensor" } ]
import argparse
import os
import random
import logging
import numpy as np
import time
import math
import setproctitle
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.distributed as dist
import warnings
from pickle import FALSE
from sre_parse import FLAGS
from models.backup_models.UNet.Med_DANet_V2 import UNet_crop
from models import criterions
from data.BraTS import BraTS
from torch.utils.data import DataLoader
from utils.tools import all_reduce_tensor
from tensorboardX import SummaryWriter
from torch import nn
5,555
torch.distributed.init_process_group('nccl') # 初始化GPU通信方式NCCL, PyTorch实现分布式运算是通过NCCL进行显卡通信�? torch.cuda.set_device(args.local_rank) # 为这个进程指定GPU model = UNet_crop(input_channels=4, num_classes=4, mode_train=True) model.cuda(args.local_rank) model = nn.SyncBatchNorm.convert_sync_batchnorm(model) model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) if args.start_epoch > 0: load_file = args.load_file if os.path.exists(load_file): checkpoint = torch.load(load_file, map_location=lambda storage, loc: storage) model.load_state_dict(checkpoint['state_dict']) print('Successfully loading checkpoint of epoch: {} and training from epoch: {}' .format(checkpoint['epoch'], args.start_epoch)) else: print('There is no checkpoint file to load!') model.train() param_dicts = [ { "params": [p for n, p in model.named_parameters() if "decision_network" not in n and p.requires_grad], "lr": args.lr, }, { "params": [p for n, p in model.named_parameters() if "decision_network" in n and p.requires_grad], "lr": args.lr * 10, } ] optimizer = torch.optim.Adam(param_dicts, lr=args.lr, weight_decay=args.weight_decay, amsgrad=args.amsgrad) criterion = getattr(criterions, args.criterion) if args.local_rank == 0: checkpoint_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'checkpoint', args.experiment + args.date) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) train_label_for_DN = f"./traininglabels_DN/{args.experiment + args.date}" train_label_for_DN_HGG = f"{train_label_for_DN}/HGG" train_label_for_DN_LGG = f"{train_label_for_DN}/LGG" if not os.path.exists(train_label_for_DN): os.makedirs(train_label_for_DN) if not os.path.exists(train_label_for_DN_HGG): os.makedirs(train_label_for_DN_HGG) if not os.path.exists(train_label_for_DN_LGG): os.makedirs(train_label_for_DN_LGG) if args.local_rank == 0: writer = SummaryWriter() train_list = os.path.join(args.root, args.train_dir, args.train_file) train_root = os.path.join(args.root, args.train_dir) train_set = BraTS(train_list, train_root, args.mode) train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) logging.info('Samples for train = {}'.format(len(train_set))) num_gpu = (len(args.gpu) + 1) // 2 num_iter_perepoch = len(train_set) // args.batch_size num_iter_perepoch = num_iter_perepoch * int(128) train_loader = DataLoader(dataset=train_set, sampler=train_sampler, batch_size=args.batch_size // num_gpu, drop_last=True, num_workers=args.num_workers, pin_memory=True) start_time_training = time.time() torch.set_grad_enabled(True) fix = 0 train_epoch = [150, 300] scale_lr = 10 for epoch in range(args.start_epoch, args.end_epoch): train_sampler.set_epoch(epoch) # shuffle setproctitle.setproctitle('{}: {}/{}'.format(args.user, epoch + 1, args.end_epoch)) start_epoch = time.time() for i, data in enumerate(train_loader): x, target = data x = x.cuda(args.local_rank, non_blocking=True) target = target.cuda(args.local_rank, non_blocking=True) # shuffle batchsize & slice dimension max_number = (args.batch_size // num_gpu) * 128 index = torch.randperm(max_number) B, D = x.size(0), x.size(-1) x = x.permute(0, 4, 1, 2, 3).contiguous().view(B * D, 4, 128, 128) target = target.permute(0, 3, 1, 2).contiguous().view(B * D, 128, 128) x = x[index] target = target[index] if epoch < train_epoch[0]: for s in range(128): current_iter = epoch * num_iter_perepoch + i * int(128) + (s + 1) warm_up_learning_rate_adjust_iter(args.lr, current_iter, num_iter_perepoch, 
args.end_epoch * num_iter_perepoch, optimizer, power=0.9) x_s = x[s * (args.batch_size // num_gpu):(s + 1) * (args.batch_size // num_gpu) - 1, ...] crop1_output, whole_output = model(x_s, crop=True, decide=False, quantify_loss = False,epoch= epoch) loss_crop1, loss1_crop1, loss2_crop1, loss3_crop1 = criterion(crop1_output, target[s * ( args.batch_size // num_gpu):(s + 1) * (args.batch_size // num_gpu) - 1, ...]) loss_whole, loss1_whole, loss2_whole, loss3_whole = criterion(whole_output, target[ s * (args.batch_size // num_gpu):(s + 1) * (args.batch_size // num_gpu) - 1,...]) loss = (loss_crop1 + loss_whole)/2 loss1 = (loss1_crop1 + loss1_whole)/2 loss2 = (loss2_crop1 + loss2_whole)/2 loss3 = (loss3_crop1 + loss3_whole)/2
# from data.BraTS_2020 import BraTS warnings.filterwarnings('ignore') local_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) parser = argparse.ArgumentParser() # Basic Information parser.add_argument('--user', default='shr', type=str) parser.add_argument('--experiment', default='cnn300_50crop', type=str) parser.add_argument('--date', default=local_time.split(' ')[0], type=str) parser.add_argument('--description', default='cnn300_50crop' 'training on train_0.txt!', type=str) # DataSet Information parser.add_argument('--root', default='./dataset/BraTS2019/', type=str) # parser.add_argument('--root', default='./dataset/BraTS2020/', type=str) parser.add_argument('--train_dir', default='Train', type=str) parser.add_argument('--valid_dir', default='Train', type=str) parser.add_argument('--output_dir', default='output', type=str) parser.add_argument('--submission', default='submission', type=str) parser.add_argument('--visual', default='visualization', type=str) parser.add_argument('--heatmap_dir', default='heatmap', type=str) parser.add_argument('--test_date', default=local_time.split(' ')[0], type=str) parser.add_argument('--mode', default='train', type=str) parser.add_argument('--train_file', default='train.txt', type=str) parser.add_argument('--valid_file', default='valid.txt', type=str) parser.add_argument('--dataset', default='brats', type=str) parser.add_argument('--model_name', default='cnn300_50crop', type=str) parser.add_argument('--input_C', default=4, type=int) parser.add_argument('--input_H', default=240, type=int) parser.add_argument('--input_W', default=240, type=int) parser.add_argument('--input_D', default=160, type=int) parser.add_argument('--crop_H', default=128, type=int) parser.add_argument('--crop_W', default=128, type=int) parser.add_argument('--crop_D', default=128, type=int) parser.add_argument('--output_D', default=155, type=int) # Training Information parser.add_argument('--lr', default=0.0001, type=float) parser.add_argument('--weight_decay', default=1e-5, type=float) parser.add_argument('--amsgrad', default=True, type=bool) parser.add_argument('--criterion', default='softmax_dice', type=str) parser.add_argument('--num_class', default=4, type=int) parser.add_argument('--seed', default=1085, type=int) parser.add_argument('--no_cuda', default=False, type=bool) parser.add_argument('--gpu', default='4,5,6,7', type=str) parser.add_argument('--num_workers', default=8, type=int) parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--start_epoch', default=0, type=int) parser.add_argument('--end_epoch', default=350, type=int) parser.add_argument('--save_freq', default=50, type=int) parser.add_argument('--load_file', default='', type=str) parser.add_argument('--local_rank', default=0, type=int, help='node rank for distributed training') args = parser.parse_args() def dice_score(o, t, eps=1e-8): num = 2 * (o * t).sum() + eps den = o.sum() + t.sum() + eps return num / den def softmax_output_dice(output, target): ret = [] # WT o = output > 0 t = target > 0 # ce ret += dice_score(o, t), # TC o = (output == 1) | (output == 3) t = (target == 1) | (target == 4) ret += dice_score(o, t), # ET o = (output == 3) t = (target == 4) ret += dice_score(o, t), return ret def main_worker(): if args.local_rank == 0: log_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'log', args.experiment + args.date) log_file = log_dir + '.txt' log_args(log_file) logging.info('--------------------------------------This is all 
configurations----------------------------------') for arg in vars(args): logging.info('{}={}'.format(arg, getattr(args, arg))) logging.info('----------------------------------------This is a halving line----------------------------------') logging.info('{}'.format(args.description)) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) random.seed(args.seed) np.random.seed(args.seed) torch.distributed.init_process_group('nccl') # Initialize the NCCL backend; PyTorch distributed training uses NCCL for GPU-to-GPU communication torch.cuda.set_device(args.local_rank) # Assign a GPU to this process model = UNet_crop(input_channels=4, num_classes=4, mode_train=True) model.cuda(args.local_rank) model = nn.SyncBatchNorm.convert_sync_batchnorm(model) model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) if args.start_epoch > 0: load_file = args.load_file if os.path.exists(load_file): checkpoint = torch.load(load_file, map_location=lambda storage, loc: storage) model.load_state_dict(checkpoint['state_dict']) print('Successfully loading checkpoint of epoch: {} and training from epoch: {}' .format(checkpoint['epoch'], args.start_epoch)) else: print('There is no checkpoint file to load!') model.train() param_dicts = [ { "params": [p for n, p in model.named_parameters() if "decision_network" not in n and p.requires_grad], "lr": args.lr, }, { "params": [p for n, p in model.named_parameters() if "decision_network" in n and p.requires_grad], "lr": args.lr * 10, } ] optimizer = torch.optim.Adam(param_dicts, lr=args.lr, weight_decay=args.weight_decay, amsgrad=args.amsgrad) criterion = getattr(criterions, args.criterion) if args.local_rank == 0: checkpoint_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'checkpoint', args.experiment + args.date) if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) train_label_for_DN = f"./traininglabels_DN/{args.experiment + args.date}" train_label_for_DN_HGG = f"{train_label_for_DN}/HGG" train_label_for_DN_LGG = f"{train_label_for_DN}/LGG" if not os.path.exists(train_label_for_DN): os.makedirs(train_label_for_DN) if not os.path.exists(train_label_for_DN_HGG): os.makedirs(train_label_for_DN_HGG) if not os.path.exists(train_label_for_DN_LGG): os.makedirs(train_label_for_DN_LGG) if args.local_rank == 0: writer = SummaryWriter() train_list = os.path.join(args.root, args.train_dir, args.train_file) train_root = os.path.join(args.root, args.train_dir) train_set = BraTS(train_list, train_root, args.mode) train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) logging.info('Samples for train = {}'.format(len(train_set))) num_gpu = (len(args.gpu) + 1) // 2 num_iter_perepoch = len(train_set) // args.batch_size num_iter_perepoch = num_iter_perepoch * int(128) train_loader = DataLoader(dataset=train_set, sampler=train_sampler, batch_size=args.batch_size // num_gpu, drop_last=True, num_workers=args.num_workers, pin_memory=True) start_time_training = time.time() torch.set_grad_enabled(True) fix = 0 train_epoch = [150, 300] scale_lr = 10 for epoch in range(args.start_epoch, args.end_epoch): train_sampler.set_epoch(epoch) # shuffle setproctitle.setproctitle('{}: {}/{}'.format(args.user, epoch + 1, args.end_epoch)) start_epoch = time.time() for i, data in enumerate(train_loader): x, target = data x = x.cuda(args.local_rank, non_blocking=True) target = target.cuda(args.local_rank, non_blocking=True) # shuffle batchsize & slice dimension max_number = (args.batch_size // num_gpu) * 128 index = torch.randperm(max_number) 
B, D = x.size(0), x.size(-1) x = x.permute(0, 4, 1, 2, 3).contiguous().view(B * D, 4, 128, 128) target = target.permute(0, 3, 1, 2).contiguous().view(B * D, 128, 128) x = x[index] target = target[index] if epoch < train_epoch[0]: for s in range(128): current_iter = epoch * num_iter_perepoch + i * int(128) + (s + 1) warm_up_learning_rate_adjust_iter(args.lr, current_iter, num_iter_perepoch, args.end_epoch * num_iter_perepoch, optimizer, power=0.9) x_s = x[s * (args.batch_size // num_gpu):(s + 1) * (args.batch_size // num_gpu) - 1, ...] crop1_output, whole_output = model(x_s, crop=True, decide=False, quantify_loss = False,epoch= epoch) loss_crop1, loss1_crop1, loss2_crop1, loss3_crop1 = criterion(crop1_output, target[s * ( args.batch_size // num_gpu):(s + 1) * (args.batch_size // num_gpu) - 1, ...]) loss_whole, loss1_whole, loss2_whole, loss3_whole = criterion(whole_output, target[ s * (args.batch_size // num_gpu):(s + 1) * (args.batch_size // num_gpu) - 1,...]) loss = (loss_crop1 + loss_whole)/2 loss1 = (loss1_crop1 + loss1_whole)/2 loss2 = (loss2_crop1 + loss2_whole)/2 loss3 = (loss3_crop1 + loss3_whole)/2
reduce_loss = all_reduce_tensor(loss, world_size=num_gpu).data.cpu().numpy()
3
2023-12-28 07:26:55+00:00
8k
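The record above revolves around the `dice_score` and `softmax_output_dice` helpers from the BraTS training script, which measure whole-tumour (WT), tumour-core (TC) and enhancing-tumour (ET) overlap while mapping the network's label 3 onto the ground-truth label 4. A minimal, self-contained sketch of that evaluation logic (NumPy only; the tiny 2×2×2 volumes are purely illustrative, not from the record):

```python
import numpy as np

def dice_score(o, t, eps=1e-8):
    # Dice = 2*|O ∩ T| / (|O| + |T|); eps keeps empty masks from dividing by zero
    num = 2 * (o * t).sum() + eps
    den = o.sum() + t.sum() + eps
    return num / den

# Hypothetical prediction/target volumes following the record's label convention:
# prediction uses {0,1,2,3}, ground truth uses {0,1,2,4} for the same regions.
output = np.array([[[0, 1], [2, 3]], [[3, 0], [1, 2]]])
target = np.array([[[0, 1], [2, 4]], [[4, 0], [1, 2]]])

wt = dice_score(output > 0, target > 0)                                         # whole tumour
tc = dice_score((output == 1) | (output == 3), (target == 1) | (target == 4))   # tumour core
et = dice_score(output == 3, target == 4)                                       # enhancing tumour
print(wt, tc, et)  # all 1.0 here, since the predicted regions match the target
```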
datrocity/pond
pond/versioned_artifact.py
[ { "identifier": "Artifact", "path": "pond/artifact/artifact.py", "snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if, for instance, it is sent by email.\n \"\"\"\n\n # --- Artifact class interface\n\n # todo: what is the class_id for?\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n # todo this exception is not defined here\n raise InvalidArtifactClass(class_id)\n return subclass\n\n # --- Artifact public interface\n\n def __init__(self, data, metadata=None):\n \"\"\" Create an Artifact.\n\n Parameters\n ----------\n data: any\n The data of the artifact.\n metadata: dict\n User-defined metadata, saved with the artifact (optional).\n The metadata keys and values will be stored as strings.\n \"\"\"\n self.data = data\n if metadata is None:\n metadata = {}\n self.metadata = metadata\n\n @classmethod\n def read(cls, path, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a file, given the path.\n\n Parameters\n ----------\n path: str\n Filename from which the artifact is read.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Additional parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n with open(path, 'rb') as f:\n artifact = cls.read_bytes(f, metadata, **kwargs)\n return artifact\n\n @classmethod\n def read_bytes(cls, file_, metadata=None, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n metadata: dict or None\n The metadata for the artifact. If defined, it takes the place of any metadata\n defined in the artifact itself.\n Typically, this external artifact metadata comes from an artifact manifest. 
If the\n artifact has been written as a `pond` `VersionedArtifact`, then the two sources of\n metadata are identical.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n artifact = cls._read_bytes(file_, **kwargs)\n if metadata is not None:\n artifact.metadata = metadata\n return artifact\n\n # todo why the kwargs\n def write(self, path, **kwargs):\n \"\"\" Writes the artifact to file.\n\n Parameters\n ----------\n path: str\n Path to which the artifact is written.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n with open(path, 'wb') as f:\n self.write_bytes(f, **kwargs)\n\n # --- Abstract interface\n\n @staticmethod\n @abstractmethod\n def filename(basename):\n \"\"\" Complete a base filename with an extension.\n\n Parameters\n ----------\n basename: str\n The filename without extension.\n\n Returns\n -------\n filename: str\n The completed filename.\n\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def _read_bytes(cls, file_, **kwargs):\n \"\"\" Reads the artifact from a binary file.\n\n This is a private method that loads the artifact from a binary file without dealing with\n the logic of the external metadata. It is called by `Artifact.read_bytes`.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object from which the artifact is read, opened in binary mode.\n kwargs: dict\n Parameters for the reader.\n\n Returns\n -------\n artifact: Artifact\n An instance of the artifact.\n \"\"\"\n pass\n\n @abstractmethod\n def write_bytes(self, file_, **kwargs):\n \"\"\" Writes the artifact to binary file.\n\n This method also need to take care of writing the artifact metadata in the file itself,\n whenever possible.\n If the artifact is being written as a `pond` `VersionedArtifact`, then the metadata is also\n stored in an external manifest.\n\n Parameters\n ----------\n file_: file-like object\n A file-like object to which the artifact is written, opened in binary mode.\n kwargs: dict\n Parameters for the writer.\n\n \"\"\"\n pass\n\n def get_artifact_metadata(self):\n \"\"\"\n This is not the user metadata!\n\n Returns\n -------\n\n \"\"\"\n return None" }, { "identifier": "DataType", "path": "pond/conventions.py", "snippet": "class WriteMode(str, Enum):\n OVERWRITE = 'overwrite'\n ERROR_IF_EXISTS = 'errorifexists'\nMANIFEST_FILENAME = 'manifest.yml'\nMETADATA_DIRNAME = '_pond'\nTXT_ENCODING = 'utf-8'\nVERSIONS_LOCK_FILENAME = '_VERSIONS_LOCK'\ndef urijoinpath(*parts: str) -> str:\ndef versioned_artifact_location(location: str, artifact_name: str):\ndef version_location(location: str, version_name: VersionName) -> str:\ndef versions_lock_file_location(location: str) -> str:\ndef version_data_location(version_location: str, data_filename: str) -> str:\ndef version_manifest_location(version_location: str) -> str:\ndef version_uri(datastore_id: str, location: str, artifact_name: str, version_name: VersionName):" }, { "identifier": "IncompatibleVersionName", "path": "pond/exceptions.py", "snippet": "class IncompatibleVersionName(Exception):\n\n def __init__(self, version_name: 'VersionName', version_name_class: Type['VersionName']):\n msg = (f'Version name: \"{version_name}\" not of type \"{version_name_class.__name__}\".')\n super().__init__(msg)" }, { "identifier": "VersionAlreadyExists", "path": "pond/exceptions.py", "snippet": "class VersionAlreadyExists(Exception):\n\n def __init__(self, version_uri: str):\n super().__init__(f'Version already exists: {version_uri}.')" }, { 
"identifier": "Manifest", "path": "pond/metadata/manifest.py", "snippet": "class Manifest:\n\n # --- Manifest class interface\n\n def __init__(self):\n self._sections = {}\n\n @classmethod\n def from_yaml(cls, manifest_location, datastore):\n \"\"\"\n\n Parameters\n ----------\n manifest_location\n datastore\n\n Returns\n -------\n\n \"\"\"\n manifest_dict = datastore.read_yaml(manifest_location)\n return cls.from_nested_dict(manifest_dict)\n\n @classmethod\n def from_nested_dict(cls, manifest_dict: dict):\n manifest = cls()\n for section_name, metadata in manifest_dict.items():\n # TODO make this a FrozendictMetadataSource\n source = DictMetadataSource(name=section_name, metadata=metadata)\n manifest.add_section(source)\n return manifest\n\n # --- Manifest public interface\n\n def to_yaml(self, manifest_location, datastore):\n metadata = self.collect()\n datastore.write_yaml(manifest_location, metadata)\n\n def add_section(self, metadata_source):\n \"\"\"\n\n Parameters\n ----------\n metadata_source\n If None, nothing is added but no exception is raised.\n\n Returns\n -------\n\n \"\"\"\n if metadata_source is not None:\n self._sections[metadata_source.section_name()] = metadata_source\n\n def collect_section(self, name, default_metadata=None):\n source = self._sections.get(name, None)\n if source is None:\n metadata = default_metadata\n else:\n metadata = source.collect()\n return metadata\n\n def collect(self):\n dict_ = {}\n for name, source in self._sections.items():\n source_metadata = {k: str(v) for k, v in source.collect().items()}\n dict_[name] = source_metadata\n return dict_" }, { "identifier": "Datastore", "path": "pond/storage/datastore.py", "snippet": "class Datastore(ABC):\n \"\"\" Versioned storage for the artifacts.\n\n Parameters\n ----------\n id: str\n Unique identifier for the datastore. This is used in the URI for each versioned\n artifact to uniquely identify the artifact.\n \"\"\"\n\n # -- Datastore class interface\n\n def __init__(self, id: str):\n self.id = id\n\n # -- Abstract interface\n\n @abstractmethod\n def open(self, path: str, mode: str) -> IO[Any]:\n \"\"\" Open a file-like object\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n mode: str\n Specifies the mode in which the file is opened.\n\n Returns\n -------\n IO[Any]\n An open file-like object.\n\n \"\"\"\n pass\n\n @abstractmethod\n def read(self, path: str) -> bytes:\n \"\"\" Read a sequence of bytes from the data store.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n bytes\n The sequence of bytes read from `path`.\n\n Raises\n ------\n FileNotFoundError\n If the requested path does not exist.\n \"\"\"\n pass\n\n @abstractmethod\n def write(self, path: str, data: bytes) -> None:\n \"\"\" Write a sequence of bytes to the data store.\n\n `path` contains the path relative to the root of the data store, including the name\n of the file to be created. 
If a file already exists at `path`, it is overwritten.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n data: bytes\n Sequence of bytes to write at `path`.\n \"\"\"\n pass\n\n @abstractmethod\n def exists(self, path: str) -> bool:\n \"\"\" Returns True if the file exists.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n bool\n True if the file exists, false otherwise\n \"\"\"\n ...\n\n @abstractmethod\n def delete(self, path: str, recursive: bool = False) -> None:\n \"\"\"Deletes a file or directory\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n recursive: bool, optional, default is False\n Whether to recursively delete the location\n \"\"\"\n ...\n\n @abstractmethod\n def makedirs(self, path: str) -> None:\n \"\"\" Creates the specified directory if needed.\n\n If the directories already exist, the method does not do anything.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n \"\"\"\n ...\n\n # -- Read/write utility methods\n\n def read_string(self, path: str) -> str:\n \"\"\" Read a string from a file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n str\n The read string\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return self.read(path).decode(TXT_ENCODING)\n\n def write_string(self, path: str, content: str) -> None:\n \"\"\" Write a string to a file.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n content: str\n Content to write\n \"\"\"\n self.write(path, content.encode(TXT_ENCODING))\n\n def read_yaml(self, path: str) -> Any:\n \"\"\" Read and parse a YAML file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n Any\n The parsed object\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return yaml_load(self.read_string(path))\n\n def write_yaml(self, path: str, content: Any) -> None:\n \"\"\" Serializes to YAML and write an object to a file.\n\n Intermediate directories that do not exist will be created.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n content: Any\n Content to write\n \"\"\"\n return self.write_string(path, yaml_dump(content))\n\n def read_json(self, path: str) -> Any:\n \"\"\" Read and parse a JSON file.\n\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n\n Returns\n -------\n Any\n The parsed object\n\n Raises\n ------\n FileNotFound\n If the file cannot be found\n \"\"\"\n return json.loads(self.read_string(path))\n\n def write_json(self, path: str, content: Any) -> None:\n \"\"\"Serializes to JSON and write an object to a file\n Parameters\n ----------\n path: str\n Path relative to the root of the data store.\n content: Any\n Content to write\n \"\"\"\n return self.write_string(path, json.dumps(content, separators=(',', ':')))" }, { "identifier": "Version", "path": "pond/version.py", "snippet": "class Version:\n\n def __init__(self, artifact_name: str, version_name: VersionName, artifact: Artifact,\n manifest: Optional[Manifest] = None):\n \"\"\" Manages a version: its manifest, name, and artifact.\n \"\"\"\n self.artifact_name = artifact_name\n self.version_name = 
version_name\n self.manifest = manifest\n self.artifact = artifact\n\n def get_metadata(self, location, datastore, data_filename):\n version_metadata = {\n 'uri': self.get_uri(location, datastore),\n 'filename': data_filename,\n 'date_time': datetime.datetime.now(),\n 'artifact_name': self.artifact_name,\n }\n version_metadata_source = DictMetadataSource(name='version', metadata=version_metadata)\n return version_metadata_source\n\n def write(self, location: str, datastore: Datastore, manifest: Manifest):\n # TODO: manifest is modified in-place, is that an issue?\n\n #: location of the version folder\n version_location_ = version_location(location, self.version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n #: filename for the saved data\n data_basename = f'{self.artifact_name}_{str(self.version_name)}'\n data_filename = self.artifact.filename(data_basename)\n\n version_metadata_source = self.get_metadata(location, datastore, data_filename)\n manifest.add_section(version_metadata_source)\n artifact_metadata_source = self.artifact.get_artifact_metadata()\n manifest.add_section(artifact_metadata_source)\n manifest.to_yaml(manifest_location, datastore)\n\n datastore.makedirs(version_location_)\n data_location = version_data_location(version_location_, data_filename)\n with datastore.open(data_location, 'wb') as f:\n self.artifact.write_bytes(f)\n\n # save stored manifest\n self.manifest = manifest\n\n # todo store and recover artifact_class from manifest\n @classmethod\n def read(cls, version_name, artifact_class, location, datastore):\n #: location of the version folder\n version_location_ = version_location(location, version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n if not datastore.exists(manifest_location):\n raise VersionDoesNotExist(location, str(version_name))\n manifest = Manifest.from_yaml(manifest_location, datastore)\n\n version_metadata = manifest.collect_section('version')\n data_filename = version_metadata['filename']\n data_location = version_data_location(version_location_, data_filename)\n user_metadata = manifest.collect_section('user')\n with datastore.open(data_location, 'rb') as f:\n artifact = artifact_class.read_bytes(f, metadata=user_metadata)\n\n version = cls(\n artifact_name=version_metadata['artifact_name'],\n version_name=version_name,\n artifact=artifact,\n manifest=manifest,\n )\n\n return version\n\n def get_uri(self, location, datastore):\n \"\"\" Build URI for a specific location and datastore. \"\"\"\n uri = version_uri(datastore.id, location, self.artifact_name, self.version_name)\n return uri\n\n def exists(self, location: str, datastore: Datastore):\n \"\"\" Does this version already exists on disk?\n\n Parameters\n ----------\n location: str\n Root location in the data store where artifacts are read/written. This is used to\n create folder-like groups inside a datastore. 
This can be, for instance, the name of\n a project or experiment.\n datastore: Datastore\n Data store object, representing the location where the artifacts are read/written.\n \"\"\"\n #: location of the version folder\n version_location_ = version_location(location, self.version_name)\n #: location of the manifest file\n manifest_location = version_manifest_location(version_location_)\n\n return datastore.exists(manifest_location)" }, { "identifier": "VersionName", "path": "pond/version_name.py", "snippet": "class VersionName(ABC):\n \"\"\" Base class for all kind of version naming conventions.\n\n It defines a way to sort version names and compute the next one.\n \"\"\"\n\n # --- VersionName class interface\n\n @classmethod\n def class_id(cls):\n \"\"\" String ID to be able to find this class from its name. \"\"\"\n return cls.__name__\n\n @classmethod\n def subclass_from_id(cls, class_id: str) -> Type['Artifact']:\n \"\"\" Find a subclass from its class ID. \"\"\"\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n if subclass.class_id() == class_id:\n break\n else:\n raise InvalidVersionName(class_id)\n return subclass\n\n @classmethod\n def from_string(cls, version_name: str) -> 'VersionName':\n \"\"\"Parses a string into a version name.\n\n Parameters\n ----------\n version_name: str\n Version name as a string that needs to be parsed\n\n Returns\n -------\n VersionName\n The parsed version name\n\n Raises\n ------\n InvalidVersionName\n If the version name cannot be parsed\n \"\"\"\n # Only first-level subclasses for the moment, it should be sufficient\n # At the same time, we give up defining a version name priority, and will return the\n # first VersionName subclass that can parse the string\n # TODO: remove the magic\n subclasses = cls.__subclasses__()\n for subclass in subclasses:\n try:\n version = subclass.from_string(version_name)\n break\n except InvalidVersionName:\n pass\n else:\n raise InvalidVersionName(version_name)\n return version\n\n @classmethod\n @abstractmethod\n def next(cls, prev: 'VersionName') -> 'VersionName':\n \"\"\" Generate a new version name given a previous one.\n\n If `prev` is None, this method will generate a first version name.\n\n Some subclasses of `VersionName` will ignore the argument `prev`, except in case of\n collision (e.g., datetime version names).\n\n Parameters\n ----------\n prev: Optional['VersionName']\n The previous version name.\n\n Returns\n -------\n VersionName\n A new version name.\n \"\"\"\n ...\n\n @classmethod\n def first(cls) -> 'VersionName':\n \"\"\" Generate the first version name.\n\n Alias for `VersionName.next(None)`.\n\n Returns\n -------\n VersionName\n The first version name.\n \"\"\"\n return cls.next(prev=None)\n\n # --- VersionName protected interface\n\n @abstractmethod\n def _partial_compare(self, that: 'VersionName') -> Optional[int]:\n ...\n\n # --- Magic methods\n\n def __cmp__(self, other: 'VersionName') -> int:\n cmp = self._partial_compare(other)\n return cmp if cmp is not None else _compare_classnames(self, other)\n\n def __eq__(self, other: Any) -> bool:\n return self._partial_compare(other) == 0\n\n def __ne__(self, other: Any) -> bool:\n return self._partial_compare(other) != 0\n\n def __lt__(self, other: Any) -> bool:\n return self.__cmp__(other) < 0\n\n def __le__(self, other: Any) -> bool:\n return self.__cmp__(other) <= 0\n\n def __gt__(self, other: Any) -> bool:\n return self.__cmp__(other) > 0\n\n def __ge__(self, other: Any) -> bool:\n return self.__cmp__(other) >= 0\n\n def 
__repr__(self) -> str:\n return f'{self.__class__.__name__}(\"{str(self)}\")'" } ]
import json import logging import time from typing import List, Type, Optional, Union from pond.artifact import Artifact from pond.conventions import ( DataType, WriteMode, version_manifest_location, version_location, versions_lock_file_location, versioned_artifact_location, ) from pond.exceptions import ( IncompatibleVersionName, VersionAlreadyExists, ) from pond.metadata.manifest import Manifest from pond.storage.datastore import Datastore from pond.version import Version from pond.version_name import VersionName
6,552
self.versions_manifest = { 'artifact_class': artifact_class.class_id(), 'version_name_class': version_name_class.class_id(), } self.versions_location = versioned_artifact_location(location, artifact_name) # todo this goes to conventions.py self.versions_list_location = f'{self.versions_location}/versions.json' self.versions_manifest_location = f'{self.versions_location}/manifest.yml' if not self.datastore.exists(self.versions_location): # Create the versioned artifact folder organization if it does not exist self.datastore.makedirs(self.versions_location) self._write_version_names([]) self.versions_manifest['artifact_class'] = artifact_class.class_id() self.versions_manifest['version_name_class'] = version_name_class.class_id() self._write_manifest() # --- VersionedArtifact class interface @classmethod def from_datastore(cls, artifact_name: str, location: str, datastore: Datastore): versions_location = versioned_artifact_location(location, artifact_name) versions_manifest_location = f'{versions_location}/manifest.yml' versions_manifest = datastore.read_yaml(versions_manifest_location) artifact_class_id = versions_manifest['artifact_class'] artifact_class = Artifact.subclass_from_id(artifact_class_id) version_name_class_id = versions_manifest['version_name_class'] version_name_class = VersionName.subclass_from_id(version_name_class_id) versioned_artifact = cls( artifact_name=artifact_name, location=location, datastore=datastore, artifact_class=artifact_class, version_name_class=version_name_class, ) return versioned_artifact # --- VersionedArtifact public interface def read(self, version_name: Optional[Union[str, VersionName]] = None) -> Version: """ Read a version of the artifact. Parameters ---------- version_name: Union[str, VersionName], optional Version name, given as a string (more common) or as VersionName instance. If None, the latest version name for the given artifact is used. Raises ------ VersionDoesNotExist If the requested version does not exist. Returns ------- Version The version object read from storage. """ if version_name is not None: if isinstance(version_name, str): version_name = self.version_name_class.from_string(version_name) else: version_name = self.latest_version_name() version = Version.read( version_name=version_name, artifact_class=self.artifact_class, datastore=self.datastore, location=self.versions_location, ) return version def write(self, data: DataType, manifest: Manifest, version_name: Optional[Union[str, VersionName]] = None, write_mode: WriteMode = WriteMode.ERROR_IF_EXISTS): """ Write some data to storage. Parameters ---------- data: DataType The artifact data to write. manifest: Manifest Metadata to store with the data. version_name: Union[str, VersionName], optional Version name, given as a string (more common) or as VersionName instance. If None, the latest version name for the given artifact is used. write_mode: WriteMode Write mode, either WriteMode.ERROR_IF_EXISTS or WriteMode.OVERWRITE. Raises ------ IncompatibleVersionName If the provided version name does not correspond to the version name class used in this versioned artifact. VersionAlreadyExists If the provided version name exists, and the write mode is "ERROR_IF_EXISTS". Returns ------- Version The version object read from storage. 
""" # todo lock if version_name is None: prev_version_name = self.latest_version_name(raise_if_none=False) version_name = self.version_name_class.next(prev_version_name) if isinstance(version_name, str): version_name = VersionName.from_string(version_name) if not isinstance(version_name, self.version_name_class):
logger = logging.getLogger(__name__) # Time to wait before retrying when creating a new version fails NEW_VERSION_WAIT_MS = 1000 class VersionedArtifact: def __init__(self, artifact_name: str, location: str, datastore: Datastore, artifact_class: Type[Artifact], version_name_class: Type[VersionName]): """ An artifact versioned and stored on disk. `VersionedArtifact` manages the versioning, data, and metadata, of an artifact. Parameters ---------- artifact_name: str Name of the artifact. location: str Root location in the data store where artifacts are read/written. This is used to create folder-like groups inside a datastore. This can be, for instance, the name of a project or experiment. datastore: Datastore Data store object, representing the storage where the artifacts are read/written. artifact_class: Type[Artifact] version_name_class: Type[VersionName] Class used to create increasing version names. The default value, `SimpleVersionName` creates version names as `v1`, `v2`, etc. """ self.artifact_name = artifact_name self.location = location self.datastore = datastore self.artifact_class = artifact_class self.version_name_class = version_name_class self.versions_manifest = { 'artifact_class': artifact_class.class_id(), 'version_name_class': version_name_class.class_id(), } self.versions_location = versioned_artifact_location(location, artifact_name) # todo this goes to conventions.py self.versions_list_location = f'{self.versions_location}/versions.json' self.versions_manifest_location = f'{self.versions_location}/manifest.yml' if not self.datastore.exists(self.versions_location): # Create the versioned artifact folder organization if it does not exist self.datastore.makedirs(self.versions_location) self._write_version_names([]) self.versions_manifest['artifact_class'] = artifact_class.class_id() self.versions_manifest['version_name_class'] = version_name_class.class_id() self._write_manifest() # --- VersionedArtifact class interface @classmethod def from_datastore(cls, artifact_name: str, location: str, datastore: Datastore): versions_location = versioned_artifact_location(location, artifact_name) versions_manifest_location = f'{versions_location}/manifest.yml' versions_manifest = datastore.read_yaml(versions_manifest_location) artifact_class_id = versions_manifest['artifact_class'] artifact_class = Artifact.subclass_from_id(artifact_class_id) version_name_class_id = versions_manifest['version_name_class'] version_name_class = VersionName.subclass_from_id(version_name_class_id) versioned_artifact = cls( artifact_name=artifact_name, location=location, datastore=datastore, artifact_class=artifact_class, version_name_class=version_name_class, ) return versioned_artifact # --- VersionedArtifact public interface def read(self, version_name: Optional[Union[str, VersionName]] = None) -> Version: """ Read a version of the artifact. Parameters ---------- version_name: Union[str, VersionName], optional Version name, given as a string (more common) or as VersionName instance. If None, the latest version name for the given artifact is used. Raises ------ VersionDoesNotExist If the requested version does not exist. Returns ------- Version The version object read from storage. 
""" if version_name is not None: if isinstance(version_name, str): version_name = self.version_name_class.from_string(version_name) else: version_name = self.latest_version_name() version = Version.read( version_name=version_name, artifact_class=self.artifact_class, datastore=self.datastore, location=self.versions_location, ) return version def write(self, data: DataType, manifest: Manifest, version_name: Optional[Union[str, VersionName]] = None, write_mode: WriteMode = WriteMode.ERROR_IF_EXISTS): """ Write some data to storage. Parameters ---------- data: DataType The artifact data to write. manifest: Manifest Metadata to store with the data. version_name: Union[str, VersionName], optional Version name, given as a string (more common) or as VersionName instance. If None, the latest version name for the given artifact is used. write_mode: WriteMode Write mode, either WriteMode.ERROR_IF_EXISTS or WriteMode.OVERWRITE. Raises ------ IncompatibleVersionName If the provided version name does not correspond to the version name class used in this versioned artifact. VersionAlreadyExists If the provided version name exists, and the write mode is "ERROR_IF_EXISTS". Returns ------- Version The version object read from storage. """ # todo lock if version_name is None: prev_version_name = self.latest_version_name(raise_if_none=False) version_name = self.version_name_class.next(prev_version_name) if isinstance(version_name, str): version_name = VersionName.from_string(version_name) if not isinstance(version_name, self.version_name_class):
raise IncompatibleVersionName(
2
2023-12-24 13:05:58+00:00
8k
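Read together, the context and body of this record describe a read/write round trip through `VersionedArtifact`: a `Manifest` carries user metadata, a `Datastore` provides the storage backend, and the next version name is derived automatically when omitted. A rough sketch of that call sequence follows; it assumes a concrete `Datastore` instance, a concrete `Artifact` subclass and the `SimpleVersionName` class mentioned in the record's docstring are available — the placeholder names below are assumptions, not defined in the record.

```python
from pond.metadata.manifest import Manifest
from pond.versioned_artifact import VersionedArtifact

# Placeholders to be supplied by the caller: my_datastore (a concrete Datastore
# subclass instance), MyArtifact (a concrete Artifact subclass), SimpleVersionName,
# and my_data (the object to store).
versioned = VersionedArtifact(
    artifact_name="sales_report",
    location="experiments/q3",
    datastore=my_datastore,
    artifact_class=MyArtifact,
    version_name_class=SimpleVersionName,
)

# Write: user metadata travels as the "user" section of the Manifest; with no
# version name given, the next one (v1, v2, ...) follows the latest on disk.
manifest = Manifest.from_nested_dict({"user": {"author": "alice"}})
versioned.write(data=my_data, manifest=manifest)

# Read: with no version name, the latest stored version is returned.
version = versioned.read()
print(version.version_name, version.artifact.data)
```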
demirogun/pyethnobiology
pyethnobiology/indices.py
[ { "identifier": "RadialPlot", "path": "pyethnobiology/visualization.py", "snippet": "class RadialPlot:\n \"\"\"\n Creates a radial bar plot to visualize data in a circular layout.\n \"\"\"\n\n def __init__(self,\n data: pd.DataFrame,\n colorbar_title: str,\n indice: str = None,\n num_rows: int = 10,\n ytick_position: str = \"onbar\",\n colors: list = None,\n show_colorbar: bool = True,\n informant_column: str = \"informant\",\n taxon_column: str = \"taxon\",\n use_column: str = \"ailments_treated\"):\n\n self.data = data\n self.colorbar_title = colorbar_title\n self.indice = indice\n self.num_rows = num_rows\n self.yticks = None\n self.num_ticks = 5\n self.ytick_position = ytick_position\n self.colors = colors\n self.show_colorbar = show_colorbar\n self.informant_column = informant_column\n self.taxon_column = taxon_column\n self.use_column = use_column\n\n def plot(self):\n \"\"\"Creates and displays the radial bar plot.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n return self.fig, self.ax\n\n def save_plot(self, filename: str, dpi: int = 300):\n \"\"\"Saves the radial bar plot to a file.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n self.fig.savefig(filename, bbox_inches=\"tight\", dpi=dpi)\n\n def _prepare_data(self):\n \"\"\"Prepares data for plotting.\"\"\"\n\n self.indice_df = self.data.head(self.num_rows) if isinstance(self.num_rows, int) else self.data\n self.angles = np.linspace(0.05, 2 * np.pi - 0.05, len(self.indice_df), endpoint=False)\n self.indice_values = self.indice_df[self.indice].values\n self.taxon_values = self.indice_df[self.use_column].values if self.indice == \"FIC\" else self.indice_df[\n self.taxon_column].values\n\n def _create_plot(self):\n \"\"\"Creates the base plot.\"\"\"\n\n self.fig, self.ax = plt.subplots(figsize=(9, 12.6), subplot_kw={\"projection\": \"polar\"})\n self.ax.set_theta_offset(1.2 * np.pi / 2)\n self.ax.set_ylim(0 - (self.indice_values.min() * 0.4), self.indice_values.max())\n\n self._set_colormap()\n\n if len(self.indice_values) < 6:\n width = 1.4\n else:\n width = 0.52\n\n self.bars = self.ax.bar(self.angles, self.indice_values, color=self.colors, alpha=0.9, width=width, zorder=10)\n\n def _customize_plot(self):\n \"\"\"Customizes plot appearance.\"\"\"\n\n plt.rcParams[\"text.color\"] = \"#1f1f1f\"\n plt.rcParams.update({\"font.family\": \"serif\"})\n plt.rc(\"axes\", unicode_minus=False)\n\n # Wrap taxon labels for better readability\n self.taxon_values = [\"\\n\".join(wrap(r, 5, break_long_words=False)) for r in self.taxon_values]\n\n # Customize axes and ticks\n self.ax.xaxis.grid(False)\n self.ax.spines[\"start\"].set_color(\"none\")\n self.ax.spines[\"polar\"].set_color(\"none\")\n self.ax.set_xticks(self.angles)\n self.ax.set_xticklabels(self.taxon_values, size=12)\n\n # Set y-ticks and labels based on position preference\n self._set_yticks_and_labels()\n\n # Add colorbar if enabled\n if self.show_colorbar:\n self._add_colorbar()\n\n def _set_colormap(self):\n \"\"\"Sets the colormap for the bars.\"\"\"\n\n if self.colors is None:\n # Use default colors\n self.colors = [\"#ffcc70\", \"#c63d2f\"]\n else:\n # Use provided colors\n self.colors = self.colors\n\n # Create colormap and normalize values\n self.cmap = mplcolors.LinearSegmentedColormap.from_list(\"my_colormap\", self.colors, N=256)\n self.norm = mplcolors.Normalize(vmin=self.indice_values.min(), vmax=self.indice_values.max())\n self.colors = self.cmap(self.norm(self.indice_values))\n\n def 
_set_yticks_and_labels(self):\n \"\"\"Sets y-ticks and labels based on the specified position.\"\"\"\n\n self.ax.set_yticklabels([])\n self.yticks = np.linspace(0, self.indice_values.max() + (self.indice_values.max() * .20), self.num_ticks)\n self.ax.set_yticks(list(self.yticks))\n\n if self.ytick_position == \"on_line\":\n # Place y-tick labels on a separate line\n pad = self.indice_values.min() * 0.1\n for yt in self.yticks:\n self.ax.text(-0.2 * np.pi / 2, yt + pad, round(yt, 3), ha=\"center\", size=11, zorder=15)\n else:\n # Place y-tick labels on the bars\n for bar, length in zip(self.bars, self.indice_values):\n height = bar.get_height()\n self.ax.text(bar.get_x() + bar.get_width() / 2, height, f'{length:.3f}', ha='center', va='bottom',\n fontsize=10, zorder=15)\n\n def _add_colorbar(self):\n \"\"\"Adds a colorbar to the plot.\"\"\"\n\n cax = inset_axes(\n self.ax,\n width=\"100%\",\n height=\"100%\",\n loc=\"center\",\n bbox_to_anchor=(0.325, 0.1, 0.35, 0.01),\n bbox_transform=self.fig.transFigure\n )\n\n # Access the already-defined yticks\n yticks = np.linspace(self.indice_values.min(), self.indice_values.max(), self.num_ticks)\n\n cbar = self.fig.colorbar(\n ScalarMappable(norm=self.norm, cmap=self.cmap),\n cax=cax,\n orientation=\"horizontal\",\n ticks=yticks\n )\n\n cbar.outline.set_visible(False)\n cbar.ax.xaxis.set_tick_params(size=0)\n cbar.set_label(self.colorbar_title, size=12, labelpad=-40)" }, { "identifier": "HeatmapPlot", "path": "pyethnobiology/visualization.py", "snippet": "class HeatmapPlot:\n \"\"\"\n Creates a heatmap plot to visualize data in a grid format.\n \"\"\"\n\n def __init__(self,\n data: pd.DataFrame,\n title: str,\n value_column: str,\n row_column: str,\n column_column: str,\n cmap: str = \"coolwarm\",\n show_colorbar: bool = True,\n colorbar_shrink: float = 0.50,\n plot_width: float = 10,\n plot_height: float = 8,\n dpi: int = 300,\n fillna_zero: bool = True):\n\n self.data = data\n self.title = title\n self.value_column = value_column\n self.row_column = row_column\n self.column_column = column_column\n self.cmap = cmap\n self.show_colorbar = show_colorbar\n self.colorbar_shrink = colorbar_shrink\n self.plot_width = plot_width\n self.plot_height = plot_height\n self.dpi = dpi\n self.fillna_zero = fillna_zero\n\n def plot(self):\n \"\"\"Creates and displays the heatmap plot.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n return self.fig, self.ax\n\n def save_plot(self, filename: str, dpi: int = 300):\n \"\"\"Saves the heatmap plot to a file.\"\"\"\n\n self._prepare_data()\n self._create_plot()\n self._customize_plot()\n self.fig.savefig(filename, bbox_inches=\"tight\", dpi=dpi)\n\n def _prepare_data(self):\n \"\"\"Pivots data into a suitable format for heatmap.\"\"\"\n\n self.heatmap_data = self.data.pivot(index=self.row_column, columns=self.column_column, values=self.value_column)\n if self.fillna_zero:\n self.heatmap_data = self.heatmap_data.fillna(0)\n\n def _create_plot(self):\n \"\"\"Creates the base heatmap plot.\"\"\"\n\n self.fig, self.ax = plt.subplots(figsize=(self.plot_width, self.plot_height), dpi=self.dpi)\n self.im = self.ax.imshow(self.heatmap_data, cmap=self.cmap)\n\n def _customize_plot(self):\n \"\"\"Customizes plot appearance.\"\"\"\n\n plt.rcParams[\"text.color\"] = \"#1f1f1f\"\n plt.rcParams.update({\"font.family\": \"serif\"})\n plt.rc(\"axes\", unicode_minus=False)\n\n # Set tick labels\n if len(self.heatmap_data.columns) > 10:\n rotation, ha = (90, \"center\")\n else:\n rotation, ha = (45, 
\"right\")\n\n plt.xticks(ticks=range(len(self.heatmap_data.columns)), labels=self.heatmap_data.columns, rotation=rotation,\n ha=ha)\n plt.yticks(ticks=range(len(self.heatmap_data.index)), labels=self.heatmap_data.index)\n\n # Add colorbar if enabled\n if self.show_colorbar:\n self._add_colorbar()\n\n # Customize labels and title\n plt.xlabel(self.column_column)\n plt.ylabel(self.row_column)\n plt.title(self.title) # Add a title if needed\n\n def _add_colorbar(self):\n \"\"\"Adds a colorbar to the plot.\"\"\"\n\n plt.colorbar(self.im, label=self.title, shrink=self.colorbar_shrink)" } ]
import pandas as pd from pyethnobiology.visualization import RadialPlot from pyethnobiology.visualization import HeatmapPlot
5,094
self.title = "Informant Consensus Factor (FIC)" def calculate(self): """ Calculates the FIC for each ailment category. Returns: pd.DataFrame: DataFrame containing ailment category and FIC columns. """ unique_ailment_categories = self.data[self.use_column].unique() fic_values = [] for ailment_category in unique_ailment_categories: specific_data = self.data[self.data[self.use_column] == ailment_category] # Calculate Nur (number of use reports) nur = specific_data.shape[0] # Calculate Nt (number of taxa used) nt = specific_data[self.taxon_column].nunique() # Calculate FIC value if nur > nt: fic = (nur - nt) / (nur - 1) else: fic = 0 # Set FIC to 0 if Nur <= Nt fic_values.append({self.use_column: ailment_category, "FIC": fic}) fic_df = pd.DataFrame(fic_values) fic_df = fic_df.sort_values(by="FIC", ascending=False).reset_index(drop=True) return fic_df def save_data(self): FIC_df = self.calculate() FIC_df.to_csv("informant_consensus_factor_FIC.csv", index=False) print("Saved to informant_consensus_factor_FIC.csv") def plot_radial(self, filename="FIC.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), self.title, "FIC", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class FL: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant". taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon". use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated". """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Fidelity Level (FL) per Species" def calculate(self): """ Calculates the fidelity level (FL) for each species-use combination. Returns: pd.DataFrame: DataFrame containing taxon, use, and FL columns. """ # Calculate Frequency of Citation (FC) per species fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate() # Count informants for each species-use combination ns_df = ( self.data.groupby([self.taxon_column, self.use_column])[self.informant_column] .nunique() .reset_index(name="Ns") ) # Merge FC and Ns dataframes merged_df = pd.merge(ns_df, fc_df, on=self.taxon_column) # Calculate FL = (Ns * 100) / FC merged_df["FL"] = (merged_df["Ns"] * 100) / merged_df["FC"] # Exclude rows with FL of 0 merged_df = merged_df[merged_df["FL"] != 0] return merged_df[[self.taxon_column, self.use_column, "FL"]] def save_data(self, filename="fidelity_level_FL.csv"): """ Saves the calculated FL data to a CSV file. Args: filename (str, optional): Name of the CSV file to save. Defaults to "fidelity_level_FL.csv". """ fl_df = self.calculate() fl_df.to_csv(filename, index=False) print(f"Saved to {filename}") def plot_heatmap(self, filename="FL.png", cmap="coolwarm", show_colorbar=True, colorbar_shrink=0.50, plot_width=10, plot_height=8, dpi=300, fillna_zero=True): """ Creates a heatmap of FL values for each species-use combination, with customizable features for plot appearance and layout. """ data = self.calculate()
class FC: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant". taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon". use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated". """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column def calculate(self): """ Calculates the frequency of citation (FC) for each species. Returns: pd.DataFrame: DataFrame containing taxon and FC columns. """ # Calculate FC per species by counting unique informants for each taxon fc_df = ( self.data.groupby(self.taxon_column, observed=True)[self.informant_column] .nunique() .reset_index(name="FC") ) # Sort FC values in descending order fc_df = fc_df.sort_values(by="FC", ascending=False).reset_index(drop=True) return fc_df def save_data(self): FC_df = self.calculate() FC_df.to_csv("frequency_of_citation_FC.csv", index=False) print("Saved to frequency_of_citation_FC.csv") def plot_radial(self, filename="FC.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), "Frequency of Citation (FC)", "FC", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class NU: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. taxon_column (str, optional): Name of the column containing species names. use_column (str, optional): Name of the column containing plant uses. """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Number of Uses (NU) per Species" def calculate(self): """ Calculates the NU for each species. Returns: pd.DataFrame: DataFrame containing taxon and NU columns. """ nu_df = ( self.data.groupby(self.taxon_column, observed=True)[self.use_column] .nunique() .reset_index(name="NU") ) nu_df = nu_df.sort_values(by="NU", ascending=False).reset_index(drop=True) return nu_df def save_data(self): NU_df = self.calculate() NU_df.to_csv("number_of_uses_NU.csv", index=False) print("Saved to number_of_uses_NU.csv") def plot_radial(self, filename="NU.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), self.title, "NU", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class UR: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. 
taxon_column (str, optional): Name of the column containing species names. use_column (str, optional): Name of the column containing plant uses. """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Use Report (UR) per Species" def calculate(self): """ Calculates the UR for each species. Returns: pd.DataFrame: DataFrame containing taxon and UR columns. """ ur_df = ( self.data.groupby(self.taxon_column, observed=True) .size() .reset_index(name="UR") .sort_values(by="UR", ascending=False) .reset_index(drop=True) ) return ur_df def save_data(self): UR_df = self.calculate() UR_df.to_csv("use_report_UR.csv", index=False) print("Saved to use_report_UR.csv") def plot_radial(self, filename="UR.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), self.title, "UR", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class CI: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant". taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon". use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated". """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Cultural Importance (CI) Index" def calculate(self): """ Calculates the cultural importance index (CI) for each species. Returns: pd.DataFrame: DataFrame containing taxon and CI columns. """ # Calculate Use Reports (UR) per species ur_df = UR(self.data, self.informant_column, self.taxon_column, self.use_column).calculate() # Count unique informants informants_count = self.data[self.informant_column].nunique() # Merge UR and informants count based on 'taxon' ci_df = pd.merge( ur_df, self.data[[self.taxon_column, self.informant_column]] .drop_duplicates() .groupby(self.taxon_column, observed=False) .size() .reset_index(name=f"{self.informant_column}s_count"), on=self.taxon_column, ) # Calculate CI index (UR divided by the number of informants) ci_df["CI"] = ci_df["UR"] / informants_count # Keep only relevant columns ci_df = ci_df[[self.taxon_column, "CI"]] return ci_df def save_data(self): CI_df = self.calculate() CI_df.to_csv("cultural_importance_CI.csv", index=False) print("Saved to cultural_importance_CI.csv") def plot_radial(self, filename="CI.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), self.title, "CI", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class CV: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. 
Defaults to "informant". taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon". use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated". """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Cultural Value (CV) for Ethnospecies" def calculate(self): """ Calculates the cultural value (CV) for each ethnospecies. Returns: pd.DataFrame: DataFrame containing taxon and CV columns. """ # Calculate Use Reports (UR) per species ur_df = UR(self.data, self.informant_column, self.taxon_column, self.use_column).calculate() # Calculate Number of Uses (NU) per species nu_df = NU(self.data, self.informant_column, self.taxon_column, self.use_column).calculate() # Calculate Frequency of Citation (FC) per species fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate() # Calculate Uce (Use Citation for Ethnospecies) potential_uses = self.data[self.use_column].nunique() nu_df["Uce"] = nu_df["NU"] / potential_uses # Calculate Ice (Informant Citation Index) ice = fc_df["FC"] / self.data[self.informant_column].nunique() fc_df["Ice"] = ice # Calculate IUce (Informant Use Index) iuce = ur_df["UR"] / self.data[self.informant_column].nunique() ur_df["IUce"] = iuce # Merge dataframes to calculate CV merged_df = pd.merge(nu_df[[self.taxon_column, "Uce"]], ur_df[[self.taxon_column, "IUce"]], on=self.taxon_column) merged_df = pd.merge(merged_df, fc_df[[self.taxon_column, "Ice"]], on=self.taxon_column) # Calculate CV = Uce * Ice * IUce merged_df["CV"] = merged_df["Uce"] * merged_df["Ice"] * merged_df["IUce"] # Sort and round CV values cv_df = merged_df[[self.taxon_column, "CV"]].sort_values(by="CV", ascending=False) return cv_df def save_data(self): CV_df = self.calculate() CV_df.to_csv("cultural_value_CV.csv", index=False) print("Saved to cultural_value_CV.csv") def plot_radial(self, filename="CV.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), self.title, "CV", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class FIC: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. taxon_column (str, optional): Name of the column containing species names. use_column (str, optional): Name of the column containing plant uses. """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Informant Consensus Factor (FIC)" def calculate(self): """ Calculates the FIC for each ailment category. Returns: pd.DataFrame: DataFrame containing ailment category and FIC columns. 
""" unique_ailment_categories = self.data[self.use_column].unique() fic_values = [] for ailment_category in unique_ailment_categories: specific_data = self.data[self.data[self.use_column] == ailment_category] # Calculate Nur (number of use reports) nur = specific_data.shape[0] # Calculate Nt (number of taxa used) nt = specific_data[self.taxon_column].nunique() # Calculate FIC value if nur > nt: fic = (nur - nt) / (nur - 1) else: fic = 0 # Set FIC to 0 if Nur <= Nt fic_values.append({self.use_column: ailment_category, "FIC": fic}) fic_df = pd.DataFrame(fic_values) fic_df = fic_df.sort_values(by="FIC", ascending=False).reset_index(drop=True) return fic_df def save_data(self): FIC_df = self.calculate() FIC_df.to_csv("informant_consensus_factor_FIC.csv", index=False) print("Saved to informant_consensus_factor_FIC.csv") def plot_radial(self, filename="FIC.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True): # Plot radial bar chart radial_plot = RadialPlot(self.calculate(), self.title, "FIC", num_row, ytick_position, colors, show_colorbar, self.informant_column, self.taxon_column, self.use_column) radial_plot.save_plot(filename, dpi=dpi) radial_plot.plot() class FL: def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"): """ Initializes the class with necessary data and column names. Args: data (pd.DataFrame): DataFrame containing plant usage information. informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant". taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon". use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated". """ self.data = data self.informant_column = informant_column self.taxon_column = taxon_column self.use_column = use_column self.title = "Fidelity Level (FL) per Species" def calculate(self): """ Calculates the fidelity level (FL) for each species-use combination. Returns: pd.DataFrame: DataFrame containing taxon, use, and FL columns. """ # Calculate Frequency of Citation (FC) per species fc_df = FC(self.data, self.informant_column, self.taxon_column, self.use_column).calculate() # Count informants for each species-use combination ns_df = ( self.data.groupby([self.taxon_column, self.use_column])[self.informant_column] .nunique() .reset_index(name="Ns") ) # Merge FC and Ns dataframes merged_df = pd.merge(ns_df, fc_df, on=self.taxon_column) # Calculate FL = (Ns * 100) / FC merged_df["FL"] = (merged_df["Ns"] * 100) / merged_df["FC"] # Exclude rows with FL of 0 merged_df = merged_df[merged_df["FL"] != 0] return merged_df[[self.taxon_column, self.use_column, "FL"]] def save_data(self, filename="fidelity_level_FL.csv"): """ Saves the calculated FL data to a CSV file. Args: filename (str, optional): Name of the CSV file to save. Defaults to "fidelity_level_FL.csv". """ fl_df = self.calculate() fl_df.to_csv(filename, index=False) print(f"Saved to {filename}") def plot_heatmap(self, filename="FL.png", cmap="coolwarm", show_colorbar=True, colorbar_shrink=0.50, plot_width=10, plot_height=8, dpi=300, fillna_zero=True): """ Creates a heatmap of FL values for each species-use combination, with customizable features for plot appearance and layout. """ data = self.calculate()
heatmap_plot = HeatmapPlot(
1
2023-12-25 01:06:51+00:00
8k
Zitronenjoghurt/Colonaut
tests/test_ship_system.py
[ { "identifier": "Event", "path": "src/events/event.py", "snippet": "class Event():\n TYPES = EventTypes\n \n def __init__(self, type: str, **kwargs) -> None:\n self.type = type\n self.data = kwargs" }, { "identifier": "EventBus", "path": "src/events/event_bus.py", "snippet": "class EventBus():\n _instance = None\n\n def __init__(self) -> None:\n if EventBus._instance is not None:\n raise RuntimeError(\"Tried to initialize multiple instances of EventBus.\")\n \n self.listeners: dict[str, Callable] = {}\n\n @staticmethod\n def get_instance() -> 'EventBus':\n if EventBus._instance is None:\n EventBus._instance = EventBus()\n return EventBus._instance\n \n @staticmethod\n def reset_instance() -> None:\n EventBus._instance = None\n \n \"\"\"\n Possible errors:\n - RuntimeError\n \"\"\"\n def subscribe(self, event_type: str, listener: Callable) -> None:\n if self.listeners.get(event_type, None) is not None:\n raise RuntimeError(f\"Subscription on event type {event_type} already exists.\")\n self.listeners[event_type] = listener\n\n \"\"\"\n Possible errors:\n - RuntimeError\n \"\"\"\n def unsubscribe(self, event_type: str, listener: Callable) -> None:\n if self.listeners.get(event_type, None) is None:\n raise RuntimeError(f\"Subscription on event type {event_type} does not exist.\")\n self.listeners.pop(event_type)\n\n \"\"\"\n Possible errors:\n - EventTypeNotSubscribedError\n - RuntimeError\n \"\"\"\n def publish(self, event: Event) -> Response:\n if event.type not in self.listeners:\n raise EventTypeNotSubscribedError(event_type=event.type)\n \n listener = self.listeners[event.type]\n try:\n response = listener(**event.data)\n except Exception as e:\n raise RuntimeError(f\"An error occured while publishing event {event.type}.\") from e\n \n return response" }, { "identifier": "Response", "path": "src/events/response.py", "snippet": "class Response():\n TYPES = ResponseTypes\n\n def __init__(self, data: Any = None, typed: bool = False) -> None:\n self.data: Any = data\n self.typed = typed\n self.timestamp = datetime.now()\n\n # Response is true if its not empty\n def __bool__(self):\n if self.data is None:\n return False\n return len(self.data) != 0\n \n def get_data(self, response_type: Optional[str] = None) -> Any:\n if not self.typed or not response_type:\n return self.data\n return self.data.get(response_type, None)\n \n def has_type(self, response_type: str) -> bool:\n return response_type in self.data\n\n def validate_data(self, validation_type: type) -> bool:\n return isinstance(self.data, validation_type)\n \n @staticmethod\n def create(data: Any = None, response_type: Optional[str] = None) -> 'Response':\n response_data = data\n typed = False\n if response_type is not None:\n response_data = {\n response_type: data\n }\n typed = True\n return Response(data=response_data, typed=typed)\n \n def fuse(self, other: 'Response') -> 'Response':\n if not isinstance(other, Response):\n raise ValueError(\"An error occured while trying to fuse two responses: given response is not of class Response\")\n if not self.typed and not self:\n return other # self has no data, return other\n if not other.typed and not other:\n return self # other has no data, return self\n if not self.typed or not other.typed:\n raise ValueError(\"An error occured while trying to fuse two responses: you can only fuse 2 typed responses or a response with an empty response\")\n # Since its typed, data will always be a dict\n data = self.data\n data.update(other.data)\n return Response(data=data, typed=True)\n \n def 
add_data(self, data: Any, response_type: str) -> None:\n self.data[response_type] = data" }, { "identifier": "SpaceShip", "path": "src/space_ship/space_ship.py", "snippet": "class SpaceShip(BaseEventSubscriber):\n def __init__(self, systems: Optional[dict[str, ShipSystem]] = None) -> None:\n subscriptions = {\n Event.TYPES.SHIP_RETRIEVE_SYSTEM: self.get_system,\n Event.TYPES.SHIP_DAMAGE_SYSTEM: self.damage_system,\n Event.TYPES.SHIP_UPGRADE_SYSTEM: self.upgrade_system,\n Event.TYPES.RETRIEVE_SHIP_STATUS: self.get_status,\n Event.TYPES.RETRIEVE_SYSTEM_UPGRADES: self.get_system_upgrades,\n Event.TYPES.RETRIEVE_SYSTEM_WINDOW_DATA: self.get_system_window_data\n }\n super().__init__(subscriptions=subscriptions)\n if systems is None:\n systems = {}\n sorted_systems: list[tuple[str, ShipSystem]] = sorted(systems.items(), key=lambda system: system[1].WORK_ORDER_PRIORITY, reverse=True)\n self.systems: dict[str, ShipSystem] = dict(sorted_systems)\n\n # Ship system dashboard order priority\n sorted_systems : list[tuple[str, ShipSystem]] = sorted(systems.items(), key=lambda system: system[1].DASHBOARD_ORDER_PRIORITY, reverse=True)\n self._systems_dashboard_order = dict(sorted_systems)\n\n # The unveiled planet data types\n self.scanner_results: list[str] = []\n\n # The status log contain all messages that are printed to the console after the jump\n self.status_log: list[str] = []\n\n @staticmethod\n def from_dict(data: dict) -> 'SpaceShip':\n systems_data: dict[str, dict] = data.get(\"systems\", None)\n if systems_data is None:\n raise ValueError(\"Space ship data has no specified systems.\")\n \n systems = {}\n for system_name, system_dict in systems_data.items():\n system = ShipSystemFactory.create_from_dict(system_name=system_name, data=system_dict)\n systems[system_name] = system\n return SpaceShip(systems=systems)\n\n def to_dict(self) -> Response:\n systems = {}\n for system in self.systems.values():\n systems[system.NAME] = system.to_dict().get_data()\n \n result = {\n \"systems\": systems\n }\n return Response.create(result)\n \n def run(self) -> None:\n self.run_systems()\n self.retrieve_drawn_energy_log()\n \n def run_systems(self) -> None:\n self.scanner_results = []\n for system in self.systems.values():\n response = system.work()\n self.handle_system_response(response)\n\n def retrieve_drawn_energy_log(self) -> None:\n battery_drawn_energy_event = Event(Event.TYPES.BATTERY_RETRIEVE_DRAWN_ENERGY_LOG)\n response = self.publish_event(battery_drawn_energy_event)\n logs = response.get_data(Response.TYPES.BATTERY_DRAWN_ENERGY_LOG)\n \n if logs and isinstance(logs, list):\n self.status_log.extend(logs)\n \n def handle_system_response(self, response: Response) -> None:\n scanner_result = response.get_data(Response.TYPES.SCANNER_RESULT)\n status_log = response.get_data(Response.TYPES.SHIP_STATUS_LOG_ENTRY)\n if scanner_result:\n self.scanner_results.extend(scanner_result)\n if status_log:\n if isinstance(status_log, list):\n self.status_log.extend(status_log)\n else:\n self.status_log.append(status_log)\n\n \"\"\"\n Possible errors:\n - ShipSystemNotFoundError\n - ValueError\n \"\"\"\n def damage_system(self, system_name: str, amount: int) -> Response:\n system_name = system_name.lower()\n system_response = self.get_system(system_name=system_name)\n system: ShipSystem = system_response.get_data()\n\n system.damage(amount=amount)\n return Response.create(f\"{system_name.capitalize()} took {amount} damage.\")\n\n \"\"\"\n Possible errors:\n - ShipSystemNotFoundError\n \"\"\"\n def get_system(self, 
system_name: str) -> Response:\n system = self.systems.get(system_name, None)\n\n if not system_name or not isinstance(system, ShipSystem):\n raise ShipSystemNotFoundError(system_name=system_name)\n \n return Response.create(data=system)\n \n def get_status(self) -> Response:\n data = {}\n \n for system_name, system in self._systems_dashboard_order.items():\n data[system_name] = system.get_status().get_data()\n response = Response.create(data, Response.TYPES.SHIP_DATA)\n\n response.add_data(self.status_log, Response.TYPES.SHIP_STATUS_LOG)\n self.status_log = []\n return response\n \n \"\"\"\n Possible errors:\n - ShipSystemNotFoundError\n \"\"\"\n def get_system_window_data(self, system_name: str) -> Response:\n system: ShipSystem = self.get_system(system_name=system_name).get_data()\n return system.get_system_window_data()\n \n \"\"\"\n Possible errors:\n - ShipSystemNotFoundError\n \"\"\"\n def get_system_upgrades(self, system_name: str) -> Response:\n system: ShipSystem = self.get_system(system_name=system_name).get_data()\n return system.get_upgrades()\n \n def upgrade_system(self, system_name: str, property: str) -> Response:\n system: ShipSystem = self.get_system(system_name=system_name).get_data()\n try:\n response = system.upgrade_property(property=property)\n cost = response.get_data(Response.TYPES.UPGRADE_COST)\n subtract_matter_event = Event(Event.TYPES.GAME_STATE_SUBTRACT_MATTER, amount=cost)\n self.publish_event(subtract_matter_event)\n except Exception as e:\n raise RuntimeError(f\"An error occured while upgrading property {property} of system {system_name}: {e}\")\n return Response.create()" }, { "identifier": "ShipSystem", "path": "src/space_ship/ship_system.py", "snippet": "class ShipSystem(BaseEventSubscriber):\n NAME = \"default\"\n DASHBOARD_ORDER_PRIORITY = 0\n WORK_ORDER_PRIORITY = 0\n\n def __init__(self, upgrade_model: UpgradeModel, max_hp: int, power_usage: int, hp: Optional[int] = None, subscriptions: Optional[dict] = None) -> None:\n super().__init__(subscriptions=subscriptions)\n if max_hp < 1:\n max_hp = 1\n if hp is None:\n hp = max_hp\n if power_usage < 0:\n power_usage = 0\n\n self.upgrade_model = upgrade_model\n self.max_hp = max_hp\n self.hp = hp\n self.power_usage = power_usage\n\n def __setattr__(self, key, value) -> None:\n if key == \"max_hp\" and hasattr(self, \"max_hp\") and hasattr(self, \"hp\"):\n hp_increase = value - self.max_hp\n self.hp += hp_increase\n super().__setattr__(key, value)\n \n def to_dict(self) -> Response:\n data = {\n \"max_hp\": self.max_hp,\n \"hp\": self.hp,\n \"power_usage\": self.power_usage,\n \"levels\": self.upgrade_model.get_levels(),\n \"model\": self.upgrade_model.model_name\n }\n return Response.create(data)\n \n \"\"\"\n Possible errors:\n - ValueError\n \"\"\"\n def damage(self, amount: int) -> Response:\n validate_int(value=amount, value_name=\"amount\", min_value=0)\n \n damage = amount\n initial_hp = self.hp\n\n self.hp -= amount\n if self.hp < 0:\n damage = initial_hp\n self.hp = 0\n\n return Response.create([f\"{self.NAME.capitalize()} took {damage} damage.\"], Response.TYPES.SHIP_STATUS_LOG_ENTRY)\n \n def get_hp_percentage(self) -> Response:\n if self.max_hp == 0:\n percentage = 0\n else:\n percentage = round(self.hp/self.max_hp, CONFIG.DECIMAL_DIGITS)\n return Response.create(percentage)\n \n def get_status(self) -> Response:\n hp_percentage = self.get_hp_percentage().get_data()\n data = {\n \"health\": hp_percentage\n }\n return Response.create(data=data)\n \n def get_stats(self) -> Response:\n data = [\n 
(\"health\", f\"{self.hp}/{self.max_hp}\"),\n (\"power_usage\", str(self.power_usage))\n ]\n return Response.create(data=data)\n \n # The work function will be called after every jump\n def work(self) -> Response:\n # Use energy\n try:\n use_power = Event(Event.TYPES.BATTERY_DRAW_ENERGY, amount=self.power_usage, system_name=self.NAME)\n self.publish_event(use_power)\n except EventTypeNotSubscribedError:\n game_over = Event(Event.TYPES.GAME_OVER_NO_ENERGY, system_name=self.NAME)\n self.publish_event(game_over)\n return Response.create()\n \n def get_name(self) -> Response:\n return Response.create(self.NAME)\n \n def get_description(self) -> Response:\n return Response.create(LT.get(self.NAME+\"_description\"))\n \n def get_max_hp(self) -> Response:\n return Response.create(self.max_hp)\n \n def get_hp(self) -> Response:\n return Response.create(self.hp)\n \n def get_hp_ratio(self) -> Response:\n return Response.create(self.hp / self.max_hp)\n \n def get_system_window_data(self) -> Response:\n stats = self.get_stats().get_data()\n data = {\n 'description': self.get_description().get_data(),\n 'stats': stats\n }\n return Response.create(data, Response.TYPES.SYSTEM_WINDOW_DATA)\n \n def get_upgrades(self) -> Response:\n upgrade_options = []\n for property in self.upgrade_model.get_upgrades():\n if not self.upgrade_model.has_upgrades(property=property):\n continue\n option = self.upgrade_model.get_upgrade_option(property=property)\n upgrade_options.append(option)\n return Response.create(upgrade_options, Response.TYPES.SYSTEM_UPGRADES)\n \n def upgrade_property(self, property: str) -> Response:\n if property not in self.upgrade_model.get_upgrades():\n raise RuntimeError(f\"Tried to upgrade property {property} of system {self.NAME}, but the property is not upgradable\")\n if not hasattr(self, property):\n raise RuntimeError(f\"Tried to upgrade property {property} of system {self.NAME}, but the property does not exist in that ship system\")\n if not self.upgrade_model.can_upgrade_property(property=property):\n raise RuntimeError(f\"Tried to upgrade property {property} of system {self.NAME}, but the property is already at max level\")\n \n current_level = self.upgrade_model.get_property_level(property=property)\n difference = self.upgrade_model.get_upgrade_difference(property=property, current_level=current_level)\n enhancements = self.upgrade_model.get_level_enhances(property=property, level=current_level+1)\n\n current_value = getattr(self, property)\n setattr(self, property, current_value + difference)\n\n for property_name, value in enhancements.items():\n if not hasattr(self, property_name):\n raise RuntimeError(f\"Upgrade of property {property} in ship system {self.NAME} tried to also upgrade non-existent property {property_name}\")\n property_value = getattr(self, property_name)\n setattr(self, property_name, property_value + value)\n\n cost = self.upgrade_model.get_level_cost(property=property, level=current_level+1)\n self.upgrade_model.upgrade_property(property=property)\n return Response.create(cost, Response.TYPES.UPGRADE_COST)" }, { "identifier": "file_to_dict", "path": "src/utils/file_operations.py", "snippet": "def file_to_dict(file_path: str) -> dict:\n with open(file_path, 'r', encoding='utf-8') as f:\n data = json.load(f)\n if not isinstance(data, dict):\n raise RuntimeError(\"Deserialized data is not a dictionary.\")\n return data" } ]
import os
import pytest
import src.space_ship.ship_systems as ShipSystems
from src.events.event import Event
from src.events.event_bus import EventBus
from src.events.response import Response
from src.space_ship.space_ship import SpaceShip
from src.space_ship.ship_system import ShipSystem
from src.utils.file_operations import file_to_dict
3,936
@pytest.fixture
def setup():
    EventBus.reset_instance()

@pytest.fixture
def current_dir():
    return os.path.dirname(os.path.abspath(__file__))

@pytest.fixture
def space_ship(setup, current_dir):
    ship_file = os.path.join(current_dir, '..', 'src', 'data', 'testing', 'ship.json')
    ship_dict = file_to_dict(ship_file)
    return SpaceShip.from_dict(data=ship_dict)
@pytest.fixture
def setup():
    EventBus.reset_instance()

@pytest.fixture
def current_dir():
    return os.path.dirname(os.path.abspath(__file__))

@pytest.fixture
def space_ship(setup, current_dir):
    ship_file = os.path.join(current_dir, '..', 'src', 'data', 'testing', 'ship.json')
    ship_dict = file_to_dict(ship_file)
    return SpaceShip.from_dict(data=ship_dict)
def assert_response_data(response: Response, expected_data, response_type = None):
2
2023-12-22 21:24:33+00:00
8k
YYJeffrey/july_server
app/api/v2/topic.py
[ { "identifier": "db", "path": "app/model/base.py", "snippet": "class BaseModel(db.Model):\n def __getitem__(self, key):\n def init_on_load(self):\n def __set_fields(self):\n def _set_fields(self):\n def keys(self):\n def hide(self, *keys):\n def append(self, *keys):\n def status(self):\n def get_or_404(cls, **kwargs):\n def all_or_404(cls, **kwargs):\n def get_one(cls, **kwargs):\n def get_all(cls, **kwargs):\n def create(cls, commit: bool = True, **kwargs):\n def update(self, commit: bool = True, **kwargs):\n def save(self, commit: bool = True):\n def delete(self, commit: bool = True, soft: bool = True):\n def get_pagination(cls, not_del: bool = True, **kwargs):" }, { "identifier": "Success", "path": "app/lib/exception.py", "snippet": "class Success(APIException):\n code = 200\n msg_code = 0\n msg = '成功'" }, { "identifier": "NotFound", "path": "app/lib/exception.py", "snippet": "class NotFound(APIException):\n code = 404\n msg_code = 10011\n msg = '资源不存在'" }, { "identifier": "Created", "path": "app/lib/exception.py", "snippet": "class Created(APIException):\n code = 201\n msg_code = 1\n msg = '创建成功'" }, { "identifier": "Deleted", "path": "app/lib/exception.py", "snippet": "class Deleted(APIException):\n code = 200\n msg_code = 3\n msg = '删除成功'" }, { "identifier": "RedPrint", "path": "app/lib/red_print.py", "snippet": "class RedPrint(object):\n \"\"\"\n 红图用于嵌套路由使用\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.mound = []\n\n def route(self, rule, **options):\n def decorator(func):\n if 'strict_slashes' not in options:\n options['strict_slashes'] = False\n self.mound.append((func, rule, options))\n return func\n\n return decorator\n\n def register(self, bp, url_prefix=None):\n if url_prefix is None:\n url_prefix = f\"/{self.name}\"\n\n for func, rule, options in self.mound:\n endpoint = f\"{self.name}/{options.pop('endpoint', func.__name__)}\"\n bp.add_url_rule(url_prefix + rule, endpoint, func, **options)" }, { "identifier": "paginator_schema", "path": "app/lib/schema.py", "snippet": "def paginator_schema(pagination: Pagination):\n \"\"\"\n 分页响应格式\n \"\"\"\n return {\n 'items': pagination.items,\n 'current_page': pagination.page,\n 'next_page': pagination.next_num,\n 'prev_page': pagination.prev_num,\n 'total_page': pagination.pages,\n 'total_count': pagination.total\n }" }, { "identifier": "auth", "path": "app/lib/token.py", "snippet": "def verify_token(token):\ndef generate_token(user_id):" }, { "identifier": "send_ft_msg", "path": "app/manger/fangtang/server_chan.py", "snippet": "def send_ft_msg(text, desp):\n \"\"\"\n Server酱推送消息\n https://sct.ftqq.com\n \"\"\"\n url = f\"https://sctapi.ftqq.com/{current_app.config['SERVER_CHAN_SEND_KEY']}.send\"\n data = {'text': text, 'desp': desp}\n\n try:\n res = requests.post(url=url, data=data, timeout=TIMEOUT).json()\n current_app.logger.info(f\"Server酱接口调用成功: {res}\")\n except Exception as e:\n current_app.logger.warning(f\"Server酱接口调用失败: {e}\")" }, { "identifier": "Comment", "path": "app/model/comment.py", "snippet": "class Comment(BaseModel):\n \"\"\"\n 评论模型\n \"\"\"\n __tablename__ = 'comment'\n\n content = Column(String(256), nullable=False, comment='内容')\n is_anon = Column(Boolean, default=False, comment='是否匿名')\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n topic_id = Column(String(32), nullable=False, index=True, comment='话题标识')\n comment_id = Column(String(32), index=True, comment='父评论标识')\n ip_belong = Column(String(128), comment='IP归属地')\n\n def __str__(self):\n return self.content\n\n 
def _set_fields(self):\n self.append('push_time')\n self._exclude.extend(['user_id'])\n\n @property\n def push_time(self):\n \"\"\"\n 发布时间\n \"\"\"\n if self.create_time is not None:\n return datetime_to_hint(self.create_time)\n return None\n\n @classmethod\n def get_commented(cls, user_id, topic_id):\n \"\"\"\n 获取该用户是否评论该话题\n \"\"\"\n return cls.get_one(user_id=user_id, topic_id=topic_id) is not None\n\n @classmethod\n def get_comment_count(cls, topic_id):\n \"\"\"\n 获取该话题的评论数量\n \"\"\"\n return db.session.query(func.count(cls.id)).filter_by(topic_id=topic_id).scalar()" }, { "identifier": "Label", "path": "app/model/label.py", "snippet": "class Label(BaseModel):\n \"\"\"\n 标签模型\n \"\"\"\n __tablename__ = 'label'\n\n name = Column(String(32), nullable=False, comment='名称')\n allowed_anon = Column(Boolean, default=False, comment='是否可以匿名')\n click_count = Column(Integer, default=0, comment='点击次数')\n priority = Column(Integer, default=0, comment='优先级')\n\n def __str__(self):\n return self.name" }, { "identifier": "Star", "path": "app/model/star.py", "snippet": "class Star(BaseModel):\n \"\"\"\n 收藏模型\n \"\"\"\n __tablename__ = 'star'\n\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n topic_id = Column(String(32), nullable=False, index=True, comment='话题标识')\n\n def __str__(self):\n return self.id\n\n @classmethod\n def get_starred(cls, user_id, topic_id):\n \"\"\"\n 获取该用户是否收藏该话题\n \"\"\"\n return cls.get_one(user_id=user_id, topic_id=topic_id) is not None\n\n @classmethod\n def get_star_count(cls, topic_id):\n \"\"\"\n 获取该话题的收藏数量\n \"\"\"\n return db.session.query(func.count(cls.id)).filter_by(topic_id=topic_id).scalar()" }, { "identifier": "Topic", "path": "app/model/topic.py", "snippet": "class Topic(BaseModel):\n \"\"\"\n 话题模型\n \"\"\"\n __tablename__ = 'topic'\n\n title = Column(String(64), comment='标题')\n content = Column(String(1024), nullable=False, comment='内容')\n is_anon = Column(Boolean, default=False, comment='是否匿名')\n click_count = Column(Integer, default=0, comment='点击次数')\n star_count = Column(Integer, default=0, comment='收藏次数')\n comment_count = Column(Integer, default=0, comment='评论次数')\n images = Column(JSON, comment='图片')\n user_id = Column(String(32), nullable=False, index=True, comment='用户标识')\n video_id = Column(String(32), index=True, comment='视频标识')\n ip_belong = Column(String(128), comment='IP归属地')\n\n def __str__(self):\n return self.content\n\n def _set_fields(self):\n self.append('push_time')\n self._exclude.extend(['user_id'])\n\n @property\n def push_time(self):\n \"\"\"\n 发布时间\n \"\"\"\n if self.create_time is not None:\n return datetime_to_hint(self.create_time)\n return None\n\n @property\n def starred(self):\n \"\"\"\n 是否收藏\n \"\"\"\n if g.user is None:\n return False\n return Star.get_starred(user_id=g.user.id, topic_id=self.id)\n\n @property\n def commented(self):\n \"\"\"\n 是否评论\n \"\"\"\n if g.user is None:\n return False\n return Comment.get_commented(user_id=g.user.id, topic_id=self.id)" }, { "identifier": "TopicLabelRel", "path": "app/model/topic.py", "snippet": "class TopicLabelRel(BaseModel):\n \"\"\"\n 话题标签关系模型\n \"\"\"\n __tablename__ = 'topic_label_rel'\n\n topic_id = Column(String(32), nullable=False, index=True, comment='话题标识')\n label_id = Column(String(32), nullable=False, index=True, comment='标签标识')\n\n def __str__(self):\n return self.id" }, { "identifier": "create_topic_verify", "path": "app/service/topic.py", "snippet": "def create_topic_verify(form):\n \"\"\"\n 创建话题验证\n \"\"\"\n # 图片数量校验\n images = 
form.get_data('images')\n if images is not None:\n if len(images) > current_app.config['MAX_IMAGES_LENGTH']:\n raise ImagesExceedMaxLength\n\n # 标签数量校验\n labels = form.get_data('labels')\n if labels is not None:\n if len(labels) > current_app.config['MAX_LABELS_LENGTH']:\n raise LabelsExceedMaxLength\n\n # 视频存在校验\n video_id = form.get_data('video_id')\n if video_id is not None:\n if Video.get_one(id=video_id) is None:\n raise NotFound(msg='视频不存在')\n\n # 标签匿名校验\n is_anon = form.get_data('is_anon')\n for label_id in labels:\n label = Label.get_one(id=label_id)\n if label is None:\n raise NotFound(msg='标签不存在')\n if is_anon is not None and is_anon and not label.allowed_anon:\n raise LabelNotAllowedAnonymous\n\n client = get_mp_client()\n\n # 标题校验\n title = form.get_data('title')\n if title is not None:\n if not client.check_content(content=title, openid=g.user.openid):\n raise TextContentIllegal('标题不合法')\n\n # 内容校验\n content = form.get_data('content')\n if content is not None:\n if not client.check_content(content=content, openid=g.user.openid):\n raise TextContentIllegal('内容不合法')\n\n # 更新IP归属地\n ip_belong = update_ip_belong()\n\n # 保存话题\n with db.auto_commit():\n topic = Topic.create(commit=False, user_id=g.user.id, ip_belong=ip_belong, **form.dt_data)\n db.session.flush()\n labels = form.get_data('labels')\n if labels is not None:\n for label_id in labels:\n TopicLabelRel.create(commit=False, topic_id=topic.id, label_id=label_id)" }, { "identifier": "format_report_topic", "path": "app/service/topic.py", "snippet": "def format_report_topic(topic):\n \"\"\"\n 格式化举报话题\n \"\"\"\n image_info = ''\n for image in topic.images:\n image_info += f\"![Image]({image})\\n\"\n\n # 举报人\n action_user = g.user\n # 被举报人\n user = User.get_one(id=topic.user_id)\n # 被举报视频\n video = Video.get_one(id=topic.video_id)\n\n return f\"**话题ID:** {topic.id} \\n\" \\\n f\"**话题内容:** {topic.content} \\n\" \\\n f\"**被举报人:** {user.nickname}({user.id}) \\n\" \\\n f\"**话题图片:** {image_info if image_info != '' else '无'} \\n\" \\\n f\"**举报视频:** {video.src if video else '无'} \\n\" \\\n f\"**举报人:** {action_user.nickname}({action_user.id}) \"" }, { "identifier": "get_topic_list", "path": "app/service/topic.py", "snippet": "def get_topic_list(label_id=None, user_id=None):\n \"\"\"\n 获取话题列表\n \"\"\"\n validator = PaginateValidator().dt_data\n page = validator.get('page')\n size = validator.get('size')\n\n query = db.session.query(Topic, User, Video) \\\n .outerjoin(User, Topic.user_id == User.id) \\\n .outerjoin(Video, and_(Topic.video_id == Video.id, Video.video_status == VideoStatus.NORMAL)) \\\n .filter(Topic.delete_time.is_(None))\n\n if label_id is not None:\n topic_ids = db.session.query(TopicLabelRel.topic_id).filter(TopicLabelRel.label_id == label_id)\n query = query.filter(Topic.id.in_(topic_ids))\n\n if user_id is not None:\n query = query.filter(User.id == user_id)\n # 其他用户不能查看到该用户的匿名话题\n if user_id != g.user.id:\n query = query.filter(Topic.is_anon.is_(False))\n\n data = query.order_by(Topic.create_time.desc()).paginate(page=page, size=size)\n\n items = data.items\n for index, (topic, topic.user, topic.video) in enumerate(items):\n if topic.is_anon:\n topic.user = None\n topic.ip_belong = None\n\n topic.append('user', 'video', 'starred', 'commented')\n items[index] = topic\n\n return data" }, { "identifier": "get_topic_detail", "path": "app/service/topic.py", "snippet": "def get_topic_detail(topic_id):\n \"\"\"\n 获取话题详情\n \"\"\"\n data = db.session.query(Topic, User, Video) \\\n .outerjoin(User, Topic.user_id == User.id) 
\\\n .outerjoin(Video, and_(Topic.video_id == Video.id, Video.video_status == VideoStatus.NORMAL)) \\\n .filter(Topic.id == topic_id) \\\n .filter(Topic.delete_time.is_(None)) \\\n .first()\n\n topic, topic.user, topic.video = data\n if topic.is_anon:\n topic.user = None\n topic.ip_belong = None\n\n topic.append('user', 'video', 'starred', 'commented')\n return topic" }, { "identifier": "GetTopicListValidator", "path": "app/validator/forms.py", "snippet": "class GetTopicListValidator(PaginateValidator):\n label_id = StringField('标签标识')\n user_id = StringField('用户标识')" }, { "identifier": "CreateTopicValidator", "path": "app/validator/forms.py", "snippet": "class CreateTopicValidator(Form):\n content = StringField('内容', validators=[DataRequired(message='内容不能为空')])\n title = StringField('标题')\n is_anon = BooleanField('是否匿名')\n images = FieldList(StringField(), '图片')\n labels = FieldList(StringField(), '标签')\n video_id = StringField('视频标识')" } ]
from flask import current_app, g
from app import db
from app.lib.exception import Success, NotFound, Created, Deleted
from app.lib.red_print import RedPrint
from app.lib.schema import paginator_schema
from app.lib.token import auth
from app.manger.fangtang.server_chan import send_ft_msg
from app.model.comment import Comment
from app.model.label import Label
from app.model.star import Star
from app.model.topic import Topic, TopicLabelRel
from app.service.topic import create_topic_verify, format_report_topic, get_topic_list, get_topic_detail
from app.validator.forms import GetTopicListValidator, CreateTopicValidator
3,800
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('topic')


@api.route('/<topic_id>', methods=['GET'])
def get_topic(topic_id):
    """
    获取话题详情
    """
    topic = get_topic_detail(topic_id=topic_id)
    if topic is None:
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('topic')


@api.route('/<topic_id>', methods=['GET'])
def get_topic(topic_id):
    """
    获取话题详情
    """
    topic = get_topic_detail(topic_id=topic_id)
    if topic is None:
raise NotFound(msg='话题不存在')
2
2023-12-30 04:08:35+00:00
8k
farhad-dalirani/MultiObjectTracking-YOLO-NAS-DeepSORT
deep_sort_pytorch_master/deep_sort/deep_sort.py
[ { "identifier": "Extractor", "path": "deep_sort_pytorch_master/deep_sort/deep/feature_extractor.py", "snippet": "class Extractor(object):\n def __init__(self, model_path, use_cuda=True):\n self.net = Net(reid=True)\n self.device = \"cuda\" if torch.cuda.is_available() and use_cuda else \"cpu\"\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']\n self.net.load_state_dict(state_dict)\n logger = logging.getLogger(\"root.tracker\")\n logger.info(\"Loading weights from {}... Done!\".format(model_path))\n self.net.to(self.device)\n self.size = (64, 128)\n self.norm = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ])\n \n\n\n def _preprocess(self, im_crops):\n \"\"\"\n TODO:\n 1. to float with scale from 0 to 1\n 2. resize to (64, 128) as Market1501 dataset did\n 3. concatenate to a numpy array\n 3. to torch Tensor\n 4. normalize\n \"\"\"\n def _resize(im, size):\n return cv2.resize(im.astype(np.float32)/255., size)\n\n im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()\n return im_batch\n\n\n def __call__(self, im_crops):\n im_batch = self._preprocess(im_crops)\n with torch.no_grad():\n im_batch = im_batch.to(self.device)\n features = self.net(im_batch)\n return features.cpu().numpy()" }, { "identifier": "NearestNeighborDistanceMetric", "path": "deep_sort_pytorch_master/deep_sort/sort/nn_matching.py", "snippet": "class NearestNeighborDistanceMetric(object):\r\n \"\"\"\r\n A nearest neighbor distance metric that, for each target, returns\r\n the closest distance to any sample that has been observed so far.\r\n\r\n Parameters\r\n ----------\r\n metric : str\r\n Either \"euclidean\" or \"cosine\".\r\n matching_threshold: float\r\n The matching threshold. Samples with larger distance are considered an\r\n invalid match.\r\n budget : Optional[int]\r\n If not None, fix samples per class to at most this number. 
Removes\r\n the oldest samples when the budget is reached.\r\n\r\n Attributes\r\n ----------\r\n samples : Dict[int -> List[ndarray]]\r\n A dictionary that maps from target identities to the list of samples\r\n that have been observed so far.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, metric, matching_threshold, budget=None):\r\n\r\n\r\n if metric == \"euclidean\":\r\n self._metric = _nn_euclidean_distance\r\n elif metric == \"cosine\":\r\n self._metric = _nn_cosine_distance\r\n else:\r\n raise ValueError(\r\n \"Invalid metric; must be either 'euclidean' or 'cosine'\")\r\n self.matching_threshold = matching_threshold\r\n self.budget = budget\r\n self.samples = {}\r\n\r\n def partial_fit(self, features, targets, active_targets):\r\n \"\"\"Update the distance metric with new data.\r\n\r\n Parameters\r\n ----------\r\n features : ndarray\r\n An NxM matrix of N features of dimensionality M.\r\n targets : ndarray\r\n An integer array of associated target identities.\r\n active_targets : List[int]\r\n A list of targets that are currently present in the scene.\r\n\r\n \"\"\"\r\n for feature, target in zip(features, targets):\r\n self.samples.setdefault(target, []).append(feature)\r\n if self.budget is not None:\r\n self.samples[target] = self.samples[target][-self.budget:]\r\n self.samples = {k: self.samples[k] for k in active_targets}\r\n\r\n def distance(self, features, targets):\r\n \"\"\"Compute distance between features and targets.\r\n\r\n Parameters\r\n ----------\r\n features : ndarray\r\n An NxM matrix of N features of dimensionality M.\r\n targets : List[int]\r\n A list of targets to match the given `features` against.\r\n\r\n Returns\r\n -------\r\n ndarray\r\n Returns a cost matrix of shape len(targets), len(features), where\r\n element (i, j) contains the closest squared distance between\r\n `targets[i]` and `features[j]`.\r\n\r\n \"\"\"\r\n cost_matrix = np.zeros((len(targets), len(features)))\r\n for i, target in enumerate(targets):\r\n cost_matrix[i, :] = self._metric(self.samples[target], features)\r\n return cost_matrix\r" }, { "identifier": "non_max_suppression", "path": "deep_sort_pytorch_master/deep_sort/sort/preprocessing.py", "snippet": "def non_max_suppression(boxes, max_bbox_overlap, scores=None):\r\n \"\"\"Suppress overlapping detections.\r\n\r\n Original code from [1]_ has been adapted to include confidence score.\r\n\r\n .. 
[1] http://www.pyimagesearch.com/2015/02/16/\r\n faster-non-maximum-suppression-python/\r\n\r\n Examples\r\n --------\r\n\r\n >>> boxes = [d.roi for d in detections]\r\n >>> scores = [d.confidence for d in detections]\r\n >>> indices = non_max_suppression(boxes, max_bbox_overlap, scores)\r\n >>> detections = [detections[i] for i in indices]\r\n\r\n Parameters\r\n ----------\r\n boxes : ndarray\r\n Array of ROIs (x, y, width, height).\r\n max_bbox_overlap : float\r\n ROIs that overlap more than this values are suppressed.\r\n scores : Optional[array_like]\r\n Detector confidence score.\r\n\r\n Returns\r\n -------\r\n List[int]\r\n Returns indices of detections that have survived non-maxima suppression.\r\n\r\n \"\"\"\r\n if len(boxes) == 0:\r\n return []\r\n\r\n boxes = boxes.astype(np.float)\r\n pick = []\r\n\r\n x1 = boxes[:, 0]\r\n y1 = boxes[:, 1]\r\n x2 = boxes[:, 2] + boxes[:, 0]\r\n y2 = boxes[:, 3] + boxes[:, 1]\r\n\r\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\r\n if scores is not None:\r\n idxs = np.argsort(scores)\r\n else:\r\n idxs = np.argsort(y2)\r\n\r\n while len(idxs) > 0:\r\n last = len(idxs) - 1\r\n i = idxs[last]\r\n pick.append(i)\r\n\r\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\r\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\r\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\r\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\r\n\r\n w = np.maximum(0, xx2 - xx1 + 1)\r\n h = np.maximum(0, yy2 - yy1 + 1)\r\n\r\n overlap = (w * h) / area[idxs[:last]]\r\n\r\n idxs = np.delete(\r\n idxs, np.concatenate(\r\n ([last], np.where(overlap > max_bbox_overlap)[0])))\r\n\r\n return pick\r" }, { "identifier": "Detection", "path": "deep_sort_pytorch_master/deep_sort/sort/detection.py", "snippet": "class Detection(object):\r\n \"\"\"\r\n This class represents a bounding box detection in a single image.\r\n\r\n Parameters\r\n ----------\r\n tlwh : array_like\r\n Bounding box in format `(x, y, w, h)`.\r\n confidence : float\r\n Detector confidence score.\r\n feature : array_like\r\n A feature vector that describes the object contained in this image.\r\n\r\n Attributes\r\n ----------\r\n tlwh : ndarray\r\n Bounding box in format `(top left x, top left y, width, height)`.\r\n confidence : ndarray\r\n Detector confidence score.\r\n feature : ndarray | NoneType\r\n A feature vector that describes the object contained in this image.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, tlwh, confidence, feature):\r\n self.tlwh = np.asarray(tlwh, dtype=np.float)\r\n self.confidence = float(confidence)\r\n self.feature = np.asarray(feature, dtype=np.float32)\r\n\r\n def to_tlbr(self):\r\n \"\"\"Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\r\n `(top left, bottom right)`.\r\n \"\"\"\r\n ret = self.tlwh.copy()\r\n ret[2:] += ret[:2]\r\n return ret\r\n\r\n def to_xyah(self):\r\n \"\"\"Convert bounding box to format `(center x, center y, aspect ratio,\r\n height)`, where the aspect ratio is `width / height`.\r\n \"\"\"\r\n ret = self.tlwh.copy()\r\n ret[:2] += ret[2:] / 2\r\n ret[2] /= ret[3]\r\n return ret\r" }, { "identifier": "Tracker", "path": "deep_sort_pytorch_master/deep_sort/sort/tracker.py", "snippet": "class Tracker:\r\n \"\"\"\r\n This is the multi-target tracker.\r\n\r\n Parameters\r\n ----------\r\n metric : nn_matching.NearestNeighborDistanceMetric\r\n A distance metric for measurement-to-track association.\r\n max_age : int\r\n Maximum number of missed misses before a track is deleted.\r\n n_init : int\r\n Number of consecutive detections before the track is confirmed. 
The\r\n track state is set to `Deleted` if a miss occurs within the first\r\n `n_init` frames.\r\n\r\n Attributes\r\n ----------\r\n metric : nn_matching.NearestNeighborDistanceMetric\r\n The distance metric used for measurement to track association.\r\n max_age : int\r\n Maximum number of missed misses before a track is deleted.\r\n n_init : int\r\n Number of frames that a track remains in initialization phase.\r\n kf : kalman_filter.KalmanFilter\r\n A Kalman filter to filter target trajectories in image space.\r\n tracks : List[Track]\r\n The list of active tracks at the current time step.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, metric, max_iou_distance=0.7, max_age=70, n_init=3):\r\n self.metric = metric\r\n self.max_iou_distance = max_iou_distance\r\n self.max_age = max_age\r\n self.n_init = n_init\r\n\r\n self.kf = kalman_filter.KalmanFilter()\r\n self.tracks = []\r\n self._next_id = 1\r\n\r\n def predict(self):\r\n \"\"\"Propagate track state distributions one time step forward.\r\n\r\n This function should be called once every time step, before `update`.\r\n \"\"\"\r\n for track in self.tracks:\r\n track.predict(self.kf)\r\n\r\n def update(self, detections):\r\n \"\"\"Perform measurement update and track management.\r\n\r\n Parameters\r\n ----------\r\n detections : List[deep_sort.detection.Detection]\r\n A list of detections at the current time step.\r\n\r\n \"\"\"\r\n # Run matching cascade.\r\n matches, unmatched_tracks, unmatched_detections = \\\r\n self._match(detections)\r\n\r\n # Update track set.\r\n for track_idx, detection_idx in matches:\r\n self.tracks[track_idx].update(\r\n self.kf, detections[detection_idx])\r\n for track_idx in unmatched_tracks:\r\n self.tracks[track_idx].mark_missed()\r\n for detection_idx in unmatched_detections:\r\n self._initiate_track(detections[detection_idx])\r\n self.tracks = [t for t in self.tracks if not t.is_deleted()]\r\n\r\n # Update distance metric.\r\n active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\r\n features, targets = [], []\r\n for track in self.tracks:\r\n if not track.is_confirmed():\r\n continue\r\n features += track.features\r\n targets += [track.track_id for _ in track.features]\r\n track.features = []\r\n self.metric.partial_fit(\r\n np.asarray(features), np.asarray(targets), active_targets)\r\n\r\n def _match(self, detections):\r\n\r\n def gated_metric(tracks, dets, track_indices, detection_indices):\r\n features = np.array([dets[i].feature for i in detection_indices])\r\n targets = np.array([tracks[i].track_id for i in track_indices])\r\n cost_matrix = self.metric.distance(features, targets)\r\n cost_matrix = linear_assignment.gate_cost_matrix(\r\n self.kf, cost_matrix, tracks, dets, track_indices,\r\n detection_indices)\r\n\r\n return cost_matrix\r\n\r\n # Split track set into confirmed and unconfirmed tracks.\r\n confirmed_tracks = [\r\n i for i, t in enumerate(self.tracks) if t.is_confirmed()]\r\n unconfirmed_tracks = [\r\n i for i, t in enumerate(self.tracks) if not t.is_confirmed()]\r\n\r\n # Associate confirmed tracks using appearance features.\r\n matches_a, unmatched_tracks_a, unmatched_detections = \\\r\n linear_assignment.matching_cascade(\r\n gated_metric, self.metric.matching_threshold, self.max_age,\r\n self.tracks, detections, confirmed_tracks)\r\n\r\n # Associate remaining tracks together with unconfirmed tracks using IOU.\r\n iou_track_candidates = unconfirmed_tracks + [\r\n k for k in unmatched_tracks_a if\r\n self.tracks[k].time_since_update == 1]\r\n unmatched_tracks_a = [\r\n k 
for k in unmatched_tracks_a if\r\n self.tracks[k].time_since_update != 1]\r\n matches_b, unmatched_tracks_b, unmatched_detections = \\\r\n linear_assignment.min_cost_matching(\r\n iou_matching.iou_cost, self.max_iou_distance, self.tracks,\r\n detections, iou_track_candidates, unmatched_detections)\r\n\r\n matches = matches_a + matches_b\r\n unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))\r\n return matches, unmatched_tracks, unmatched_detections\r\n\r\n def _initiate_track(self, detection):\r\n mean, covariance = self.kf.initiate(detection.to_xyah())\r\n self.tracks.append(Track(\r\n mean, covariance, self._next_id, self.n_init, self.max_age,\r\n detection.feature))\r\n self._next_id += 1\r" } ]
import numpy as np
import torch
from .deep.feature_extractor import Extractor
from .sort.nn_matching import NearestNeighborDistanceMetric
from .sort.preprocessing import non_max_suppression
from .sort.detection import Detection
from .sort.tracker import Tracker
3,622
###
# Changed from orginal repository
###
# from .deep.feature_extractor import Extractor, FastReIDExtractor

__all__ = ['DeepSort']


class DeepSort(object):
    def __init__(self, model_path, model_config=None, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
        self.min_confidence = min_confidence
        self.nms_max_overlap = nms_max_overlap

        if model_config is None:
            self.extractor = Extractor(model_path, use_cuda=use_cuda)
        ###
        # Changed from orginal repository
        ###
        #else:
        #    self.extractor = FastReIDExtractor(model_config, model_path, use_cuda=use_cuda)

        max_cosine_distance = max_dist
        metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
###
# Changed from orginal repository
###
# from .deep.feature_extractor import Extractor, FastReIDExtractor

__all__ = ['DeepSort']


class DeepSort(object):
    def __init__(self, model_path, model_config=None, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100, use_cuda=True):
        self.min_confidence = min_confidence
        self.nms_max_overlap = nms_max_overlap

        if model_config is None:
            self.extractor = Extractor(model_path, use_cuda=use_cuda)
        ###
        # Changed from orginal repository
        ###
        #else:
        #    self.extractor = FastReIDExtractor(model_config, model_path, use_cuda=use_cuda)

        max_cosine_distance = max_dist
        metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
4
2023-12-26 15:22:02+00:00
8k
JiePKU/MoLE
fine_tune.py
[ { "identifier": "ConfigSanitizer", "path": "library/config_util.py", "snippet": "class ConfigSanitizer:\n # @curry\n @staticmethod\n def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:\n Schema(ExactSequence([klass, klass]))(value)\n return tuple(value)\n\n # @curry\n @staticmethod\n def __validate_and_convert_scalar_or_twodim(klass, value: Union[float, Sequence]) -> Tuple:\n Schema(Any(klass, ExactSequence([klass, klass])))(value)\n try:\n Schema(klass)(value)\n return (value, value)\n except:\n return ConfigSanitizer.__validate_and_convert_twodim(klass, value)\n\n # subset schema\n SUBSET_ASCENDABLE_SCHEMA = {\n \"color_aug\": bool,\n \"face_crop_aug_range\": functools.partial(__validate_and_convert_twodim.__func__, float),\n \"flip_aug\": bool,\n \"num_repeats\": int,\n \"random_crop\": bool,\n \"shuffle_caption\": bool,\n \"keep_tokens\": int,\n \"token_warmup_min\": int,\n \"token_warmup_step\": Any(float,int),\n }\n # DO means DropOut\n DO_SUBSET_ASCENDABLE_SCHEMA = {\n \"caption_dropout_every_n_epochs\": int,\n \"caption_dropout_rate\": Any(float, int),\n \"caption_tag_dropout_rate\": Any(float, int),\n }\n # DB means DreamBooth\n DB_SUBSET_ASCENDABLE_SCHEMA = {\n \"caption_extension\": str,\n \"class_tokens\": str,\n }\n DB_SUBSET_DISTINCT_SCHEMA = {\n Required(\"image_dir\"): str,\n \"is_reg\": bool,\n }\n # FT means FineTuning\n FT_SUBSET_DISTINCT_SCHEMA = {\n Required(\"metadata_file\"): str,\n \"image_dir\": str,\n }\n\n # datasets schema\n DATASET_ASCENDABLE_SCHEMA = {\n \"batch_size\": int,\n \"bucket_no_upscale\": bool,\n \"bucket_reso_steps\": int,\n \"enable_bucket\": bool,\n \"max_bucket_reso\": int,\n \"min_bucket_reso\": int,\n \"resolution\": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int),\n }\n\n # options handled by argparse but not handled by user config\n ARGPARSE_SPECIFIC_SCHEMA = {\n \"debug_dataset\": bool,\n \"max_token_length\": Any(None, int),\n \"prior_loss_weight\": Any(float, int),\n }\n # for handling default None value of argparse\n ARGPARSE_NULLABLE_OPTNAMES = [\n \"face_crop_aug_range\",\n \"resolution\",\n ]\n # prepare map because option name may differ among argparse and user config\n ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME = {\n \"train_batch_size\": \"batch_size\",\n \"dataset_repeats\": \"num_repeats\",\n }\n\n def __init__(self, support_dreambooth: bool, support_finetuning: bool, support_dropout: bool) -> None:\n assert support_dreambooth or support_finetuning, \"Neither DreamBooth mode nor fine tuning mode specified. Please specify one mode or more. 
/ DreamBooth モードか fine tuning モードのどちらも指定されていません。1つ以上指定してください。\"\n\n self.db_subset_schema = self.__merge_dict(\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_DISTINCT_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.ft_subset_schema = self.__merge_dict(\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.FT_SUBSET_DISTINCT_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.db_dataset_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n {\"subsets\": [self.db_subset_schema]},\n )\n\n self.ft_dataset_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n {\"subsets\": [self.ft_subset_schema]},\n )\n\n if support_dreambooth and support_finetuning:\n def validate_flex_dataset(dataset_config: dict):\n subsets_config = dataset_config.get(\"subsets\", [])\n\n # check dataset meets FT style\n # NOTE: all FT subsets should have \"metadata_file\"\n if all([\"metadata_file\" in subset for subset in subsets_config]):\n return Schema(self.ft_dataset_schema)(dataset_config)\n # check dataset meets DB style\n # NOTE: all DB subsets should have no \"metadata_file\"\n elif all([\"metadata_file\" not in subset for subset in subsets_config]):\n return Schema(self.db_dataset_schema)(dataset_config)\n else:\n raise voluptuous.Invalid(\"DreamBooth subset and fine tuning subset cannot be mixed in the same dataset. Please split them into separate datasets. / DreamBoothのサブセットとfine tuninのサブセットを同一のデータセットに混在させることはできません。別々のデータセットに分割してください。\")\n\n self.dataset_schema = validate_flex_dataset\n elif support_dreambooth:\n self.dataset_schema = self.db_dataset_schema\n else:\n self.dataset_schema = self.ft_dataset_schema\n\n self.general_schema = self.__merge_dict(\n self.DATASET_ASCENDABLE_SCHEMA,\n self.SUBSET_ASCENDABLE_SCHEMA,\n self.DB_SUBSET_ASCENDABLE_SCHEMA if support_dreambooth else {},\n self.DO_SUBSET_ASCENDABLE_SCHEMA if support_dropout else {},\n )\n\n self.user_config_validator = Schema({\n \"general\": self.general_schema,\n \"datasets\": [self.dataset_schema],\n })\n\n self.argparse_schema = self.__merge_dict(\n self.general_schema,\n self.ARGPARSE_SPECIFIC_SCHEMA,\n {optname: Any(None, self.general_schema[optname]) for optname in self.ARGPARSE_NULLABLE_OPTNAMES},\n {a_name: self.general_schema[c_name] for a_name, c_name in self.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME.items()},\n )\n\n self.argparse_config_validator = Schema(Object(self.argparse_schema), extra=voluptuous.ALLOW_EXTRA)\n\n def sanitize_user_config(self, user_config: dict) -> dict:\n try:\n return self.user_config_validator(user_config)\n except MultipleInvalid:\n # TODO: エラー発生時のメッセージをわかりやすくする\n print(\"Invalid user config / ユーザ設定の形式が正しくないようです\")\n raise\n\n # NOTE: In nature, argument parser result is not needed to be sanitize\n # However this will help us to detect program bug\n def sanitize_argparse_namespace(self, argparse_namespace: argparse.Namespace) -> argparse.Namespace:\n try:\n return self.argparse_config_validator(argparse_namespace)\n except MultipleInvalid:\n # XXX: this should be a bug\n print(\"Invalid cmdline parsed arguments. This should be a bug. 
/ コマンドラインのパース結果が正しくないようです。プログラムのバグの可能性が高いです。\")\n raise\n\n # NOTE: value would be overwritten by latter dict if there is already the same key\n @staticmethod\n def __merge_dict(*dict_list: dict) -> dict:\n merged = {}\n for schema in dict_list:\n # merged |= schema\n for k, v in schema.items():\n merged[k] = v\n return merged" }, { "identifier": "BlueprintGenerator", "path": "library/config_util.py", "snippet": "class BlueprintGenerator:\n BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME = {\n }\n\n def __init__(self, sanitizer: ConfigSanitizer):\n self.sanitizer = sanitizer\n\n # runtime_params is for parameters which is only configurable on runtime, such as tokenizer\n def generate(self, user_config: dict, argparse_namespace: argparse.Namespace, **runtime_params) -> Blueprint:\n sanitized_user_config = self.sanitizer.sanitize_user_config(user_config)\n sanitized_argparse_namespace = self.sanitizer.sanitize_argparse_namespace(argparse_namespace)\n\n # convert argparse namespace to dict like config\n # NOTE: it is ok to have extra entries in dict\n optname_map = self.sanitizer.ARGPARSE_OPTNAME_TO_CONFIG_OPTNAME\n argparse_config = {optname_map.get(optname, optname): value for optname, value in vars(sanitized_argparse_namespace).items()}\n\n general_config = sanitized_user_config.get(\"general\", {})\n\n dataset_blueprints = []\n for dataset_config in sanitized_user_config.get(\"datasets\", []):\n # NOTE: if subsets have no \"metadata_file\", these are DreamBooth datasets/subsets\n subsets = dataset_config.get(\"subsets\", [])\n is_dreambooth = all([\"metadata_file\" not in subset for subset in subsets])\n if is_dreambooth:\n subset_params_klass = DreamBoothSubsetParams\n dataset_params_klass = DreamBoothDatasetParams\n else:\n subset_params_klass = FineTuningSubsetParams\n dataset_params_klass = FineTuningDatasetParams\n\n subset_blueprints = []\n for subset_config in subsets:\n params = self.generate_params_by_fallbacks(subset_params_klass,\n [subset_config, dataset_config, general_config, argparse_config, runtime_params])\n subset_blueprints.append(SubsetBlueprint(params))\n\n params = self.generate_params_by_fallbacks(dataset_params_klass,\n [dataset_config, general_config, argparse_config, runtime_params])\n dataset_blueprints.append(DatasetBlueprint(is_dreambooth, params, subset_blueprints))\n\n dataset_group_blueprint = DatasetGroupBlueprint(dataset_blueprints)\n\n return Blueprint(dataset_group_blueprint)\n\n @staticmethod\n def generate_params_by_fallbacks(param_klass, fallbacks: Sequence[dict]):\n name_map = BlueprintGenerator.BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME\n search_value = BlueprintGenerator.search_value\n default_params = asdict(param_klass())\n param_names = default_params.keys()\n\n params = {name: search_value(name_map.get(name, name), fallbacks, default_params.get(name)) for name in param_names}\n\n return param_klass(**params)\n\n @staticmethod\n def search_value(key: str, fallbacks: Sequence[dict], default_value = None):\n for cand in fallbacks:\n value = cand.get(key)\n if value is not None:\n return value\n\n return default_value" }, { "identifier": "apply_snr_weight", "path": "library/custom_train_functions.py", "snippet": "def apply_snr_weight(loss, timesteps, noise_scheduler, gamma):\r\n snr = torch.stack([noise_scheduler.all_snr[t] for t in timesteps])\r\n gamma_over_snr = torch.div(torch.ones_like(snr) * gamma, snr)\r\n snr_weight = torch.minimum(gamma_over_snr, torch.ones_like(gamma_over_snr)).float() # from paper\r\n loss = loss * snr_weight\r\n return loss\r" }, { 
"identifier": "get_weighted_text_embeddings", "path": "library/custom_train_functions.py", "snippet": "def get_weighted_text_embeddings(\r\n tokenizer,\r\n text_encoder,\r\n prompt: Union[str, List[str]],\r\n device,\r\n max_embeddings_multiples: Optional[int] = 3,\r\n no_boseos_middle: Optional[bool] = False,\r\n clip_skip=None,\r\n):\r\n r\"\"\"\r\n Prompts can be assigned with local weights using brackets. For example,\r\n prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',\r\n and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.\r\n\r\n Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean.\r\n\r\n Args:\r\n prompt (`str` or `List[str]`):\r\n The prompt or prompts to guide the image generation.\r\n max_embeddings_multiples (`int`, *optional*, defaults to `3`):\r\n The max multiple length of prompt embeddings compared to the max output length of text encoder.\r\n no_boseos_middle (`bool`, *optional*, defaults to `False`):\r\n If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and\r\n ending token in each of the chunk in the middle.\r\n skip_parsing (`bool`, *optional*, defaults to `False`):\r\n Skip the parsing of brackets.\r\n skip_weighting (`bool`, *optional*, defaults to `False`):\r\n Skip the weighting. When the parsing is skipped, it is forced True.\r\n \"\"\"\r\n max_length = (tokenizer.model_max_length - 2) * max_embeddings_multiples + 2\r\n if isinstance(prompt, str):\r\n prompt = [prompt]\r\n\r\n prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)\r\n\r\n # round up the longest length of tokens to a multiple of (model_max_length - 2)\r\n max_length = max([len(token) for token in prompt_tokens])\r\n\r\n max_embeddings_multiples = min(\r\n max_embeddings_multiples,\r\n (max_length - 1) // (tokenizer.model_max_length - 2) + 1,\r\n )\r\n max_embeddings_multiples = max(1, max_embeddings_multiples)\r\n max_length = (tokenizer.model_max_length - 2) * max_embeddings_multiples + 2\r\n\r\n # pad the length of tokens and weights\r\n bos = tokenizer.bos_token_id\r\n eos = tokenizer.eos_token_id\r\n pad = tokenizer.pad_token_id\r\n prompt_tokens, prompt_weights = pad_tokens_and_weights(\r\n prompt_tokens,\r\n prompt_weights,\r\n max_length,\r\n bos,\r\n eos,\r\n no_boseos_middle=no_boseos_middle,\r\n chunk_length=tokenizer.model_max_length,\r\n )\r\n prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=device)\r\n\r\n # get the embeddings\r\n text_embeddings = get_unweighted_text_embeddings(\r\n tokenizer,\r\n text_encoder,\r\n prompt_tokens,\r\n tokenizer.model_max_length,\r\n clip_skip,\r\n eos,\r\n pad,\r\n no_boseos_middle=no_boseos_middle,\r\n )\r\n prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=device)\r\n\r\n # assign weights to the prompts and normalize in the sense of mean\r\n previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)\r\n text_embeddings = text_embeddings * prompt_weights.unsqueeze(-1)\r\n current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)\r\n text_embeddings = text_embeddings * (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)\r\n\r\n return text_embeddings\r" }, { "identifier": "prepare_scheduler_for_custom_training", "path": "library/custom_train_functions.py", "snippet": "def prepare_scheduler_for_custom_training(noise_scheduler, 
device):\r\n if hasattr(noise_scheduler, \"all_snr\"):\r\n return\r\n\r\n alphas_cumprod = noise_scheduler.alphas_cumprod\r\n sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)\r\n sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod)\r\n alpha = sqrt_alphas_cumprod\r\n sigma = sqrt_one_minus_alphas_cumprod\r\n all_snr = (alpha / sigma) ** 2\r\n\r\n noise_scheduler.all_snr = all_snr.to(device)\r" }, { "identifier": "pyramid_noise_like", "path": "library/custom_train_functions.py", "snippet": "def pyramid_noise_like(noise, device, iterations=6, discount=0.4):\r\n b, c, w, h = noise.shape # EDIT: w and h get over-written, rename for a different variant!\r\n u = torch.nn.Upsample(size=(w, h), mode=\"bilinear\").to(device)\r\n for i in range(iterations):\r\n r = random.random() * 2 + 2 # Rather than always going 2x,\r\n wn, hn = max(1, int(w / (r**i))), max(1, int(h / (r**i)))\r\n noise += u(torch.randn(b, c, wn, hn).to(device)) * discount**i\r\n if wn == 1 or hn == 1:\r\n break # Lowest resolution is 1x1\r\n return noise / noise.std() # Scaled back to roughly unit variance\r" }, { "identifier": "apply_noise_offset", "path": "library/custom_train_functions.py", "snippet": "def apply_noise_offset(latents, noise, noise_offset, adaptive_noise_scale):\r\n if noise_offset is None:\r\n return noise\r\n if adaptive_noise_scale is not None:\r\n # latent shape: (batch_size, channels, height, width)\r\n # abs mean value for each channel\r\n latent_mean = torch.abs(latents.mean(dim=(2, 3), keepdim=True))\r\n\r\n # multiply adaptive noise scale to the mean value and add it to the noise offset\r\n noise_offset = noise_offset + adaptive_noise_scale * latent_mean\r\n noise_offset = torch.clamp(noise_offset, 0.0, None) # in case of adaptive noise scale is negative\r\n\r\n noise = noise + noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)\r\n return noise\r" }, { "identifier": "scale_v_prediction_loss_like_noise_prediction", "path": "library/custom_train_functions.py", "snippet": "def scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler):\r\n snr_t = torch.stack([noise_scheduler.all_snr[t] for t in timesteps]) # batch_size\r\n snr_t = torch.minimum(snr_t, torch.ones_like(snr_t) * 1000) # if timestep is 0, snr_t is inf, so limit it to 1000\r\n scale = snr_t / (snr_t + 1)\r\n\r\n loss = loss * scale\r\n return loss\r" } ]
import argparse import gc import math import os import toml import torch import diffusers import library.train_util as train_util import library.config_util as config_util import library.custom_train_functions as custom_train_functions from multiprocessing import Value from tqdm import tqdm from accelerate.utils import set_seed from diffusers import DDPMScheduler from library.config_util import ( ConfigSanitizer, BlueprintGenerator, ) from library.custom_train_functions import ( apply_snr_weight, get_weighted_text_embeddings, prepare_scheduler_for_custom_training, pyramid_noise_like, apply_noise_offset, scale_v_prediction_loss_like_noise_prediction, )
7,125
with torch.no_grad(): train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process) vae.to("cpu") if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() accelerator.wait_for_everyone() # 学習を準備する:モデルを適切な状態にする training_models = [] if args.gradient_checkpointing: unet.enable_gradient_checkpointing() training_models.append(unet) if args.train_text_encoder: print("enable text encoder training") if args.gradient_checkpointing: text_encoder.gradient_checkpointing_enable() training_models.append(text_encoder) else: text_encoder.to(accelerator.device, dtype=weight_dtype) text_encoder.requires_grad_(False) # text encoderは学習しない if args.gradient_checkpointing: text_encoder.gradient_checkpointing_enable() text_encoder.train() # required for gradient_checkpointing else: text_encoder.eval() if not cache_latents: vae.requires_grad_(False) vae.eval() vae.to(accelerator.device, dtype=weight_dtype) for m in training_models: m.requires_grad_(True) params = [] for m in training_models: params.extend(m.parameters()) params_to_optimize = params # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1) # cpu_count-1 ただし最大で指定された数まで train_dataloader = torch.utils.data.DataLoader( train_dataset_group, batch_size=1, shuffle=True, collate_fn=collater, num_workers=n_workers, persistent_workers=args.persistent_data_loader_workers, ) # 学習ステップ数を計算する if args.max_train_epochs is not None: args.max_train_steps = args.max_train_epochs * math.ceil( len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps ) print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # データセット側にも学習ステップを送信 train_dataset_group.set_max_train_steps(args.max_train_steps) # lr schedulerを用意する lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: assert ( args.mixed_precision == "fp16" ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。" print("enable full fp16 training.") unet.to(weight_dtype) text_encoder.to(weight_dtype) # acceleratorがなんかよろしくやってくれるらしい if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler) # transform DDP after prepare text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet) # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする if args.full_fp16: train_util.patch_accelerator_for_fp16_training(accelerator) # resumeする train_util.resume_from_local_or_hf_if_specified(accelerator, args) # epoch数を計算する num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0): args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1 # 学習する total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps print("running training / 学習開始") print(f" num examples / サンプル数: {train_dataset_group.num_train_images}") print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}") print(f" num epochs / epoch数: {num_train_epochs}") print(f" batch size per device / バッチサイズ: {args.train_batch_size}") print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}") print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") global_step = args.global_step print(global_step) progress_bar = tqdm(range(args.max_train_steps), initial=global_step, smoothing=0, disable=not accelerator.is_local_main_process, desc="steps") noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False )
# training with captions # XXX dropped option: hypernetwork training def train(args): train_util.verify_training_args(args) train_util.prepare_dataset_args(args, True) cache_latents = args.cache_latents if args.seed is not None: set_seed(args.seed) # 乱数系列を初期化する tokenizer = train_util.load_tokenizer(args) # データセットを準備する if args.dataset_class is None: blueprint_generator = BlueprintGenerator(ConfigSanitizer(False, True, True)) if args.dataset_config is not None: print(f"Load dataset config from {args.dataset_config}") user_config = config_util.load_user_config(args.dataset_config) ignored = ["train_data_dir", "in_json"] if any(getattr(args, attr) is not None for attr in ignored): print( "ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format( ", ".join(ignored) ) ) else: user_config = { "datasets": [ { "subsets": [ { "image_dir": args.train_data_dir, "metadata_file": args.in_json, } ] } ] } print(user_config) blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer) train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group) else: train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizer) current_epoch = Value("i", 0) current_step = Value("i", 0) ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None collater = train_util.collater_class(current_epoch, current_step, ds_for_collater) if args.debug_dataset: train_util.debug_dataset(train_dataset_group) return if len(train_dataset_group) == 0: print( "No data found. Please verify the metadata file and train_data_dir option. / 画像がありません。メタデータおよびtrain_data_dirオプションを確認してください。" ) return if cache_latents: assert ( train_dataset_group.is_latent_cacheable() ), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" # acceleratorを準備する print("prepare accelerator") accelerator, unwrap_model = train_util.prepare_accelerator(args) # mixed precisionに対応した型を用意しておき適宜castする weight_dtype, save_dtype = train_util.prepare_dtype(args) # モデルを読み込む text_encoder, vae, unet, load_stable_diffusion_format = train_util.load_target_model(args, weight_dtype, accelerator, False) # verify load/save model formats if load_stable_diffusion_format: src_stable_diffusion_ckpt = args.pretrained_model_name_or_path src_diffusers_model_path = None else: src_stable_diffusion_ckpt = None src_diffusers_model_path = args.pretrained_model_name_or_path if args.save_model_as is None: save_stable_diffusion_format = load_stable_diffusion_format use_safetensors = args.use_safetensors else: save_stable_diffusion_format = args.save_model_as.lower() == "ckpt" or args.save_model_as.lower() == "safetensors" use_safetensors = args.use_safetensors or ("safetensors" in args.save_model_as.lower()) # Diffusers版のxformers使用フラグを設定する関数 def set_diffusers_xformers_flag(model, valid): # model.set_use_memory_efficient_attention_xformers(valid) # 次のリリースでなくなりそう # pipeが自動で再帰的にset_use_memory_efficient_attention_xformersを探すんだって(;´Д`) # U-Netだけ使う時にはどうすればいいのか……仕方ないからコピって使うか # 0.10.2でなんか巻き戻って個別に指定するようになった(;^ω^) # Recursively walk through all the children. 
# Any children which exposes the set_use_memory_efficient_attention_xformers method # gets the message def fn_recursive_set_mem_eff(module: torch.nn.Module): if hasattr(module, "set_use_memory_efficient_attention_xformers"): module.set_use_memory_efficient_attention_xformers(valid) for child in module.children(): fn_recursive_set_mem_eff(child) fn_recursive_set_mem_eff(model) # モデルに xformers とか memory efficient attention を組み込む if args.diffusers_xformers: print("Use xformers by Diffusers") set_diffusers_xformers_flag(unet, True) else: # Windows版のxformersはfloatで学習できないのでxformersを使わない設定も可能にしておく必要がある print("Disable Diffusers' xformers") set_diffusers_xformers_flag(unet, False) train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers) # 学習を準備する if cache_latents: vae.to(accelerator.device, dtype=weight_dtype) vae.requires_grad_(False) vae.eval() with torch.no_grad(): train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process) vae.to("cpu") if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() accelerator.wait_for_everyone() # 学習を準備する:モデルを適切な状態にする training_models = [] if args.gradient_checkpointing: unet.enable_gradient_checkpointing() training_models.append(unet) if args.train_text_encoder: print("enable text encoder training") if args.gradient_checkpointing: text_encoder.gradient_checkpointing_enable() training_models.append(text_encoder) else: text_encoder.to(accelerator.device, dtype=weight_dtype) text_encoder.requires_grad_(False) # text encoderは学習しない if args.gradient_checkpointing: text_encoder.gradient_checkpointing_enable() text_encoder.train() # required for gradient_checkpointing else: text_encoder.eval() if not cache_latents: vae.requires_grad_(False) vae.eval() vae.to(accelerator.device, dtype=weight_dtype) for m in training_models: m.requires_grad_(True) params = [] for m in training_models: params.extend(m.parameters()) params_to_optimize = params # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1) # cpu_count-1 ただし最大で指定された数まで train_dataloader = torch.utils.data.DataLoader( train_dataset_group, batch_size=1, shuffle=True, collate_fn=collater, num_workers=n_workers, persistent_workers=args.persistent_data_loader_workers, ) # 学習ステップ数を計算する if args.max_train_epochs is not None: args.max_train_steps = args.max_train_epochs * math.ceil( len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps ) print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # データセット側にも学習ステップを送信 train_dataset_group.set_max_train_steps(args.max_train_steps) # lr schedulerを用意する lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: assert ( args.mixed_precision == "fp16" ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。" print("enable full fp16 training.") unet.to(weight_dtype) text_encoder.to(weight_dtype) # acceleratorがなんかよろしくやってくれるらしい if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler) # transform DDP after prepare text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet) # 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする if args.full_fp16: train_util.patch_accelerator_for_fp16_training(accelerator) # resumeする train_util.resume_from_local_or_hf_if_specified(accelerator, args) # epoch数を計算する num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0): args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1 # 学習する total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps print("running training / 学習開始") print(f" num examples / サンプル数: {train_dataset_group.num_train_images}") print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}") print(f" num epochs / epoch数: {num_train_epochs}") print(f" batch size per device / バッチサイズ: {args.train_batch_size}") print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}") print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") global_step = args.global_step print(global_step) progress_bar = tqdm(range(args.max_train_steps), initial=global_step, smoothing=0, disable=not accelerator.is_local_main_process, desc="steps") noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False )
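A worked example (illustrative numbers only, not taken from the record) of the step bookkeeping the training script above performs when max_train_epochs is given; the formulas mirror the script, the values are hypothetical.

import math

len_train_dataloader = 1000        # batches per epoch seen by this process
num_processes = 2                  # accelerator.num_processes
gradient_accumulation_steps = 4
max_train_epochs = 10
train_batch_size = 8

max_train_steps = max_train_epochs * math.ceil(
    len_train_dataloader / num_processes / gradient_accumulation_steps
)                                                                                   # 10 * 125 = 1250
num_update_steps_per_epoch = math.ceil(len_train_dataloader / gradient_accumulation_steps)  # 250
num_train_epochs = math.ceil(max_train_steps / num_update_steps_per_epoch)                  # 5
total_batch_size = train_batch_size * num_processes * gradient_accumulation_steps           # 64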
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
4
2023-12-30 07:46:35+00:00
8k
khabbazan/Mattermost-Subscriptions
helpers/channels_graphql_ws/graphql_ws_consumer.py
[ { "identifier": "DictAsObject", "path": "helpers/channels_graphql_ws/dict_as_object.py", "snippet": "class DictAsObject:\n \"\"\"Dict wrapper to access keys as attributes.\"\"\"\n\n def __init__(self, scope):\n \"\"\"Remember given `scope`.\"\"\"\n self._scope = scope\n\n def _asdict(self):\n \"\"\"Provide inner Channels scope object.\"\"\"\n return self._scope\n\n # ------------------------------------------------ WRAPPER FUNCTIONS\n def __getattr__(self, name):\n \"\"\"Route attributes to the scope object.\"\"\"\n if name.startswith(\"_\"):\n raise AttributeError()\n try:\n return self._scope[name]\n except KeyError as ex:\n raise AttributeError() from ex\n\n def __setattr__(self, name, value):\n \"\"\"Route attributes to the scope object.\"\"\"\n if name.startswith(\"_\"):\n super().__setattr__(name, value)\n self._scope[name] = value\n\n # ----------------------------------------------------- DICT WRAPPER\n def __getitem__(self, key):\n \"\"\"Wrap dict method.\"\"\"\n return self._scope[key]\n\n def __setitem__(self, key, value):\n \"\"\"Wrap dict method.\"\"\"\n self._scope[key] = value\n\n def __delitem__(self, key):\n \"\"\"Wrap dict method.\"\"\"\n del self._scope[key]\n\n def __contains__(self, item):\n \"\"\"Wrap dict method.\"\"\"\n return item in self._scope\n\n def __str__(self):\n \"\"\"Wrap dict method.\"\"\"\n return self._scope.__str__()\n\n def __repr__(self):\n \"\"\"Wrap dict method.\"\"\"\n return self._scope.__repr__()" }, { "identifier": "Serializer", "path": "helpers/channels_graphql_ws/serializer.py", "snippet": "class Serializer:\n \"\"\"Serialize/deserialize Python collection with Django models.\n\n Serialize/deserialize the data with the MessagePack like Redis\n Channels layer backend does.\n\n If `data` contains Django models, then it is serialized by the\n Django serialization utilities. 
For details see:\n Django serialization:\n https://docs.djangoproject.com/en/dev/topics/serialization/\n MessagePack:\n https://github.com/msgpack/msgpack-python\n \"\"\"\n\n @staticmethod\n def serialize(data):\n \"\"\"Serialize the `data`.\"\"\"\n\n def encode_extra_types(obj):\n \"\"\"MessagePack hook to serialize extra types.\n\n The recipe took from the MessagePack for Python docs:\n https://github.com/msgpack/msgpack-python#packingunpacking-of-custom-data-type\n\n Supported types:\n - Django models (through `django.core.serializers`).\n - Python `datetime` types:\n - `datetime.datetime`\n - `datetime.date`\n - `datetime.time`\n\n \"\"\"\n if isinstance(obj, django.db.models.Model):\n return {\n \"__djangomodel__\": True,\n \"as_str\": django.core.serializers.serialize(\"json\", [obj]),\n }\n if isinstance(obj, datetime.datetime):\n return {\"__datetime__\": True, \"as_str\": obj.isoformat()}\n if isinstance(obj, datetime.date):\n return {\"__date__\": True, \"as_str\": obj.isoformat()}\n if isinstance(obj, datetime.time):\n return {\"__time__\": True, \"as_str\": obj.isoformat()}\n return obj\n\n return msgpack.packb(data, default=encode_extra_types, use_bin_type=True)\n\n @staticmethod\n def deserialize(data):\n \"\"\"Deserialize the `data`.\"\"\"\n\n def decode_extra_types(obj):\n \"\"\"MessagePack hook to deserialize extra types.\"\"\"\n if \"__djangomodel__\" in obj:\n obj = next(django.core.serializers.deserialize(\"json\", obj[\"as_str\"])).object\n elif \"__datetime__\" in obj:\n obj = datetime.datetime.fromisoformat(obj[\"as_str\"])\n elif \"__date__\" in obj:\n obj = datetime.date.fromisoformat(obj[\"as_str\"])\n elif \"__time__\" in obj:\n obj = datetime.time.fromisoformat(obj[\"as_str\"])\n return obj\n\n return msgpack.unpackb(data, object_hook=decode_extra_types, raw=False)" } ]
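An illustrative round-trip (not part of the record) of the MessagePack hook technique the Serializer snippet above describes, reduced to the datetime case and using only the msgpack-python calls shown in the snippet; the payload contents are made up.

import datetime
import msgpack

def encode_extra_types(obj):
    # Mirror of the snippet's datetime branch: encode as a tagged dict.
    if isinstance(obj, datetime.datetime):
        return {"__datetime__": True, "as_str": obj.isoformat()}
    return obj

def decode_extra_types(obj):
    # Inverse hook: restore tagged dicts back to datetime objects.
    if "__datetime__" in obj:
        return datetime.datetime.fromisoformat(obj["as_str"])
    return obj

packed = msgpack.packb(
    {"sent_at": datetime.datetime(2023, 12, 25, 11, 40)},
    default=encode_extra_types, use_bin_type=True,
)
restored = msgpack.unpackb(packed, object_hook=decode_extra_types, raw=False)
assert restored["sent_at"].year == 2023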
import asyncio import dataclasses import functools import inspect import logging import threading import time import traceback import weakref import channels.db import channels.generic.websocket as ch_websocket import graphene import graphql import graphql.error import graphql.execution import graphql.pyutils import graphql.utilities from typing import Any from typing import AsyncIterator from typing import Awaitable from typing import Callable from typing import Dict from typing import Iterable from typing import List from typing import Optional from typing import Tuple from typing import Type from typing import Union from typing import cast from .dict_as_object import DictAsObject from .serializer import Serializer
3,650
return_type = info.return_type while graphql.is_wrapping_type(return_type): return_type = return_type.of_type # type: ignore[union-attr] subscription_class = return_type.graphene_type # type: ignore[union-attr] # It is ok to access private fields of `Subscription` # implementation. `Subscription` class used to create # subscriptions as graphene object but actually it is a part of # consumer implementation. # pylint: disable=protected-access # Attach current subscription to the group corresponding to # the concrete class. This allows to trigger all the # subscriptions of the current type, by invoking `publish` # without setting the `group` argument. groups = [subscription_class._group_name()] # Invoke the subclass-specified `subscribe` method to get # the groups subscription must be attached to. if subscription_class._meta.subscribe is not None: subclass_groups = subscription_class._meta.subscribe(root, info, *args, **kwds) # Properly handle `async def subscribe`. if asyncio.iscoroutinefunction(subscription_class._meta.subscribe): subclass_groups = await subclass_groups assert subclass_groups is None or isinstance(subclass_groups, (list, tuple)), ( f"Method 'subscribe' returned a value of an incorrect type" f" {type(subclass_groups)}! A list, a tuple, or 'None' expected." ) subclass_groups = subclass_groups or [] else: subclass_groups = [] groups += [subscription_class._group_name(group) for group in subclass_groups] # The subscription notification queue. Required to preserve the # order of notifications within a single subscription. queue_size = subscription_class.notification_queue_limit if queue_size is None or queue_size <= 0: # Take default limit from the Consumer class. queue_size = self.subscription_notification_queue_limit # The subscription notification queue. # NOTE: The asyncio.Queue class is not thread-safe. So use the # `notification_queue_lock` as a guard while reading or writing # to the queue. notification_queue: asyncio.Queue = asyncio.Queue(maxsize=queue_size) # Lock to ensure that `notification_queue` operations are # thread safe. notification_queue_lock = threading.RLock() unsubscribed = subscription_class._meta.unsubscribed async def unsubscribed_callback(): """Call `unsubscribed` notification. The `cls._meta.unsubscribed` might do blocking operations, so offload it to the thread. """ if unsubscribed is None: return None result = unsubscribed(None, info, *args, **kwds) # Properly handle `async def unsubscribed`. if inspect.isawaitable(result): result = await result def enqueue_notification(payload): """Put notification to the queue. Called by the WebSocket consumer (instance of the GraphqlWsConsumer subclass) when it receives the broadcast message (from the Channels group) sent by the Subscription.broadcast. Args: sid: Operation id of the subscription. """ while True: with notification_queue_lock: try: notification_queue.put_nowait(payload) break # The item was enqueued. Exit the loop. except asyncio.QueueFull: # The queue is full - issue a warning and throw # away the oldest item from the queue. # NOTE: Queue with the size 1 means that it is # safe to drop intermediate notifications. if notification_queue.maxsize != 1: LOG.warning( "Subscription notification dropped! Operation %s(%s).", operation_name, operation_id, ) notification_queue.get_nowait() notification_queue.task_done() # Try to put the incoming item to the queue # within the same lock. This is an speed # optimization. try: notification_queue.put_nowait(payload) # The item was enqueued. Exit the loop. 
break except asyncio.QueueFull: # Kind'a impossible to get here, but if we # do, then we should retry until the queue # have capacity to process item. pass waitlist = [] for group in groups: self._sids_by_group.setdefault(group, []).append(operation_id) waitlist.append(asyncio.create_task(self._channel_layer.group_add(group, self.channel_name))) self._subscriptions[operation_id] = self._SubInf( groups=groups, sid=operation_id, unsubscribed_callback=unsubscribed_callback, enqueue_notification=enqueue_notification, ) if waitlist: await asyncio.wait(waitlist)
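A simplified, runnable sketch (single producer, no lock; the consumer in the record additionally guards the queue with a threading.RLock) of the drop-oldest behaviour that enqueue_notification above implements for a bounded asyncio.Queue.

import asyncio

def enqueue_drop_oldest(queue: asyncio.Queue, item) -> None:
    while True:
        try:
            queue.put_nowait(item)
            return
        except asyncio.QueueFull:
            # Queue is full: discard the oldest entry and retry with the new item.
            queue.get_nowait()
            queue.task_done()

async def main() -> None:
    q: asyncio.Queue = asyncio.Queue(maxsize=2)
    for n in range(5):
        enqueue_drop_oldest(q, n)
    print([q.get_nowait() for _ in range(q.qsize())])  # -> [3, 4]

asyncio.run(main())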
# Copyright (C) DATADVANCE, 2011-2023 # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Channels consumer which implements GraphQL WebSocket protocol. The `GraphqlWsConsumer` is a Channels WebSocket consumer which maintains WebSocket connection with the client. Implementation assumes that client uses the protocol implemented by the library `subscription-transport-ws` (which is used by Apollo). NOTE: Links based on which this functionality is implemented: - Protocol description: https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md https://github.com/apollographql/subscriptions-transport-ws/blob/master/src/message-types.ts - ASGI specification for WebSockets: https://github.com/django/asgiref/blob/master/specs/www.rst#websocket - GitHubGist with the root of inspiration: https://gist.github.com/tricoder42/af3d0337c1b33d82c1b32d12bd0265ec """ # Module logger. LOG = logging.getLogger(__name__) # WebSocket subprotocol used for the GraphQL. GRAPHQL_WS_SUBPROTOCOL = "graphql-ws" class GraphqlWsConsumer(ch_websocket.AsyncJsonWebsocketConsumer): """Channels consumer for the WebSocket GraphQL backend. NOTE: Each instance of this class maintains one WebSocket connection to a single client. This class implements the WebSocket-based GraphQL protocol used by `subscriptions-transport-ws` library (used by Apollo): https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md """ # ----------------------------------------------------------------- PUBLIC INTERFACE # Overwrite this in the subclass to specify the GraphQL schema which # processes GraphQL queries. schema: graphene.Schema # The interval to send keepalive messages to the clients (seconds). send_keepalive_every: Optional[float] = None # Set to `True` to process requests (i.e. GraphQL documents) from # a client in order of arrival, which is the same as sending order, # as guaranteed by the WebSocket protocol. This means that request # processing for this particular client becomes serial - in other # words, the server will not start processing another request # before it finishes the current one. Note that requests from # different clients (within different WebSocket connections) # are still processed asynchronously. Useful for tests. strict_ordering: bool = False # When set to `True` the server will send an empty data message in # response to the subscription. This is needed to let client know # when the subscription activates, so he can be sure he doesn't miss # any notifications. 
Disabled by default, cause this is an extension # to the original protocol and the client must be tuned accordingly. confirm_subscriptions: bool = False # The message sent to the client when subscription activation # confirmation is enabled. subscription_confirmation_message: Dict[str, Any] = {"data": None, "errors": None} # Issue a warning to the log when operation takes longer than # specified number in seconds. None disables the warning. warn_operation_timeout: Optional[float] = 1 # The size of the subscription notification queue. If there are more # notifications (for a single subscription) than the given number, # then an oldest notification is dropped and a warning is logged. subscription_notification_queue_limit: int = 1024 # GraphQL middleware. # Instance of `graphql.MiddlewareManager` or the list of functions # (callables) like the following: # ```python # async def my_middleware(next_middleware, root, info, *args, **kwds): # result = next_middleware(root, info, *args, **kwds) # if graphql.pyutils.is_awaitable(result): # result = await result # return result # ``` # The first middleware in the middlewares list will be the closest # to the resolver in the middlewares call stack. # For more information read docs: # - https://docs.graphene-python.org/en/latest/execution/middleware/#middleware # - https://graphql-core-3.readthedocs.io/en/latest/diffs.html#custom-middleware # Docs about async middlewares are still missing - read the # GraphQL-core sources to know more. middleware: Optional[graphql.Middleware] = None async def on_connect(self, payload): """Client connection handler. Called after CONNECTION_INIT message from client. Overwrite and raise an Exception to tell the server to reject the connection when it's necessary. Args: payload: Payload from CONNECTION_INIT message. """ del payload async def on_operation(self, op_id, payload): """Process business logic before operation processing starts. Useful e.g. to check that user session is not yet expired. Throw `graphql.error.GraphQLError` to cancel the operation. Args: op_id: Operation id. payload: Payload of the operation. """ del op_id, payload # ------------------------------------------------------------------- IMPLEMENTATION # A prefix of Channel groups with subscription notifications. group_name_prefix: str = "GQLWS" # Structure that holds subscription information. @dataclasses.dataclass class _SubInf: """Subscription information structure.""" # Subscription identifier - protocol operation identifier. sid: int # Subscription groups the subscription belongs to. groups: List[str] # A function which triggets subscription. enqueue_notification: Callable[[Any], None] # The callback to invoke when client unsubscribes. unsubscribed_callback: Callable[..., Awaitable[None]] def __init__(self, *args, **kwargs): """Consumer constructor.""" assert self.schema is not None, "An attribute 'schema' is not set! Subclasses must specify " "the schema which processes GraphQL subscription queries." # Registry of active (subscribed) subscriptions. self._subscriptions: Dict[int, GraphqlWsConsumer._SubInf] = {} # {'<sid>': '<SubInf>', ...} self._sids_by_group = {} # {'<grp>': ['<sid0>', '<sid1>', ...], ...} # Tasks which send notifications to clients indexed by an # operation/subscription id. self._notifier_tasks: Dict[int, asyncio.Task] = {} # Task that sends keepalive messages periodically. self._keepalive_task = None # Background tasks to clean it up when a client disconnects. # We use weak collection so finished task will be autoremoved. 
self._background_tasks: weakref.WeakSet = weakref.WeakSet() # Crafty weak collection with per-operation locks. It holds a # mapping from the operaion id (protocol message id) to the # `asyncio.Lock` used to serialize processing of start & stop # requests. Since the collection is weak, it automatically # throws away items when locks are garbage collected. self._operation_locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary() # MiddlewareManager maintains internal cache for resolvers # wrapped with middlewares. Using the same manager for all # operations improves performance. self._middleware = None if self.middleware: self._middleware = self.middleware if not isinstance(self._middleware, graphql.MiddlewareManager): self._middleware = graphql.MiddlewareManager(*self._middleware) super().__init__(*args, **kwargs) # ---------------------------------------------------------- CONSUMER EVENT HANDLERS async def connect(self): """Handle new WebSocket connection.""" # Check the subprotocol told by the client. # # NOTE: In Python 3.6 `scope["subprotocols"]` was a string, but # starting with Python 3.7 it is a bytes. This can be a proper # change or just a bug in the Channels to be fixed. So let's # accept both variants until it becomes clear. assert GRAPHQL_WS_SUBPROTOCOL in ((sp.decode() if isinstance(sp, bytes) else sp) for sp in self.scope["subprotocols"]), ( f"WebSocket client does not request for the subprotocol " f"{GRAPHQL_WS_SUBPROTOCOL}!" ) # Accept connection with the GraphQL-specific subprotocol. await self.accept(subprotocol=GRAPHQL_WS_SUBPROTOCOL) async def disconnect(self, code): """Handle WebSocket disconnect. Remove itself from the Channels groups, clear triggers and stop sending keepalive messages. """ # Print debug or warning message depending on the value of the # connection close code. We consider all reserved codes (<999), # 1000 "Normal Closure", and 1001 "Going Away" as OK. # See: https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent if not code: LOG.warning("WebSocket connection closed without a code!") elif code <= 1001: LOG.debug("WebSocket connection closed with code: %s.", code) else: LOG.warning("WebSocket connection closed with code: %s!", code) # The list of awaitables to simultaneously wait at the end. waitlist: List[asyncio.Task] = [] # Unsubscribe from the Channels groups. waitlist += [asyncio.create_task(self._channel_layer.group_discard(group, self.channel_name)) for group in self._sids_by_group] # Cancel all currently running background tasks. for bg_task in self._background_tasks: bg_task.cancel() waitlist += list(self._background_tasks) # Stop sending keepalive messages (if enabled). if self._keepalive_task is not None: self._keepalive_task.cancel() waitlist += [self._keepalive_task] # Stop tasks which listen to GraphQL lib and send notifications. for notifier_task in self._notifier_tasks.values(): notifier_task.cancel() waitlist += [notifier_task] # Wait for tasks to stop. if waitlist: await asyncio.wait(waitlist) self._background_tasks.clear() self._keepalive_task = None self._notifier_tasks.clear() self._operation_locks.clear() self._sids_by_group.clear() self._subscriptions.clear() async def receive_json(self, content): # pylint: disable=arguments-differ """Process WebSocket message received from the client. NOTE: We force 'STOP' message processing to wait until 'START' with the same operation id finishes (if it is running). This protects us from race conditions which may happen when a client stops operation immediately after starting it. 
An illustrative example is a subscribe-unsubscribe pair. If we spawn processing of both messages concurrently we can deliver subscription confirmation after unsubscription confirmation. """ # Extract message type based on which we select how to proceed. msg_type = content["type"].upper() if msg_type == "CONNECTION_INIT": task = self._on_gql_connection_init(payload=content["payload"]) elif msg_type == "CONNECTION_TERMINATE": task = self._on_gql_connection_terminate() elif msg_type == "START": op_id = content["id"] # Create and lock a mutex for this particular operation id, # so STOP processing for the same operation id will wait # until START processing finishes. Locks are stored in a # weak collection so we do not have to manually clean it up. if op_id in self._operation_locks: raise graphql.error.GraphQLError(f"Operation with msg_id={op_id} is already running!") op_lock = asyncio.Lock() self._operation_locks[op_id] = op_lock await op_lock.acquire() async def on_start(): try: # User hook which raises to cancel processing. await self.on_operation(op_id, payload=content["payload"]) # START message processing. await self._on_gql_start(op_id, payload=content["payload"]) except Exception as ex: # pylint: disable=broad-except await self._send_gql_error(op_id, ex) finally: op_lock.release() task = on_start() elif msg_type == "STOP": op_id = content["id"] async def on_stop(): # Wait until START message processing finishes, if any. async with self._operation_locks.setdefault(op_id, asyncio.Lock()): await self._on_gql_stop(op_id) task = on_stop() else: task = self._send_gql_error( content["id"] if "id" in content else None, Exception(f"Wrong message type '{msg_type}'!"), ) # If strict ordering is required then simply wait until the # message processing finishes. Otherwise spawn a task so # Channels may continue calling `receive_json` while requests # (i.e. GraphQL documents) are being processed. if self.strict_ordering: await task else: self._spawn_background_task(task) async def broadcast(self, message): """The broadcast message handler. Method is called when new `broadcast` message (sent by `Subscription.broadcast`) received from the Channels group. """ # If strict ordering is required then simply wait until all the # broadcast messages are sent. Otherwise spawn a task so this # consumer will continue receiving messages. if self.strict_ordering: await self._process_broadcast(message) else: self._spawn_background_task(self._process_broadcast(message)) async def _process_broadcast(self, message): """Process the broadcast message. This triggers subscription notification to all the subscriptions belonging to the group received in the `message`. NOTE: Depending on the value of the `strict_ordering` setting this method is either awaited directly or offloaded to an async task by the `broadcast` method (message handler). """ group = message["group"] # Do nothing if group does not exist. It is quite possible for # a client and a backend to concurrently unsubscribe and send # notification. And these events do not need to be synchronized. if group not in self._sids_by_group: return payload = message["payload"] # Put the payload to the notification queues of subscriptions # belonging to the subscription group. Drop the oldest payloads # if the `notification_queue` is full. for sid in self._sids_by_group[group]: subinf = self._subscriptions[sid] subinf.enqueue_notification(payload) async def unsubscribe(self, message): """The unsubscribe message handler. 
Method is called when new `unsubscribe` message received from the Channels group. The message is typically sent by the method `Subscription.unsubscribe`. Here we figure out the group message received from and stop all the subscriptions in this group. """ group = message["group"] # Do nothing if group does not exist. It is quite possible for # a client and a backend to unsubscribe from a subscription # concurrently. And these events do not need to be synchronized. if group not in self._sids_by_group: return # Send messages which look like user unsubscribes from all # subscriptions in the subscription group. This saves us from # thinking about raise condition between subscription and # unsubscription. if self._sids_by_group[group]: await asyncio.wait([asyncio.create_task(self.receive_json({"type": "stop", "id": sid})) for sid in self._sids_by_group[group]]) # ---------------------------------------------------------- GRAPHQL PROTOCOL EVENTS async def _on_gql_connection_init(self, payload): """Process the CONNECTION_INIT message. Start sending keepalive messages if `send_keepalive_every` set. Respond with either CONNECTION_ACK or CONNECTION_ERROR message. NOTE: Depending on the value of the `strict_ordering` setting this method is either awaited directly or offloaded to an async task. See the `receive_json` handler. """ try: # Notify subclass a new client is connected. await self.on_connect(payload) except Exception as ex: # pylint: disable=broad-except await self._send_gql_connection_error(ex) # Close the connection. NOTE: We use the 4000 code because # there are two reasons: A) We can not use codes greater # than 1000 and less than 3000 because Daphne and Autobahn # do not allow this (see `sendClose` from # `autobahn/websocket/protocol.py` and # `daphne/ws_protocol.py`). B) # https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent # Mozilla offers codes 4000–4999 available for all apps. await self.close(code=4000) else: # Send CONNECTION_ACK message. await self._send_gql_connection_ack() # If keepalive enabled then send one message immediately and # schedule periodic messages. if self.send_keepalive_every is not None: send_keepalive_every = self.send_keepalive_every async def keepalive_sender(): """Send keepalive messages periodically.""" while True: await asyncio.sleep(send_keepalive_every) await self._send_gql_connection_keep_alive() self._keepalive_task = asyncio.create_task(keepalive_sender()) # Immediately send keepalive message cause it is # required by the protocol description. await self._send_gql_connection_keep_alive() async def _on_gql_connection_terminate(self): """Process the CONNECTION_TERMINATE message. NOTE: Depending on the value of the `strict_ordering` setting this method is either awaited directly or offloaded to an async task. See the `receive_json` handler. """ # Close the connection. await self.close(code=1000) async def _on_gql_start(self, op_id, payload): """Process the START message. Handle the message with query, mutation or subscription request. NOTE: Depending on the value of the `strict_ordering` setting this method is either awaited directly or offloaded to an async task. See the `receive_json` handler. """ try: if op_id in self._subscriptions: message = f"Subscription with msg_id={op_id} already exists!" raise graphql.error.GraphQLError(message) # Get the message data. query = payload["query"] op_name = payload.get("operationName") variables = payload.get("variables", {}) # Prepare a context object. 
context = DictAsObject({}) context.channels_scope = self.scope context.channel_name = self.channel_name context.graphql_operation_name = op_name context.graphql_operation_id = op_id # Process the request with Graphene and GraphQL-core. doc_ast, op_ast, errors = await self._on_gql_start__parse_query(op_name, query) if errors: await self._send_gql_data(op_id, None, errors) await self._send_gql_complete(op_id) return # Assert values are not None to suppress MyPy complains. assert doc_ast is not None assert op_ast is not None # If the operation is subscription. if op_ast.operation == graphql.language.ast.OperationType.SUBSCRIPTION: LOG.debug( "Subscription request. Operation ID: %s, operation name: %s.)", op_id, op_name, ) # This returns asynchronous generator or ExecutionResult # instance in case of error. subscr_result = await self._on_gql_start__subscribe( doc_ast, operation_name=op_name, root_value=None, variable_values=variables, context_value=context, subscribe_field_resolver=functools.partial( self._on_gql_start__initialize_subscription_stream, op_id, op_name, ), middleware=self._middleware, ) # When subscr_result is an AsyncGenerator, consume # stream of notifications and send them to clients. if not isinstance(subscr_result, graphql.ExecutionResult): stream = cast(AsyncIterator[graphql.ExecutionResult], subscr_result) # Send subscription activation message (if enabled) # NOTE: We do it before reading the the stream # stream to guarantee that no notifications are sent # before the subscription confirmation message. if self.confirm_subscriptions: await self._send_gql_data( op_id, data=self.subscription_confirmation_message["data"], errors=self.subscription_confirmation_message["errors"], ) consumer_init_done = asyncio.Event() async def consume_stream(): consumer_init_done.set() try: async for item in stream: # Skipped subscription event may have no # data and no errors. Send message only # when we have something to send. if item.data or item.errors: try: await self._send_gql_data(op_id, item.data, item.errors) except asyncio.CancelledError: break except Exception as ex: # pylint: disable=broad-except LOG.debug( "Exception in the subscription GraphQL resolver!" "Operation %s(%s).", op_name, op_id, exc_info=ex, ) await self._send_gql_data(op_id, None, [ex]) # We need to end this task when client drops # connection or unsubscribes, so lets store it. self._notifier_tasks[op_id] = asyncio.create_task(consume_stream()) # We must be sure here that the subscription # initialization is finished and the stream consumer # is active before we exit this function. Because in # the outer scope we have locking mechanism of start # and stop operations. And we want to say # "subscription operation is started" only when it # actually is. # This allows us to avoid the race condition between # simultaneous subscribe and unsubscribe calls. await consumer_init_done.wait() return # Else (when gql_subscribe returns ExecutionResult # containing error) fallback to standard handling below. operation_result = cast(graphql.ExecutionResult, subscr_result) # If the operation is query or mutation. else: LOG.debug("New query/mutation. Operation %s(%s).", op_name, op_id) if self.warn_operation_timeout is not None: start_time = time.perf_counter() # Standard name for "IntrospectionQuery". We might also # check that # `doc_ast.definitions[0].selection_set.selections[0].name.value` # equals to `__schema`. This is a more robust way. But # it will eat up more CPU pre each query. For now lets # check only a query name. 
middleware_manager = self._middleware if op_name == "IntrospectionQuery": # No need to call middlewares for the # IntrospectionQuery. There no real resolvers. Only # the type information. middleware_manager = None exec_result = graphql.execution.execute( self.schema.graphql_schema, document=doc_ast, root_value=None, operation_name=op_name, variable_values=variables, context_value=context, middleware=middleware_manager, ) if inspect.isawaitable(exec_result): exec_result = await exec_result operation_result = cast(graphql.ExecutionResult, exec_result) if self.warn_operation_timeout is not None: duration = time.perf_counter() - start_time if duration >= self.warn_operation_timeout: LOG.warning( "Operation %s(%s) took %.6f seconds. Debug" " log contains full operation details.", op_name, op_id, duration, ) LOG.debug( "Operation %s(%s) took %.6f seconds. Query:" " %r, variables: %r.", op_name, op_id, duration, query, variables, ) # Respond to a query or mutation immediately. await self._send_gql_data(op_id, operation_result.data, operation_result.errors) await self._send_gql_complete(op_id) except Exception as ex: # pylint: disable=broad-except if isinstance(ex, graphql.error.GraphQLError): # Respond with details of GraphQL execution error. LOG.warning("GraphQL error! Operation %s(%s).", op_name, op_id, exc_info=True) await self._send_gql_data(op_id, None, [ex]) await self._send_gql_complete(op_id) else: # Respond with general error responce. await self._send_gql_error(op_id, ex) async def _on_gql_start__parse_query( self, op_name: str, query: str ) -> Tuple[Optional[graphql.DocumentNode], Optional[graphql.OperationDefinitionNode], Optional[Iterable[graphql.GraphQLError]],]: """Parse and validate GraphQL query. It is highly likely that the same operation will be parsed many times, so this function is wrapped with LRU cache. This async function offloads the GraphQL processing to the worker thread cause according to our experiments even GraphQL document parsing and validation take a while and depends approx. linearly on the size of the selection set. This is a part of START message processing routine so the name prefixed with `_on_gql_start__` to make this explicit. Returns: Tuple with three optional fields: 0: AST of parsed GraphQL document. 1: GraphQL operation definition. 2: Sequence of errors. """ res = await channels.db.database_sync_to_async(self._on_gql_start__parse_query_sync_cached, thread_sensitive=False)(op_name, query) doc_ast: Optional[graphql.DocumentNode] = res[0] op_ast: Optional[graphql.OperationDefinitionNode] = res[1] errors: Optional[Iterable[graphql.GraphQLError]] = res[2] return (doc_ast, op_ast, errors) @functools.lru_cache(maxsize=128) def _on_gql_start__parse_query_sync_cached( self, op_name: str, query: str ) -> Tuple[Optional[graphql.DocumentNode], Optional[graphql.OperationDefinitionNode], Optional[Iterable[graphql.GraphQLError]],]: """Parse and validate GraphQL query. Cached sync implementation. This is a part of START message processing routine so the name prefixed with `_on_gql_start__` to make this explicit. """ # Parsing. try: doc_ast = graphql.parse(query) except graphql.GraphQLError as ex: return None, None, [ex] # Validation. 
validation_errors: List[graphql.GraphQLError] = graphql.validate(self.schema.graphql_schema, doc_ast) if validation_errors: return None, None, validation_errors op_ast = graphql.utilities.get_operation_ast(doc_ast, op_name) return doc_ast, op_ast, None async def _on_gql_start__subscribe( self, document: graphql.DocumentNode, root_value: Any = None, context_value: Any = None, variable_values: Optional[Dict[str, Any]] = None, operation_name: Optional[str] = None, field_resolver: Optional[graphql.GraphQLFieldResolver] = None, subscribe_field_resolver: Optional[graphql.GraphQLFieldResolver] = None, middleware: graphql.Middleware = None, execution_context_class: Optional[Type[graphql.ExecutionContext]] = None, ) -> Union[AsyncIterator[graphql.ExecutionResult], graphql.ExecutionResult]: """Create a GraphQL subscription. This is a copy of `graphql.execution.subscribe.subscribe` from the GraphQL-core library v3.2.3 improved to support middlewares and user defined execution_context_class. This is a part of START message processing routine so the name prefixed with `_on_gql_start__` to make this explicit. """ result_or_stream = await graphql.create_source_event_stream( self.schema.graphql_schema, document, root_value, context_value, variable_values, operation_name, subscribe_field_resolver, ) if isinstance(result_or_stream, graphql.ExecutionResult): return result_or_stream async def map_source_to_response(payload: Any) -> graphql.ExecutionResult: """Map source to response. For each payload yielded from a subscription, map it over the normal GraphQL :func:`~graphql.execute` function, with `payload` as the `root_value`. This implements the "MapSourceToResponseEvent" algorithm described in the GraphQL specification. The :func:`~graphql.execute` function provides the "ExecuteSubscriptionEvent" algorithm, as it is nearly identical to the "ExecuteQuery" algorithm, for which :func:`~graphql.execute` is also used. """ result = graphql.execute( self.schema.graphql_schema, document, payload, context_value, variable_values, operation_name, field_resolver, middleware=middleware, execution_context_class=execution_context_class, ) # type: ignore result = await result if inspect.isawaitable(result) else result result = cast(graphql.ExecutionResult, result) # Skip notification if subscription returned `None`. if not result.errors and result.data: for key in list(result.data.keys()): if result.data[key] is None: result.data.pop(key) return result # Map every source value to a ExecutionResult value. return graphql.MapAsyncIterator(result_or_stream, map_source_to_response) async def _on_gql_start__initialize_subscription_stream( self, operation_id: int, operation_name: str, root: Any, info: graphql.GraphQLResolveInfo, *args, **kwds, ): """Create asynchronous generator with subscription events. Called inside `_on_gql_start__subscribe` function by graphql-core as `subscribe_field_resolver` argument. This is a part of START message processing routine so the name prefixed with `_on_gql_start__` to make this explicit. """ # Graphene stores original subscription class in `graphene_type` # field of `return_type` object. Since subscriptions are build # on top of `graphene` we always have graphene specific # `return_type` class. return_type = info.return_type while graphql.is_wrapping_type(return_type): return_type = return_type.of_type # type: ignore[union-attr] subscription_class = return_type.graphene_type # type: ignore[union-attr] # It is ok to access private fields of `Subscription` # implementation. 
`Subscription` class used to create # subscriptions as graphene object but actually it is a part of # consumer implementation. # pylint: disable=protected-access # Attach current subscription to the group corresponding to # the concrete class. This allows to trigger all the # subscriptions of the current type, by invoking `publish` # without setting the `group` argument. groups = [subscription_class._group_name()] # Invoke the subclass-specified `subscribe` method to get # the groups subscription must be attached to. if subscription_class._meta.subscribe is not None: subclass_groups = subscription_class._meta.subscribe(root, info, *args, **kwds) # Properly handle `async def subscribe`. if asyncio.iscoroutinefunction(subscription_class._meta.subscribe): subclass_groups = await subclass_groups assert subclass_groups is None or isinstance(subclass_groups, (list, tuple)), ( f"Method 'subscribe' returned a value of an incorrect type" f" {type(subclass_groups)}! A list, a tuple, or 'None' expected." ) subclass_groups = subclass_groups or [] else: subclass_groups = [] groups += [subscription_class._group_name(group) for group in subclass_groups] # The subscription notification queue. Required to preserve the # order of notifications within a single subscription. queue_size = subscription_class.notification_queue_limit if queue_size is None or queue_size <= 0: # Take default limit from the Consumer class. queue_size = self.subscription_notification_queue_limit # The subscription notification queue. # NOTE: The asyncio.Queue class is not thread-safe. So use the # `notification_queue_lock` as a guard while reading or writing # to the queue. notification_queue: asyncio.Queue = asyncio.Queue(maxsize=queue_size) # Lock to ensure that `notification_queue` operations are # thread safe. notification_queue_lock = threading.RLock() unsubscribed = subscription_class._meta.unsubscribed async def unsubscribed_callback(): """Call `unsubscribed` notification. The `cls._meta.unsubscribed` might do blocking operations, so offload it to the thread. """ if unsubscribed is None: return None result = unsubscribed(None, info, *args, **kwds) # Properly handle `async def unsubscribed`. if inspect.isawaitable(result): result = await result def enqueue_notification(payload): """Put notification to the queue. Called by the WebSocket consumer (instance of the GraphqlWsConsumer subclass) when it receives the broadcast message (from the Channels group) sent by the Subscription.broadcast. Args: sid: Operation id of the subscription. """ while True: with notification_queue_lock: try: notification_queue.put_nowait(payload) break # The item was enqueued. Exit the loop. except asyncio.QueueFull: # The queue is full - issue a warning and throw # away the oldest item from the queue. # NOTE: Queue with the size 1 means that it is # safe to drop intermediate notifications. if notification_queue.maxsize != 1: LOG.warning( "Subscription notification dropped! Operation %s(%s).", operation_name, operation_id, ) notification_queue.get_nowait() notification_queue.task_done() # Try to put the incoming item to the queue # within the same lock. This is an speed # optimization. try: notification_queue.put_nowait(payload) # The item was enqueued. Exit the loop. break except asyncio.QueueFull: # Kind'a impossible to get here, but if we # do, then we should retry until the queue # have capacity to process item. 
pass waitlist = [] for group in groups: self._sids_by_group.setdefault(group, []).append(operation_id) waitlist.append(asyncio.create_task(self._channel_layer.group_add(group, self.channel_name))) self._subscriptions[operation_id] = self._SubInf( groups=groups, sid=operation_id, unsubscribed_callback=unsubscribed_callback, enqueue_notification=enqueue_notification, ) if waitlist: await asyncio.wait(waitlist)
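An illustrative client-side message sequence (not part of the record; the query text is invented) for the subscriptions-transport-ws style protocol that receive_json above dispatches on, carried over the "graphql-ws" WebSocket subprotocol.

subscription_lifecycle = [
    {"type": "connection_init", "payload": {}},
    {
        "type": "start",
        "id": "1",
        "payload": {
            "query": "subscription OnEvent { onEvent { message } }",
            "operationName": "OnEvent",
            "variables": {},
        },
    },
    # ... the server answers with CONNECTION_ACK, optional keepalives, and
    # one "data" message per broadcast until the client sends "stop" ...
    {"type": "stop", "id": "1"},
    {"type": "connection_terminate"},
]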
_deserialize = channels.db.database_sync_to_async(Serializer.deserialize, thread_sensitive=False)
1
2023-12-25 11:40:56+00:00
8k
Hatins/DEOE
data/genx_utils/sequence_for_streaming.py
[ { "identifier": "SparselyBatchedObjectLabels", "path": "data/genx_utils/labels.py", "snippet": "class SparselyBatchedObjectLabels:\n def __init__(self, sparse_object_labels_batch: List[Optional[ObjectLabels]]):\n # Can contain None elements that indicate missing labels.\n for entry in sparse_object_labels_batch:\n assert isinstance(entry, ObjectLabels) or entry is None\n self.sparse_object_labels_batch = sparse_object_labels_batch\n self.set_empty_labels_to_none_()\n\n def __len__(self) -> int:\n return len(self.sparse_object_labels_batch)\n\n def __iter__(self):\n return iter(self.sparse_object_labels_batch)\n\n def __getitem__(self, item: int) -> Optional[ObjectLabels]:\n if item < 0 or item >= len(self):\n raise IndexError(f'Index ({item}) out of range (0, {len(self) - 1})')\n return self.sparse_object_labels_batch[item]\n\n def __add__(self, other: SparselyBatchedObjectLabels):\n sparse_object_labels_batch = self.sparse_object_labels_batch + other.sparse_object_labels_batch\n return SparselyBatchedObjectLabels(sparse_object_labels_batch=sparse_object_labels_batch)\n\n def set_empty_labels_to_none_(self):\n for idx, obj_label in enumerate(self.sparse_object_labels_batch):\n if obj_label is not None and len(obj_label) == 0:\n self.sparse_object_labels_batch[idx] = None\n\n @property\n def input_size_hw(self) -> Optional[Union[Tuple[int, int], Tuple[float, float]]]:\n for obj_labels in self.sparse_object_labels_batch:\n if obj_labels is not None:\n return obj_labels.input_size_hw\n return None\n\n def zoom_in_and_rescale_(self, *args, **kwargs):\n for idx, entry in enumerate(self.sparse_object_labels_batch):\n if entry is not None:\n self.sparse_object_labels_batch[idx].zoom_in_and_rescale_(*args, **kwargs)\n # We may have deleted labels. If no labels are left, set the object to None\n self.set_empty_labels_to_none_()\n\n def zoom_out_and_rescale_(self, *args, **kwargs):\n for idx, entry in enumerate(self.sparse_object_labels_batch):\n if entry is not None:\n self.sparse_object_labels_batch[idx].zoom_out_and_rescale_(*args, **kwargs)\n\n def rotate_(self, *args, **kwargs):\n for idx, entry in enumerate(self.sparse_object_labels_batch):\n if entry is not None:\n self.sparse_object_labels_batch[idx].rotate_(*args, **kwargs)\n\n def scale_(self, *args, **kwargs):\n for idx, entry in enumerate(self.sparse_object_labels_batch):\n if entry is not None:\n self.sparse_object_labels_batch[idx].scale_(*args, **kwargs)\n # We may have deleted labels. 
If no labels are left, set the object to None\n self.set_empty_labels_to_none_()\n\n def flip_lr_(self):\n for idx, entry in enumerate(self.sparse_object_labels_batch):\n if entry is not None:\n self.sparse_object_labels_batch[idx].flip_lr_()\n\n def to(self, *args, **kwargs):\n for idx, entry in enumerate(self.sparse_object_labels_batch):\n if entry is not None:\n self.sparse_object_labels_batch[idx].to(*args, **kwargs)\n return self\n\n def get_valid_labels_and_batch_indices(self) -> Tuple[List[ObjectLabels], List[int]]:\n out = list()\n valid_indices = list()\n for idx, label in enumerate(self.sparse_object_labels_batch):\n if label is not None:\n out.append(label)\n valid_indices.append(idx)\n return out, valid_indices\n\n @staticmethod\n def transpose_list(list_of_sparsely_batched_object_labels: List[SparselyBatchedObjectLabels]) \\\n -> List[SparselyBatchedObjectLabels]:\n return [SparselyBatchedObjectLabels(list(labels_as_tuple)) for labels_as_tuple \\\n in zip(*list_of_sparsely_batched_object_labels)]" }, { "identifier": "SequenceBase", "path": "data/genx_utils/sequence_base.py", "snippet": "class SequenceBase(MapDataPipe):\n \"\"\"\n Structure example of a sequence:\n .\n ├── event_representations_v2\n │ └── ev_representation_name\n │ ├── event_representations.h5\n │ ├── objframe_idx_2_repr_idx.npy\n │ └── timestamps_us.npy\n └── labels_v2\n ├── labels.npz\n └── timestamps_us.npy\n \"\"\"\n\n def __init__(self,\n path: Path,\n ev_representation_name: str,\n sequence_length: int,\n dataset_type: DatasetType,\n downsample_by_factor_2: bool,\n only_load_end_labels: bool):\n assert sequence_length >= 1\n assert path.is_dir()\n assert dataset_type in {DatasetType.GEN1, DatasetType.GEN4}, f'{dataset_type} not implemented'\n\n self.only_load_end_labels = only_load_end_labels\n\n ev_repr_dir = get_event_representation_dir(path=path, ev_representation_name=ev_representation_name)\n\n labels_dir = path / 'labels_v2'\n assert labels_dir.is_dir()\n\n height, width = get_original_hw(dataset_type)\n self.seq_len = sequence_length\n\n ds_factor_str = '_ds2_nearest' if downsample_by_factor_2 else ''\n self.ev_repr_file = ev_repr_dir / f'event_representations{ds_factor_str}.h5'\n assert self.ev_repr_file.exists(), f'{str(self.ev_repr_file)=}'\n\n with Timer(timer_name='prepare labels'):\n label_data = np.load(str(labels_dir / 'labels.npz'))\n objframe_idx_2_label_idx = label_data['objframe_idx_2_label_idx']\n labels = label_data['labels']\n label_factory = ObjectLabelFactory.from_structured_array(\n object_labels=labels,\n objframe_idx_2_label_idx=objframe_idx_2_label_idx,\n input_size_hw=(height, width),\n downsample_factor=2 if downsample_by_factor_2 else None)\n self.label_factory = label_factory\n\n with Timer(timer_name='load objframe_idx_2_repr_idx'):\n self.objframe_idx_2_repr_idx = get_objframe_idx_2_repr_idx(\n path=path, ev_representation_name=ev_representation_name)\n with Timer(timer_name='construct repr_idx_2_objframe_idx'):\n self.repr_idx_2_objframe_idx = dict(zip(self.objframe_idx_2_repr_idx,\n range(len(self.objframe_idx_2_repr_idx))))\n\n def _get_labels_from_repr_idx(self, repr_idx: int, dataset_mode: DatasetMode, dataset_config: DictConfig) -> Optional[ObjectLabels]:\n objframe_idx = self.repr_idx_2_objframe_idx.get(repr_idx, None)\n self.training_classes = list(dataset_config.training_classes.keys())\n self.testing_classes = list(dataset_config.testing_classes.keys())\n if objframe_idx is None:\n return None\n else:\n if dataset_mode == DatasetMode.TRAIN:\n labels_ = 
self.label_factory[objframe_idx].get_labels_as_tensors(keep_classes=self.training_classes)\n mask = [row[-1] == 1 for row in labels_]\n if not (True in mask):\n return None\n else:\n return self.label_factory[objframe_idx]\n else:\n labels_ = self.label_factory[objframe_idx].get_labels_as_tensors(keep_classes=self.testing_classes)\n mask = [row[-1] == 1 for row in labels_]\n if not (True in mask):\n return None\n else:\n return self.label_factory[objframe_idx]\n\n # labels = torch.stack([row for i, row in enumerate(labels_) if mask[i]],dim=0)\n \n def _get_event_repr_torch(self, start_idx: int, end_idx: int) -> List[torch.Tensor]:\n assert end_idx > start_idx\n with h5py.File(str(self.ev_repr_file), 'r') as h5f:\n ev_repr = h5f['data'][start_idx:end_idx]\n ev_repr = torch.from_numpy(ev_repr)\n if ev_repr.dtype != torch.uint8:\n ev_repr = torch.asarray(ev_repr, dtype=torch.float32)\n ev_repr = torch.split(ev_repr, 1, dim=0)\n # remove first dim that is always 1 due to how torch.split works\n ev_repr = [x[0] for x in ev_repr]\n return ev_repr\n\n def __len__(self) -> int:\n raise NotImplementedError\n\n def __getitem__(self, index: int) -> Any:\n raise NotImplementedError" }, { "identifier": "get_objframe_idx_2_repr_idx", "path": "data/genx_utils/sequence_base.py", "snippet": "def get_objframe_idx_2_repr_idx(path: Path, ev_representation_name: str) -> np.ndarray:\n ev_repr_dir = get_event_representation_dir(path=path, ev_representation_name=ev_representation_name)\n objframe_idx_2_repr_idx = np.load(str(ev_repr_dir / 'objframe_idx_2_repr_idx.npy'))\n return objframe_idx_2_repr_idx" }, { "identifier": "RandomSpatialAugmentorGenX", "path": "data/utils/augmentor.py", "snippet": "class RandomSpatialAugmentorGenX:\n def __init__(self,\n dataset_hw: Tuple[int, int],\n automatic_randomization: bool,\n augm_config: DictConfig):\n assert isinstance(dataset_hw, tuple)\n assert len(dataset_hw) == 2\n assert all(x > 0 for x in dataset_hw)\n assert isinstance(automatic_randomization, bool)\n\n self.hw_tuple = dataset_hw\n self.automatic_randomization = automatic_randomization\n self.h_flip_prob = augm_config.prob_hflip\n self.rot_prob = augm_config.rotate.prob\n self.rot_min_angle_deg = augm_config.rotate.get('min_angle_deg', 0)\n self.rot_max_angle_deg = augm_config.rotate.max_angle_deg\n self.zoom_prob = augm_config.zoom.prob\n zoom_out_weight = augm_config.zoom.zoom_out.get('weight', 1)\n self.min_zoom_out_factor = augm_config.zoom.zoom_out.factor.min\n self.max_zoom_out_factor = augm_config.zoom.zoom_out.factor.max\n has_zoom_in = 'zoom_in' in augm_config.zoom\n zoom_in_weight = augm_config.zoom.zoom_in.weight if has_zoom_in else 0\n self.min_zoom_in_factor = augm_config.zoom.zoom_in.factor.min if has_zoom_in else 1\n self.max_zoom_in_factor = augm_config.zoom.zoom_in.factor.max if has_zoom_in else 1\n\n assert 0 <= self.h_flip_prob <= 1\n assert 0 <= self.rot_prob <= 1\n assert 0 <= self.rot_min_angle_deg <= self.rot_max_angle_deg\n assert 0 <= self.zoom_prob <= 1\n assert 0 <= zoom_in_weight\n assert self.max_zoom_in_factor >= self.min_zoom_in_factor >= 1\n assert 0 <= zoom_out_weight\n assert self.max_zoom_out_factor >= self.min_zoom_out_factor >= 1\n if not automatic_randomization:\n # We are probably applying augmentation to a streaming dataset for which zoom in augm is not supported.\n assert zoom_in_weight == 0, f'{zoom_in_weight=}'\n\n self.zoom_in_or_out_distribution = torch.distributions.categorical.Categorical(\n probs=th.tensor([zoom_in_weight, zoom_out_weight]))\n\n self.augm_state = 
AugmentationState(\n apply_h_flip=False,\n rotation=RotationState(active=False, angle_deg=0.0),\n apply_zoom_in=False,\n zoom_out=ZoomOutState(active=False, x0=0, y0=0, zoom_out_factor=1.0))\n\n def randomize_augmentation(self):\n \"\"\"Sample new augmentation parameters that will be consistently applied among the items.\n\n This function only works with augmentations that are input-independent.\n E.g. The zoom-in augmentation parameters depend on the labels and cannot be sampled in this function.\n For the same reason, it is not a very reasonable augmentation for the streaming scenario.\n \"\"\"\n self.augm_state.apply_h_flip = self.h_flip_prob > th.rand(1).item()\n\n self.augm_state.rotation.active = self.rot_prob > th.rand(1).item()\n if self.augm_state.rotation.active:\n sign = 1 if th.randn(1).item() >= 0 else -1\n self.augm_state.rotation.angle_deg = sign * torch_uniform_sample_scalar(\n min_value=self.rot_min_angle_deg, max_value=self.rot_max_angle_deg)\n\n # Zoom in and zoom out is mutually exclusive.\n do_zoom = self.zoom_prob > th.rand(1).item()\n do_zoom_in = self.zoom_in_or_out_distribution.sample().item() == 0\n do_zoom_out = not do_zoom_in\n do_zoom_in &= do_zoom\n do_zoom_out &= do_zoom\n self.augm_state.apply_zoom_in = do_zoom_in\n self.augm_state.zoom_out.active = do_zoom_out\n if do_zoom_out:\n rand_zoom_out_factor = torch_uniform_sample_scalar(\n min_value=self.min_zoom_out_factor, max_value=self.max_zoom_out_factor)\n height, width = self.hw_tuple\n zoom_window_h, zoom_window_w = int(height / rand_zoom_out_factor), int(width / rand_zoom_out_factor)\n x0_sampled = int(torch_uniform_sample_scalar(min_value=0, max_value=width - zoom_window_w))\n y0_sampled = int(torch_uniform_sample_scalar(min_value=0, max_value=height - zoom_window_h))\n self.augm_state.zoom_out.x0 = x0_sampled\n self.augm_state.zoom_out.y0 = y0_sampled\n self.augm_state.zoom_out.zoom_out_factor = rand_zoom_out_factor\n\n def _zoom_out_and_rescale(self, data_dict: LoaderDataDictGenX) -> LoaderDataDictGenX:\n zoom_out_state = self.augm_state.zoom_out\n\n zoom_out_factor = zoom_out_state.zoom_out_factor\n if zoom_out_factor == 1:\n return data_dict\n return {k: RandomSpatialAugmentorGenX._zoom_out_and_rescale_recursive(\n v, zoom_coordinates_x0y0=(zoom_out_state.x0, zoom_out_state.y0),\n zoom_out_factor=zoom_out_factor, datatype=k) for k, v in data_dict.items()}\n\n @staticmethod\n def _zoom_out_and_rescale_tensor(input_: th.Tensor,\n zoom_coordinates_x0y0: Tuple[int, int],\n zoom_out_factor: float,\n datatype: DataType) -> th.Tensor:\n assert len(zoom_coordinates_x0y0) == 2\n assert isinstance(input_, th.Tensor)\n\n if datatype == DataType.IMAGE or datatype == DataType.EV_REPR:\n assert input_.ndim == 3, f'{input_.shape=}'\n height, width = input_.shape[-2:]\n zoom_window_h, zoom_window_w = int(height / zoom_out_factor), int(width / zoom_out_factor)\n zoom_window = interpolate(input_.unsqueeze(0), size=(zoom_window_h, zoom_window_w), mode='nearest-exact')[0]\n output = th.zeros_like(input_)\n\n x0, y0 = zoom_coordinates_x0y0\n assert x0 >= 0\n assert y0 >= 0\n output[:, y0:y0 + zoom_window_h, x0:x0 + zoom_window_w] = zoom_window\n return output\n raise NotImplementedError\n\n @classmethod\n def _zoom_out_and_rescale_recursive(cls,\n input_: Any,\n zoom_coordinates_x0y0: Tuple[int, int],\n zoom_out_factor: float,\n datatype: DataType):\n if datatype in (DataType.IS_PADDED_MASK, DataType.IS_FIRST_SAMPLE):\n return input_\n if isinstance(input_, th.Tensor):\n return 
cls._zoom_out_and_rescale_tensor(input_=input_,\n zoom_coordinates_x0y0=zoom_coordinates_x0y0,\n zoom_out_factor=zoom_out_factor,\n datatype=datatype)\n if isinstance(input_, ObjectLabels) or isinstance(input_, SparselyBatchedObjectLabels):\n assert datatype == DataType.OBJLABELS or datatype == DataType.OBJLABELS_SEQ\n input_.zoom_out_and_rescale_(zoom_coordinates_x0y0=zoom_coordinates_x0y0, zoom_out_factor=zoom_out_factor)\n return input_\n if isinstance(input_, abc.Sequence):\n return [RandomSpatialAugmentorGenX._zoom_out_and_rescale_recursive(\n x, zoom_coordinates_x0y0=zoom_coordinates_x0y0, zoom_out_factor=zoom_out_factor, datatype=datatype) \\\n for x in input_]\n if isinstance(input_, abc.Mapping):\n return {key: RandomSpatialAugmentorGenX._zoom_out_and_rescale_recursive(\n value, zoom_coordinates_x0y0=zoom_coordinates_x0y0, zoom_out_factor=zoom_out_factor, datatype=datatype) \\\n for key, value in input_.items()}\n raise NotImplementedError\n\n def _zoom_in_and_rescale(self, data_dict: LoaderDataDictGenX) -> LoaderDataDictGenX:\n rand_zoom_in_factor = torch_uniform_sample_scalar(min_value=self.min_zoom_in_factor,\n max_value=self.max_zoom_in_factor)\n if rand_zoom_in_factor == 1:\n return data_dict\n\n height, width = RandomSpatialAugmentorGenX._hw_from_data(data_dict=data_dict)\n assert (height, width) == self.hw_tuple\n zoom_window_h, zoom_window_w = int(height / rand_zoom_in_factor), int(width / rand_zoom_in_factor)\n latest_objframe = get_most_recent_objframe(data_dict=data_dict, check_if_nonempty=True)\n if latest_objframe is None:\n warn(message=NO_LABEL_WARN_MSG, category=UserWarning, stacklevel=2)\n return data_dict\n x0_sampled, y0_sampled = randomly_sample_zoom_window_from_objframe(\n objframe=latest_objframe, zoom_window_height=zoom_window_h, zoom_window_width=zoom_window_w)\n\n return {k: RandomSpatialAugmentorGenX._zoom_in_and_rescale_recursive(\n v, zoom_coordinates_x0y0=(x0_sampled, y0_sampled), zoom_in_factor=rand_zoom_in_factor, datatype=k) \\\n for k, v in data_dict.items()}\n\n @staticmethod\n def _zoom_in_and_rescale_tensor(input_: th.Tensor,\n zoom_coordinates_x0y0: Tuple[int, int],\n zoom_in_factor: float,\n datatype: DataType) -> th.Tensor:\n assert len(zoom_coordinates_x0y0) == 2\n assert isinstance(input_, th.Tensor)\n\n if datatype == DataType.IMAGE or datatype == DataType.EV_REPR:\n assert input_.ndim == 3, f'{input_.shape=}'\n height, width = input_.shape[-2:]\n zoom_window_h, zoom_window_w = int(height / zoom_in_factor), int(width / zoom_in_factor)\n\n x0, y0 = zoom_coordinates_x0y0\n assert x0 >= 0\n assert y0 >= 0\n zoom_canvas = input_[..., y0:y0 + zoom_window_h, x0:x0 + zoom_window_w].unsqueeze(0)\n output = interpolate(zoom_canvas, size=(height, width), mode='nearest-exact')\n output = output[0]\n return output\n raise NotImplementedError\n\n @classmethod\n def _zoom_in_and_rescale_recursive(cls,\n input_: Any,\n zoom_coordinates_x0y0: Tuple[int, int],\n zoom_in_factor: float,\n datatype: DataType):\n if datatype in (DataType.IS_PADDED_MASK, DataType.IS_FIRST_SAMPLE):\n return input_\n if isinstance(input_, th.Tensor):\n return cls._zoom_in_and_rescale_tensor(input_=input_,\n zoom_coordinates_x0y0=zoom_coordinates_x0y0,\n zoom_in_factor=zoom_in_factor,\n datatype=datatype)\n if isinstance(input_, ObjectLabels) or isinstance(input_, SparselyBatchedObjectLabels):\n assert datatype == DataType.OBJLABELS or datatype == DataType.OBJLABELS_SEQ\n input_.zoom_in_and_rescale_(zoom_coordinates_x0y0=zoom_coordinates_x0y0, zoom_in_factor=zoom_in_factor)\n 
return input_\n if isinstance(input_, abc.Sequence):\n return [RandomSpatialAugmentorGenX._zoom_in_and_rescale_recursive(\n x, zoom_coordinates_x0y0=zoom_coordinates_x0y0, zoom_in_factor=zoom_in_factor, datatype=datatype) \\\n for x in input_]\n if isinstance(input_, abc.Mapping):\n return {key: RandomSpatialAugmentorGenX._zoom_in_and_rescale_recursive(\n value, zoom_coordinates_x0y0=zoom_coordinates_x0y0, zoom_in_factor=zoom_in_factor, datatype=datatype) \\\n for key, value in input_.items()}\n raise NotImplementedError\n\n def _rotate(self, data_dict: LoaderDataDictGenX) -> LoaderDataDictGenX:\n angle_deg = self.augm_state.rotation.angle_deg\n return {k: RandomSpatialAugmentorGenX._rotate_recursive(v, angle_deg=angle_deg, datatype=k)\n for k, v in data_dict.items()}\n\n @staticmethod\n def _rotate_tensor(input_: Any, angle_deg: float, datatype: DataType):\n assert isinstance(input_, th.Tensor)\n if datatype == DataType.IMAGE or datatype == DataType.EV_REPR:\n return rotate(input_, angle=angle_deg, interpolation=InterpolationMode.NEAREST)\n raise NotImplementedError\n\n @classmethod\n def _rotate_recursive(cls, input_: Any, angle_deg: float, datatype: DataType):\n if datatype in (DataType.IS_PADDED_MASK, DataType.IS_FIRST_SAMPLE):\n return input_\n if isinstance(input_, th.Tensor):\n return cls._rotate_tensor(input_=input_, angle_deg=angle_deg, datatype=datatype)\n if isinstance(input_, ObjectLabels) or isinstance(input_, SparselyBatchedObjectLabels):\n assert datatype == DataType.OBJLABELS or datatype == DataType.OBJLABELS_SEQ\n input_.rotate_(angle_deg=angle_deg)\n return input_\n if isinstance(input_, abc.Sequence):\n return [RandomSpatialAugmentorGenX._rotate_recursive(x, angle_deg=angle_deg, datatype=datatype) \\\n for x in input_]\n if isinstance(input_, abc.Mapping):\n return {key: RandomSpatialAugmentorGenX._rotate_recursive(value, angle_deg=angle_deg, datatype=datatype) \\\n for key, value in input_.items()}\n raise NotImplementedError\n\n @staticmethod\n def _flip(data_dict: LoaderDataDictGenX, type_: str) -> LoaderDataDictGenX:\n assert type_ in {'h', 'v'}\n return {k: RandomSpatialAugmentorGenX._flip_recursive(v, flip_type=type_, datatype=k) \\\n for k, v in data_dict.items()}\n\n @staticmethod\n def _flip_tensor(input_: Any, flip_type: str, datatype: DataType):\n assert isinstance(input_, th.Tensor)\n flip_axis = -1 if flip_type == 'h' else -2\n if datatype == DataType.IMAGE or datatype == DataType.EV_REPR:\n return th.flip(input_, dims=[flip_axis])\n if datatype == DataType.FLOW:\n assert input_.shape[-3] == 2\n flow_idx = 0 if flip_type == 'h' else 1\n input_ = th.flip(input_, dims=[flip_axis])\n # Also flip the sign of the x (horizontal) or y (vertical) component of the flow.\n input_[..., flow_idx, :, :] = -1 * input_[..., flow_idx, :, :]\n return input_\n raise NotImplementedError\n\n @classmethod\n def _flip_recursive(cls, input_: Any, flip_type: str, datatype: DataType):\n if datatype in (DataType.IS_PADDED_MASK, DataType.IS_FIRST_SAMPLE):\n return input_\n if isinstance(input_, th.Tensor):\n return cls._flip_tensor(input_=input_, flip_type=flip_type, datatype=datatype)\n if isinstance(input_, ObjectLabels) or isinstance(input_, SparselyBatchedObjectLabels):\n assert datatype == DataType.OBJLABELS or datatype == DataType.OBJLABELS_SEQ\n if flip_type == 'h':\n # in-place modification\n input_.flip_lr_()\n return input_\n else:\n raise NotImplementedError\n if isinstance(input_, abc.Sequence):\n return [RandomSpatialAugmentorGenX._flip_recursive(x, flip_type=flip_type, 
datatype=datatype) \\\n for x in input_]\n if isinstance(input_, abc.Mapping):\n return {key: RandomSpatialAugmentorGenX._flip_recursive(value, flip_type=flip_type, datatype=datatype) \\\n for key, value in input_.items()}\n raise NotImplementedError\n\n @staticmethod\n def _hw_from_data(data_dict: LoaderDataDictGenX) -> Tuple[int, int]:\n height = None\n width = None\n for k, v in data_dict.items():\n _hw = None\n if k == DataType.OBJLABELS or k == DataType.OBJLABELS_SEQ:\n hw = v.input_size_hw\n if hw is not None:\n _hw = v.input_size_hw\n elif k in (DataType.IMAGE, DataType.FLOW, DataType.EV_REPR):\n _hw = v[0].shape[-2:]\n if _hw is not None:\n _height, _width = _hw\n if height is None:\n assert width is None\n height, width = _height, _width\n else:\n assert height == _height and width == _width\n assert height is not None\n assert width is not None\n return height, width\n\n def __call__(self, data_dict: LoaderDataDictGenX):\n \"\"\"\n :param data_dict: LoaderDataDictGenX type, image-based tensors must have (*, h, w) shape.\n :return: map with same keys but spatially augmented values.\n \"\"\"\n if self.automatic_randomization:\n self.randomize_augmentation()\n\n if self.augm_state.apply_h_flip:\n data_dict = self._flip(data_dict, type_='h')\n if self.augm_state.rotation.active:\n data_dict = self._rotate(data_dict)\n if self.augm_state.apply_zoom_in:\n data_dict = self._zoom_in_and_rescale(data_dict=data_dict)\n if self.augm_state.zoom_out.active:\n assert not self.augm_state.apply_zoom_in\n data_dict = self._zoom_out_and_rescale(data_dict=data_dict)\n return data_dict" }, { "identifier": "DatasetMode", "path": "data/utils/types.py", "snippet": "class DataType(Enum):\nclass DatasetType(Enum):\nclass DatasetMode(Enum):\nclass DatasetSamplingMode(StrEnum):\nclass ObjDetOutput(Enum):\n EV_REPR = auto()\n FLOW = auto()\n IMAGE = auto()\n OBJLABELS = auto()\n OBJLABELS_SEQ = auto()\n IS_PADDED_MASK = auto()\n IS_FIRST_SAMPLE = auto()\n TOKEN_MASK = auto()\n GEN1 = auto()\n GEN4 = auto()\n TRAIN = auto()\n VALIDATION = auto()\n TESTING = auto()\n RANDOM = 'random'\n STREAM = 'stream'\n MIXED = 'mixed'\n LABELS_PROPH = auto()\n PRED_PROPH = auto()\n EV_REPR = auto()\n SKIP_VIZ = auto()" }, { "identifier": "TimerDummy", "path": "utils/timers.py", "snippet": "class TimerDummy:\n def __init__(self, *args, **kwargs):\n pass\n\n def __enter__(self):\n pass\n\n def __exit__(self, *args):\n pass" } ]
from pathlib import Path from typing import List, Optional, Union, Tuple from omegaconf import DictConfig from torchdata.datapipes.iter import IterDataPipe from data.genx_utils.labels import SparselyBatchedObjectLabels from data.genx_utils.sequence_base import SequenceBase, get_objframe_idx_2_repr_idx from data.utils.augmentor import RandomSpatialAugmentorGenX from data.utils.types import DatasetMode, DataType, DatasetType, LoaderDataDictGenX from utils.timers import TimerDummy as Timer import h5py import numpy as np import torch import ipdb
6,993
def _scalar_as_1d_array(scalar: Union[int, float]): return np.atleast_1d(scalar) def _get_ev_repr_range_indices(indices: np.ndarray, max_len: int) -> List[Tuple[int, int]]: """ Computes a list of index ranges based on the input array of indices and a maximum length. The index ranges are computed such that the difference between consecutive indices should not exceed the maximum length (max_len). Parameters: ----------- indices : np.ndarray A NumPy array of indices, where the indices are sorted in ascending order. max_len : int The maximum allowed length between consecutive indices. Returns: -------- out : List[Tuple[int, int]] A list of tuples, where each tuple contains two integers representing the start and stop indices of the range. """ # np.flatnonzero returns the indices of the nonzero elements meta_indices_stop = np.flatnonzero(np.diff(indices) > max_len) meta_indices_start = np.concatenate((np.atleast_1d(0), meta_indices_stop + 1)) meta_indices_stop = np.concatenate((meta_indices_stop, np.atleast_1d(len(indices) - 1))) out = list() for meta_idx_start, meta_idx_stop in zip(meta_indices_start, meta_indices_stop): idx_start = max(indices[meta_idx_start] - max_len + 1, 0) idx_stop = indices[meta_idx_stop] + 1 out.append((idx_start, idx_stop)) return out class SequenceForIter(SequenceBase): def __init__(self, path: Path,
def _scalar_as_1d_array(scalar: Union[int, float]): return np.atleast_1d(scalar) def _get_ev_repr_range_indices(indices: np.ndarray, max_len: int) -> List[Tuple[int, int]]: """ Computes a list of index ranges based on the input array of indices and a maximum length. The index ranges are computed such that the difference between consecutive indices should not exceed the maximum length (max_len). Parameters: ----------- indices : np.ndarray A NumPy array of indices, where the indices are sorted in ascending order. max_len : int The maximum allowed length between consecutive indices. Returns: -------- out : List[Tuple[int, int]] A list of tuples, where each tuple contains two integers representing the start and stop indices of the range. """ # np.flatnonzero returns the indices of the nonzero elements meta_indices_stop = np.flatnonzero(np.diff(indices) > max_len) meta_indices_start = np.concatenate((np.atleast_1d(0), meta_indices_stop + 1)) meta_indices_stop = np.concatenate((meta_indices_stop, np.atleast_1d(len(indices) - 1))) out = list() for meta_idx_start, meta_idx_stop in zip(meta_indices_start, meta_indices_stop): idx_start = max(indices[meta_idx_start] - max_len + 1, 0) idx_stop = indices[meta_idx_stop] + 1 out.append((idx_start, idx_stop)) return out class SequenceForIter(SequenceBase): def __init__(self, path: Path,
dataset_mode: DatasetMode,
4
2023-12-29 04:04:34+00:00
8k
yeyingdege/ctr-din-pytorch
din/train.py
[ { "identifier": "DataIterator", "path": "din/data_iterator.py", "snippet": "class DataIterator:\n\n def __init__(self, source,\n uid_voc,\n mid_voc,\n cat_voc,\n batch_size=128,\n maxlen=100,\n skip_empty=False,\n shuffle_each_epoch=False,\n sort_by_length=True,\n max_batch_size=20,\n minlen=None):\n self.source_orig = copy.deepcopy(source)\n if shuffle_each_epoch:\n # np.random.shuffle(source)\n # self.source = source\n self.source = shuffle.main(self.source_orig, temporary=True)\n else:\n self.source = fopen(source, 'r')\n self.source_dicts = []\n for source_dict in [uid_voc, mid_voc, cat_voc]:\n self.source_dicts.append(load_dict(source_dict))\n #uid:543060, mid:367983, cat: 1601\n f_meta = open(\"data/item-info\", \"r\")\n meta_map = {} # 2370585 keys\n for line in f_meta:\n arr = line.strip().split(\"\\t\")\n if arr[0] not in meta_map:\n meta_map[arr[0].encode(\"UTF-8\")] = arr[1]\n self.meta_id_map ={} # 367983\n for key in meta_map:\n val = meta_map[key].encode(\"UTF-8\")\n if key in self.source_dicts[1]:\n mid_idx = self.source_dicts[1][key]\n else:\n mid_idx = 0\n if val in self.source_dicts[2]:\n cat_idx = self.source_dicts[2][val]\n else:\n cat_idx = 0\n self.meta_id_map[mid_idx] = cat_idx\n\n f_review = open(\"data/reviews-info\", \"r\")\n self.mid_list_for_random = [] # 8898041\n for line in f_review:\n arr = line.strip().split(\"\\t\")\n tmp_idx = 0\n tmp = arr[1].encode(\"UTF-8\")\n if tmp in self.source_dicts[1]:\n tmp_idx = self.source_dicts[1][tmp]\n self.mid_list_for_random.append(tmp_idx)\n # print(f\"Unique values {len(np.unique(np.array(self.mid_list_for_random)))}\") #367982\n self.batch_size = batch_size\n self.maxlen = maxlen\n self.minlen = minlen\n self.skip_empty = skip_empty\n\n self.n_uid = len(self.source_dicts[0])\n self.n_mid = len(self.source_dicts[1])\n self.n_cat = len(self.source_dicts[2])\n\n self.shuffle = shuffle_each_epoch\n self.sort_by_length = sort_by_length\n\n self.source_buffer = []\n self.k = batch_size * max_batch_size\n\n self.end_of_data = False\n\n def get_n(self):\n return self.n_uid, self.n_mid, self.n_cat\n\n def __iter__(self):\n return self\n\n def reset(self):\n if self.shuffle:\n # np.random.shuffle(self.source_orig)\n # self.source = self.source_orig\n self.source= shuffle.main(self.source_orig, temporary=True)\n else:\n self.source.seek(0)\n\n def next(self):\n if self.end_of_data:\n self.end_of_data = False\n self.reset()\n raise StopIteration\n\n source = []\n target = []\n\n if len(self.source_buffer) == 0:\n for k_ in range(self.k): #2560\n ss = self.source.readline()\n if ss == \"\":\n break\n self.source_buffer.append(ss.strip(\"\\n\").split(\"\\t\"))\n\n # sort by history behavior length\n if self.sort_by_length:\n his_length = np.array([len(s[4].split(\"\u0002\")) for s in self.source_buffer])\n tidx = his_length.argsort()\n\n _sbuf = [self.source_buffer[i] for i in tidx]\n self.source_buffer = _sbuf\n else:\n self.source_buffer.reverse()\n\n if len(self.source_buffer) == 0:\n self.end_of_data = False\n self.reset()\n raise StopIteration\n\n try:\n\n # actual work here\n while True:\n\n # read from source file and map to word index\n try:\n ss = self.source_buffer.pop()\n except IndexError:\n break\n ss[1] = ss[1].encode(\"UTF-8\")\n ss[2] = ss[2].encode(\"UTF-8\")\n ss[3] = ss[3].encode(\"UTF-8\")\n uid = self.source_dicts[0][ss[1]] if ss[1] in self.source_dicts[0] else 0\n mid = self.source_dicts[1][ss[2]] if ss[2] in self.source_dicts[1] else 0\n cat = self.source_dicts[2][ss[3]] if ss[3] in self.source_dicts[2] else 
0\n tmp = []\n for fea in ss[4].split(\"\u0002\"):\n m = self.source_dicts[1][fea.encode(\"UTF-8\")] if fea.encode(\"UTF-8\") in self.source_dicts[1] else 0\n tmp.append(m)\n mid_list = tmp\n\n tmp1 = []\n for fea in ss[5].split(\"\u0002\"):\n c = self.source_dicts[2][fea.encode(\"UTF-8\")] if fea.encode(\"UTF-8\") in self.source_dicts[2] else 0\n tmp1.append(c)\n cat_list = tmp1\n\n # read from source file and map to word index\n\n #if len(mid_list) > self.maxlen:\n # continue\n if self.minlen != None:\n if len(mid_list) <= self.minlen:\n continue\n if self.skip_empty and (not mid_list):\n continue\n\n noclk_mid_list = []\n noclk_cat_list = []\n for pos_mid in mid_list:\n noclk_tmp_mid = []\n noclk_tmp_cat = []\n noclk_index = 0\n while True:\n noclk_mid_indx = random.randint(0, len(self.mid_list_for_random)-1)\n noclk_mid = self.mid_list_for_random[noclk_mid_indx]\n if noclk_mid == pos_mid:\n continue\n noclk_tmp_mid.append(noclk_mid)\n noclk_tmp_cat.append(self.meta_id_map[noclk_mid])\n noclk_index += 1\n if noclk_index >= 5:\n break\n noclk_mid_list.append(noclk_tmp_mid)\n noclk_cat_list.append(noclk_tmp_cat)\n source.append([uid, mid, cat, mid_list, cat_list, noclk_mid_list, noclk_cat_list])\n target.append([float(ss[0]), 1-float(ss[0])])\n\n if len(source) >= self.batch_size or len(target) >= self.batch_size:\n break\n except IOError:\n self.end_of_data = True\n\n # all sentence pairs in maxibatch filtered out because of length\n if len(source) == 0 or len(target) == 0:\n source, target = self.next()\n\n return source, target" }, { "identifier": "DeepInterestNetwork", "path": "din/model.py", "snippet": "class DeepInterestNetwork(nn.Module):\n def __init__(self, n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_DIM=[162,200,80,2]):\n super(DeepInterestNetwork, self).__init__()\n self.embedding_dim = EMBEDDING_DIM\n self.hid_dim = HIDDEN_DIM\n\n # embeddings\n self.uid_embeddings = EmbeddingLayer(n_uid, self.embedding_dim)\n self.mid_embeddings = EmbeddingLayer(n_mid, self.embedding_dim)\n self.cat_embeddings = EmbeddingLayer(n_cat, self.embedding_dim)\n\n self.attn = DinAttentionLayer(embedding_dim=self.embedding_dim*2)\n mlp_input_dim = self.embedding_dim * 9\n self.mlp = nn.Sequential(\n FCLayer(mlp_input_dim, hidden_size=self.hid_dim[1], bias=True, batch_norm=True, activation='dice'),\n FCLayer(self.hid_dim[1], hidden_size=self.hid_dim[2], bias=True, activation='dice'),\n FCLayer(self.hid_dim[2], hidden_size=self.hid_dim[3], bias=False, activation='none')\n )\n uid_params = sum(p.numel() for p in self.uid_embeddings.parameters() if p.requires_grad)\n print(f\"uid_embeddings trainable parameters: {uid_params}\")\n mid_params = sum(p.numel() for p in self.mid_embeddings.parameters() if p.requires_grad)\n print(f\"mid_embeddings trainable parameters: {mid_params}\")\n cat_params = sum(p.numel() for p in self.cat_embeddings.parameters() if p.requires_grad)\n print(f\"cat_embeddings trainable parameters: {cat_params}\")\n att_params = sum(p.numel() for p in self.attn.parameters() if p.requires_grad)\n print(f\"DinAttentionLayer trainable parameters: {att_params}\")\n mlp_params = sum(p.numel() for p in self.mlp.parameters() if p.requires_grad)\n print(f\"MLP trainable parameters: {mlp_params}\")\n\n\n \n def forward(self, uids, mids, cats, mid_his, cat_his, mid_mask, noclk_mids, noclk_cats, use_negsampling=False):\n \"\"\"input: uids, mids, cats, mid_his, cat_his, mid_mask, noclk_mids, noclk_cats\n \"\"\"\n # item_eb, item_his_eb, mask\n uid_batch_eb = self.uid_embeddings(uids) # [B, 
emb_dim]\n mid_batch_eb = self.mid_embeddings(mids)\n cat_batch_eb = self.cat_embeddings(cats)\n mid_his_batch_eb = self.mid_embeddings(mid_his) # [128, 100, 18]\n cat_his_batch_eb = self.cat_embeddings(cat_his)\n \n item_eb = torch.concat((mid_batch_eb, cat_batch_eb), 1) # [128, 36]\n item_his_eb = torch.concat((mid_his_batch_eb, cat_his_batch_eb), 2) # [128, 100, 36]\n item_his_eb_sum = torch.sum(item_his_eb, dim=1) # [128, 36]\n \n if use_negsampling:\n noclk_mid_his_batch_eb = self.mid_embeddings(noclk_mids)\n noclk_cat_his_batch_eb = self.cat_embeddings(noclk_cats)\n noclk_item_his_eb = torch.concat((noclk_mid_his_batch_eb[:, :, 0, :], noclk_cat_his_batch_eb[:, :, 0, :]), -1)\n noclk_item_his_eb = noclk_item_his_eb.reshape(-1, noclk_mid_his_batch_eb.shape[1], 36)\n noclk_his_eb = torch.concat((noclk_mid_his_batch_eb, noclk_cat_his_batch_eb), -1)\n noclk_his_eb_sum_1 = torch.sum(noclk_his_eb, dim=2)\n noclk_his_eb_sum = torch.sum(noclk_his_eb_sum_1, 1)\n \n attention_output = self.attn(item_eb, item_his_eb, mid_mask) # [128, 1, 36]\n att_fea = torch.sum(attention_output, dim=1)\n inp = torch.concat((uid_batch_eb, item_eb, item_his_eb_sum, item_eb * item_his_eb_sum, att_fea), dim=-1) # [128, 162]\n\n y_hat = F.softmax(self.mlp(inp), dim=-1)\n\n return y_hat" } ]
import time import random import sys, os import numpy as np import argparse import torch from din.data_iterator import DataIterator from din.model import DeepInterestNetwork from din.utils import *
5,028
# for src, tgt in test_data: nums += 1 uids, mids, cats, mid_his, cat_his, mid_mask, target, sl, noclk_mids, noclk_cats = prepare_data(src, tgt, return_neg=True) uids = transform(uids) mids = transform(mids) cats = transform(cats) mid_his = transform(mid_his) cat_his = transform(cat_his) mid_mask = transform(mid_mask) noclk_mids = transform(noclk_mids) noclk_cats = transform(noclk_cats) target = transform(target) prob = model(uids, mids, cats, mid_his, cat_his, mid_mask, noclk_mids, noclk_cats) loss = - torch.mean(torch.log(prob) * target) # acc = torch.mean(torch.round(prob) == target) acc = torch.sum(torch.round(prob) * target) / target.shape[0] loss_sum += loss # aux_loss_sum = aux_loss accuracy_sum += acc prob_1 = prob[:, 0].tolist() target_1 = target[:, 0].tolist() for p ,t in zip(prob_1, target_1): stored_arr.append([p, t]) test_auc = calc_auc(stored_arr) accuracy_sum = accuracy_sum / nums loss_sum = loss_sum / nums global best_auc if best_auc < test_auc: best_auc = test_auc torch.save({'model_state_dict': model.state_dict()}, model_path) return test_auc, loss_sum, accuracy_sum def train_one_epoch(epoch, model, train_data, test_data, optimizer, maxlen, test_iter, save_iter, best_model_path, model_path): train_data.reset() iter = 0 loss_sum = 0.0 accuracy_sum = 0. for _ in range(8000): optimizer.zero_grad() src, tgt = train_data.next() # (B,), (B), (B), (B, 100), (B, 100), (B, 100), (B, 2), (B), (128, 100, 5), (128, 100, 5) uids, mids, cats, mid_his, cat_his, mid_mask, target, sl, noclk_mids, noclk_cats = prepare_data(src, tgt, maxlen, return_neg=True) uids = transform(uids) mids = transform(mids) cats = transform(cats) mid_his = transform(mid_his) cat_his = transform(cat_his) mid_mask = transform(mid_mask) noclk_mids = transform(noclk_mids) noclk_cats = transform(noclk_cats) target = transform(target) y_hat = model(uids, mids, cats, mid_his, cat_his, mid_mask, noclk_mids, noclk_cats) y_hat = y_hat + 1e-8 loss = - torch.mean(torch.log(y_hat) * target) # acc = torch.mean(torch.round(y_hat) == target) acc = torch.sum(torch.round(y_hat) * target) / target.shape[0] loss_sum += loss accuracy_sum += acc loss.backward() optimizer.step() iter += 1 if (iter % test_iter) == 0: print('[epoch: %d/iter: %d] ----> train_loss: %.4f ---- train_accuracy: %.4f' % \ (epoch, iter, loss_sum / test_iter, accuracy_sum / test_iter)) test_auc, test_loss, test_accuracy = eval(test_data, model, best_model_path) print('test_auc: %.4f ----test_loss: %.4f ---- test_accuracy: %.4f' % (test_auc, test_loss.data, test_accuracy.data)) loss_sum = 0.0 accuracy_sum = 0.0 if (iter % save_iter) == 0: # print('save model iter: %d' %(iter)) torch.save({ 'EPOCH': epoch, 'iter': iter, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss, }, f"{model_path}_ep{epoch}_{iter}") return model, optimizer def train( train_file = "data/local_train_splitByUser", test_file = "data/local_test_splitByUser", uid_voc = "data/uid_voc.pkl", mid_voc = "data/mid_voc.pkl", cat_voc = "data/cat_voc.pkl", batch_size = 128, maxlen = 100, test_iter = 500, save_iter = 1000, model_type = 'DIN', seed = 2, epochs = 5 ): out_dir1 = "output" out_dir2 = "best_model" os.makedirs(out_dir1, exist_ok=True) os.makedirs(out_dir2, exist_ok=True) model_path = f"{out_dir1}/ckpt_noshuff{model_type}{str(seed)}" best_model_path = f"{out_dir2}/ckpt_noshuff{model_type}{str(seed)}" train_data = DataIterator(train_file, uid_voc, mid_voc, cat_voc, batch_size, maxlen, shuffle_each_epoch=False) test_data = DataIterator(test_file, 
uid_voc, mid_voc, cat_voc, batch_size, maxlen) n_uid, n_mid, n_cat = train_data.get_n() #uid: 543060, mid: 367983, cat: 1601 if model_type == 'DIN':
sys.path.append(os.getcwd()) EMBEDDING_DIM = 12 HIDDEN_DIM = [108,200,80,2] ATTENTION_SIZE = EMBEDDING_DIM * 2 best_auc = 0.0 device = "cuda" if torch.cuda.is_available() else "cpu" def transform(data): return torch.from_numpy(data).to(device) def prepare_data(input, target, maxlen=None, return_neg=False): # x: a list of sentences lengths_x = [len(s[4]) for s in input] seqs_mid = [inp[3] for inp in input] seqs_cat = [inp[4] for inp in input] noclk_seqs_mid = [inp[5] for inp in input] noclk_seqs_cat = [inp[6] for inp in input] if maxlen is not None: new_seqs_mid = [] new_seqs_cat = [] new_noclk_seqs_mid = [] new_noclk_seqs_cat = [] new_lengths_x = [] for l_x, inp in zip(lengths_x, input): if l_x > maxlen: new_seqs_mid.append(inp[3][l_x - maxlen:]) new_seqs_cat.append(inp[4][l_x - maxlen:]) new_noclk_seqs_mid.append(inp[5][l_x - maxlen:]) new_noclk_seqs_cat.append(inp[6][l_x - maxlen:]) new_lengths_x.append(maxlen) else: new_seqs_mid.append(inp[3]) new_seqs_cat.append(inp[4]) new_noclk_seqs_mid.append(inp[5]) new_noclk_seqs_cat.append(inp[6]) new_lengths_x.append(l_x) lengths_x = new_lengths_x seqs_mid = new_seqs_mid seqs_cat = new_seqs_cat noclk_seqs_mid = new_noclk_seqs_mid noclk_seqs_cat = new_noclk_seqs_cat if len(lengths_x) < 1: return None, None, None, None n_samples = len(seqs_mid) maxlen_x = np.max(lengths_x) neg_samples = len(noclk_seqs_mid[0][0]) mid_his = np.zeros((n_samples, maxlen_x)).astype('int64') cat_his = np.zeros((n_samples, maxlen_x)).astype('int64') noclk_mid_his = np.zeros((n_samples, maxlen_x, neg_samples)).astype('int64') noclk_cat_his = np.zeros((n_samples, maxlen_x, neg_samples)).astype('int64') mid_mask = np.zeros((n_samples, maxlen_x)).astype('float32') for idx, [s_x, s_y, no_sx, no_sy] in enumerate(zip(seqs_mid, seqs_cat, noclk_seqs_mid, noclk_seqs_cat)): mid_mask[idx, :lengths_x[idx]] = 1. mid_his[idx, :lengths_x[idx]] = s_x cat_his[idx, :lengths_x[idx]] = s_y noclk_mid_his[idx, :lengths_x[idx], :] = no_sx noclk_cat_his[idx, :lengths_x[idx], :] = no_sy uids = np.array([inp[0] for inp in input]) mids = np.array([inp[1] for inp in input]) cats = np.array([inp[2] for inp in input]) if return_neg: return uids, mids, cats, mid_his, cat_his, mid_mask, np.array(target), np.array(lengths_x), noclk_mid_his, noclk_cat_his else: return uids, mids, cats, mid_his, cat_his, mid_mask, np.array(target), np.array(lengths_x) def eval(test_data, model, model_path): test_data.reset() loss_sum = 0. accuracy_sum = 0. 
nums = 0 stored_arr = [] for _ in range(100): src, tgt = test_data.next() # for src, tgt in test_data: nums += 1 uids, mids, cats, mid_his, cat_his, mid_mask, target, sl, noclk_mids, noclk_cats = prepare_data(src, tgt, return_neg=True) uids = transform(uids) mids = transform(mids) cats = transform(cats) mid_his = transform(mid_his) cat_his = transform(cat_his) mid_mask = transform(mid_mask) noclk_mids = transform(noclk_mids) noclk_cats = transform(noclk_cats) target = transform(target) prob = model(uids, mids, cats, mid_his, cat_his, mid_mask, noclk_mids, noclk_cats) loss = - torch.mean(torch.log(prob) * target) # acc = torch.mean(torch.round(prob) == target) acc = torch.sum(torch.round(prob) * target) / target.shape[0] loss_sum += loss # aux_loss_sum = aux_loss accuracy_sum += acc prob_1 = prob[:, 0].tolist() target_1 = target[:, 0].tolist() for p ,t in zip(prob_1, target_1): stored_arr.append([p, t]) test_auc = calc_auc(stored_arr) accuracy_sum = accuracy_sum / nums loss_sum = loss_sum / nums global best_auc if best_auc < test_auc: best_auc = test_auc torch.save({'model_state_dict': model.state_dict()}, model_path) return test_auc, loss_sum, accuracy_sum def train_one_epoch(epoch, model, train_data, test_data, optimizer, maxlen, test_iter, save_iter, best_model_path, model_path): train_data.reset() iter = 0 loss_sum = 0.0 accuracy_sum = 0. for _ in range(8000): optimizer.zero_grad() src, tgt = train_data.next() # (B,), (B), (B), (B, 100), (B, 100), (B, 100), (B, 2), (B), (128, 100, 5), (128, 100, 5) uids, mids, cats, mid_his, cat_his, mid_mask, target, sl, noclk_mids, noclk_cats = prepare_data(src, tgt, maxlen, return_neg=True) uids = transform(uids) mids = transform(mids) cats = transform(cats) mid_his = transform(mid_his) cat_his = transform(cat_his) mid_mask = transform(mid_mask) noclk_mids = transform(noclk_mids) noclk_cats = transform(noclk_cats) target = transform(target) y_hat = model(uids, mids, cats, mid_his, cat_his, mid_mask, noclk_mids, noclk_cats) y_hat = y_hat + 1e-8 loss = - torch.mean(torch.log(y_hat) * target) # acc = torch.mean(torch.round(y_hat) == target) acc = torch.sum(torch.round(y_hat) * target) / target.shape[0] loss_sum += loss accuracy_sum += acc loss.backward() optimizer.step() iter += 1 if (iter % test_iter) == 0: print('[epoch: %d/iter: %d] ----> train_loss: %.4f ---- train_accuracy: %.4f' % \ (epoch, iter, loss_sum / test_iter, accuracy_sum / test_iter)) test_auc, test_loss, test_accuracy = eval(test_data, model, best_model_path) print('test_auc: %.4f ----test_loss: %.4f ---- test_accuracy: %.4f' % (test_auc, test_loss.data, test_accuracy.data)) loss_sum = 0.0 accuracy_sum = 0.0 if (iter % save_iter) == 0: # print('save model iter: %d' %(iter)) torch.save({ 'EPOCH': epoch, 'iter': iter, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss, }, f"{model_path}_ep{epoch}_{iter}") return model, optimizer def train( train_file = "data/local_train_splitByUser", test_file = "data/local_test_splitByUser", uid_voc = "data/uid_voc.pkl", mid_voc = "data/mid_voc.pkl", cat_voc = "data/cat_voc.pkl", batch_size = 128, maxlen = 100, test_iter = 500, save_iter = 1000, model_type = 'DIN', seed = 2, epochs = 5 ): out_dir1 = "output" out_dir2 = "best_model" os.makedirs(out_dir1, exist_ok=True) os.makedirs(out_dir2, exist_ok=True) model_path = f"{out_dir1}/ckpt_noshuff{model_type}{str(seed)}" best_model_path = f"{out_dir2}/ckpt_noshuff{model_type}{str(seed)}" train_data = DataIterator(train_file, uid_voc, mid_voc, cat_voc, 
batch_size, maxlen, shuffle_each_epoch=False) test_data = DataIterator(test_file, uid_voc, mid_voc, cat_voc, batch_size, maxlen) n_uid, n_mid, n_cat = train_data.get_n() #uid: 543060, mid: 367983, cat: 1601 if model_type == 'DIN':
model = DeepInterestNetwork(n_uid, n_mid, n_cat, EMBEDDING_DIM, HIDDEN_DIM)
1
2023-12-27 05:53:50+00:00
8k
Enthusiasm23/primkit
src/primkit/designer/Primer.py
[ { "identifier": "MFE_PRIMER", "path": "src/primkit/config.py", "snippet": "MFE_PRIMER = os.environ.get('MFE_PRIMER', \"https://mfeprimer3.igenetech.com\")" }, { "identifier": "PRIMER_URL", "path": "src/primkit/config.py", "snippet": "PRIMER_URL = os.environ.get('PRIMER_URL', f\"{MFE_PRIMER}/muld\")" }, { "identifier": "RETRY_INTERVAL", "path": "src/primkit/config.py", "snippet": "RETRY_INTERVAL = 2" }, { "identifier": "MAX_RETRIES", "path": "src/primkit/config.py", "snippet": "MAX_RETRIES = 3" }, { "identifier": "CHECK_INTERVAL", "path": "src/primkit/config.py", "snippet": "CHECK_INTERVAL = 3" }, { "identifier": "PRIMER_PARAMS", "path": "src/primkit/config.py", "snippet": "PRIMER_PARAMS = {\n 'DB': 'hg19.fa', # hg19.fa/mm10.fa\n 'SnpFilter': 'yes', # yes/no\n 'PrimerMinSize': '17', # 15-35\n 'PrimerOptSize': '22', # 15-35\n 'PrimerMaxSize': '25', # 15-35\n 'PrimerMinTm': '58', # 0-100\n 'PrimerOptTm': '60', # 0-100\n 'PrimerMaxTm': '62', # 0-100\n 'ProdMinSize': '80', # 0-1000000\n 'ProdMaxSize': '120', # 0-1000000\n 'DimerScore': '5', # 3-20\n 'HairpinScore': '5', # 3-20\n 'Tm': '47', # 0-100\n 'SpecMinSize': '0', # 0-1000000\n 'SpecMaxSize': '500', # 0-1000000\n}" }, { "identifier": "PARAMS_CONSTRAINTS", "path": "src/primkit/config.py", "snippet": "PARAMS_CONSTRAINTS = {\n 'DB': ['hg19.fa', 'mm10.fa'],\n 'SnpFilter': ['yes', 'no'],\n 'PrimerMinSize': [15, 35],\n 'PrimerOptSize': [15, 35],\n 'PrimerMaxSize': [15, 35],\n 'PrimerMinTm': [0, 100],\n 'PrimerOptTm': [0, 100],\n 'PrimerMaxTm': [0, 100],\n 'ProdMinSize': [0, 1000000],\n 'ProdMaxSize': [0, 1000000],\n 'DimerScore': [3, 20],\n 'HairpinScore': [3, 20],\n 'Tm': [0, 100],\n 'SpecMinSize': [0, 1000000],\n 'SpecMaxSize': [0, 1000000],\n}" }, { "identifier": "PRIMER_SET_COUNT", "path": "src/primkit/config.py", "snippet": "PRIMER_SET_COUNT = 20" }, { "identifier": "WAITING_TIMEOUT", "path": "src/primkit/config.py", "snippet": "WAITING_TIMEOUT = 300" }, { "identifier": "WebDriverUtility", "path": "src/primkit/utils/SiteSeleniumer.py", "snippet": "class WebDriverUtility:\n \"\"\"\n Utility class to interact with a website using Selenium WebDriver.\n \"\"\"\n\n def __init__(self, url, driver_path=CHROME_DRIVER_PATH, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Initializes a WebDriverUtility instance to manage a WebDriver session.\n\n This constructor sets up a Chrome WebDriver using the specified driver path and URL.\n It waits for the page to load within the given timeout period.\n\n Parameters:\n :param url (str): The URL to be accessed by the WebDriver.\n :param driver_path (str, optional): The file path to the ChromeDriver executable. If not provided,\n a default path defined by CHROME_DRIVER_PATH is used.\n :param timeout (int, optional): The maximum time in seconds to wait for the page to load.\n If not provided, a default timeout defined by DEFAULT_TIMEOUT is used.\n\n The driver is set to the `self.driver` attribute and can be accessed by instance methods.\n \"\"\"\n self.url = url\n self.driver_path = driver_path\n self.timeout = timeout\n self.driver = None\n\n def init_driver(self, return_driver=False):\n \"\"\"\n Initializes the WebDriver if it has not been initialized already.\n \"\"\"\n if not self.driver:\n self.driver = get_chrome_driver(driver_path=self.driver_path)\n self.driver.maximize_window()\n if return_driver:\n logging.warning(\"Direct access to the WebDriver is granted. 
This reduces encapsulation and abstraction.\")\n return self.driver\n\n def load_url(self):\n \"\"\"\n Navigates to the URL set during initialization after ensuring the driver is ready.\n If the URL is not valid, raises a ValueError.\n \"\"\"\n if not is_url(self.url):\n raise ValueError(f\"Invalid URL: {self.url}\")\n\n self.init_driver()\n self.driver.get(self.url)\n self.ensure_loaded(self.timeout)\n\n def ensure_loaded(self, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Waits for the page to load within the given timeout period.\n \"\"\"\n WebDriverWait(self.driver, timeout).until(\n lambda d: d.execute_script('return document.readyState') == 'complete'\n )\n\n @staticmethod\n def get_headers():\n \"\"\"\n # Call the function imported from primertools.utils.gather_system_details\n \"\"\"\n return get_system_headers()\n\n @staticmethod\n def format_cookies(cookies):\n \"\"\"\n Convert cookies from WebDriver format to requests format.\n\n :param cookies: A list of cookies from WebDriver.\n :return: A dictionary of cookies in requests format.\n \"\"\"\n return {cookie['name']: cookie['value'] for cookie in cookies}\n\n def get_cookies(self):\n \"\"\"\n Navigate to a URL and return the cookies found on the page.\n\n :return: 'cookies' in requests format.\n \"\"\"\n cookies = self.driver.get_cookies()\n\n return self.format_cookies(cookies)\n\n def get_token(self, token_name=XSRF_NAME):\n \"\"\"\n the specified token found on the page.\n\n :param token_name: The name of the token to retrieve (default is '_xsrf').\n :return: the specified 'token'.\n \"\"\"\n token = self.get_dynamic_token(token_name)\n\n return token\n\n def refresh_page(self):\n \"\"\"\n Refreshes the current page.\n \"\"\"\n self.driver.refresh()\n\n def ensure_element(self, locator, by=By.CSS_SELECTOR, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Wait for an element to appear and be visible on the page.\n\n :param locator: The locator of the element to wait for.\n :param by: The type of strategy to locate the element (default is By.CSS_SELECTOR).\n :param timeout: Maximum time in seconds to wait for the element to appear.\n :return: True if the element appears within the timeout, False otherwise.\n \"\"\"\n try:\n WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((by, locator)))\n return True\n except TimeoutException:\n return False\n\n def get_dynamic_token(self, token_name, timeout=DEFAULT_TIMEOUT):\n \"\"\"\n Retrieve a dynamic token from the page.\n\n :param token_name: The name of the token to retrieve.\n :param timeout: Maximum time in seconds to wait for the token to become available.\n :return: The value of the token if found, None otherwise.\n \"\"\"\n try:\n WebDriverWait(self.driver, timeout).until(\n EC.presence_of_element_located((By.NAME, token_name))\n )\n return self.driver.find_element(By.NAME, token_name).get_attribute('value')\n except Exception as e:\n logger.error(f\"Error retrieving token: {e}\")\n return None\n\n def get_page_source(self):\n \"\"\"\n Retrieves the source code of the current page loaded in the WebDriver.\n\n :return: A string representing the source code of the current page.\n \"\"\"\n return self.driver.page_source\n\n def scroll_to_element(self, element):\n \"\"\"\n Scrolls the browser window to an element.\n\n :param element: The WebElement to scroll to.\n \"\"\"\n self.driver.execute_script(\"arguments[0].scrollIntoView(true);\", element)\n\n def is_driver_active(self):\n \"\"\"\n Checks if the WebDriver is still active.\n\n :return: True if the WebDriver session is 
still active, False if it has been closed.\n \"\"\"\n try:\n # Attempt to get the current URL. If the driver is closed, this will raise an exception.\n _ = self.driver.current_url\n return True\n except WebDriverException:\n return False\n\n def find_element(self, by, value):\n \"\"\"\n Finds an element on the page based on the provided locator.\n\n :param by: The method to locate the element (e.g., By.ID, By.CSS_SELECTOR).\n :param value: The value of the locator.\n :return: The found web element.\n \"\"\"\n return self.driver.find_element(by, value)\n\n def click_by_locator(self, by, value):\n \"\"\"\n Clicks an element on the page identified by a locator.\n\n :param by: The method to locate the element.\n :param value: The value of the locator.\n \"\"\"\n element = self.find_element(by, value)\n element.click()\n\n @staticmethod\n def click_element(element):\n \"\"\"\n Clicks a web element.\n\n :param element: The web element to click.\n \"\"\"\n element.click()\n\n def clear_by_locator(self, by, value):\n \"\"\"\n Clears the content of an input field identified by a locator.\n\n :param by: The method to locate the element.\n :param value: The value of the locator.\n \"\"\"\n element = self.find_element(by, value)\n element.clear()\n\n @staticmethod\n def clear_element(element):\n \"\"\"\n Clears the content of a web element.\n\n :param element: The web element to clear.\n \"\"\"\n element.clear()\n\n def input_by_locator(self, by, value, text):\n \"\"\"\n Inputs text into an element identified by a locator.\n\n :param by: The method to locate the element.\n :param value: The value of the locator.\n :param text: The text to input into the element.\n \"\"\"\n element = self.find_element(by, value)\n element.send_keys(text)\n\n @staticmethod\n def input_element(element, *args):\n \"\"\"\n Inputs text or key sequences into a web element.\n\n Usage Examples:\n - utility.input_element(element, \"Text to input\")\n - utility.input_element(element, Keys.SHIFT, Keys.SPACE)\n\n :param element: The web element where the text or key sequences will be input.\n :param args: The text or key sequences to input into the element.\n \"\"\"\n for arg in args:\n element.send_keys(arg)\n\n def input_values(self, locator_or_element, input_value, by=None):\n \"\"\"\n Inputs the given value into the element identified by the locator or directly into the provided element.\n\n Usage Examples:\n - Using a Locator Tuple:\n utility.input_values((By.CSS_SELECTOR, \"#inputElementId\"), \"Input Value\")\n utility.input_values((By.ID, \"inputElementId\"), \"Input Value\")\n\n - Using a Web Element:\n element = utility.find_element(By.CSS_SELECTOR, \"#inputElementId\")\n utility.input_values(element, \"Input Value\")\n\n - Using locator value with by parameter:\n utility.input_values(\"inputElementId\", \"Input Value\", by=By.ID)\n\n :param locator_or_element: Either a locator tuple, an element object, or a locator string.\n :param input_value: The value to input into the element.\n :param by: Optional; The type of the locator (By.CSS_SELECTOR, By.ID, etc.).\n Required if the first parameter is a locator string.\n :raises: Exception if the locator_or_element is not a valid WebElement or locator tuple.\n \"\"\"\n if isinstance(locator_or_element, tuple):\n # Check if the first element of the tuple is a valid By attribute\n if not (locator_or_element[0] in vars(By).values() and isinstance(locator_or_element[1], str)):\n raise ValueError(\"Locator tuple is not in the correct order (By method, locator value).\")\n element = 
self.find_element(*locator_or_element)\n elif by is not None:\n element = self.find_element(by, locator_or_element)\n elif isinstance(locator_or_element, WebElement):\n element = locator_or_element\n else:\n raise ValueError(\"The locator_or_element argument must be a locator tuple, WebElement, or string with 'by' parameter.\")\n\n # Perform actions on the located element\n self.click_element(element)\n self.clear_element(element)\n self.input_element(element, input_value)\n\n def get_element_attribute(self, locator, attribute, by=By.CSS_SELECTOR):\n \"\"\"\n Gets the specified attribute of an element.\n\n Usage Example:\n - utility.get_element_attribute(\"#myElement\", \"href\")\n\n :param locator: The locator of the element.\n :param attribute: The attribute to retrieve from the element.\n :param by: The method to locate the element (default is By.CSS_SELECTOR).\n :return: The value of the specified attribute, or None if the element is not found.\n \"\"\"\n try:\n element = self.driver.find_element(by, locator)\n return element.get_attribute(attribute)\n except Exception as e:\n logger.error(f'Error getting attribute from element: {e}')\n return None\n\n def select_dropdown_option(self, dropdown_selector, option_value):\n \"\"\"\n Selects an option from a dropdown select element based on the value attribute of the option.\n Raises an exception if the option_value is not found in the dropdown.\n\n :param dropdown_selector: The CSS selector for the dropdown select element.\n :param option_value: The value attribute of the option to be selected.\n :raises NoSuchElementException: If the option_value is not found in the dropdown options.\n \"\"\"\n # Find the dropdown select element\n dropdown_element = self.find_element(By.CSS_SELECTOR, dropdown_selector)\n\n # Create a Select object for the dropdown element\n select = Select(dropdown_element)\n\n # Check if the option value is present in the dropdown\n if not any(option.get_attribute('value') == option_value for option in select.options):\n available_options = [option.get_attribute('value') for option in select.options]\n raise NoSuchElementException(f\"The option value '{option_value}' was not found in the dropdown. \"\n f\"Available options are: {available_options}\")\n\n # Select the option by its value attribute\n select.select_by_value(option_value)\n\n def close(self):\n \"\"\"\n Closes the WebDriver session.\n \"\"\"\n if self.driver:\n self.driver.quit()\n self.driver = None" } ]
import re import time import logging import requests from bs4 import BeautifulSoup from selenium.webdriver import Keys from selenium.webdriver.common.by import By from selenium.common import NoSuchElementException from ..config import MFE_PRIMER, PRIMER_URL, RETRY_INTERVAL, \ MAX_RETRIES, CHECK_INTERVAL, PRIMER_PARAMS, PARAMS_CONSTRAINTS, \ PRIMER_SET_COUNT, WAITING_TIMEOUT from ..utils.SiteSeleniumer import WebDriverUtility
5,078
logger.info(f"Validating parameter {parameter} with value {value}.") if parameter in ['DB', 'SnpFilter']: if value not in constraints: logger.error(f"Value for {parameter} is not within the allowed constraints.") return value in constraints elif parameter in ['PrimerMinSize', 'PrimerOptSize', 'PrimerMaxSize', 'PrimerMinTm', 'PrimerOptTm', 'PrimerMaxTm', 'ProdMinSize', 'ProdMaxSize', 'DimerScore', 'HairpinScore', 'Tm', 'SpecMinSize', 'SpecMaxSize']: if not (constraints[0] <= int(value) <= constraints[1]): logger.error(f"Value for {parameter} is not within the allowed range of {constraints}.") return constraints[0] <= int(value) <= constraints[1] else: return True def validate_bed_input(bed_input, max_count=PRIMER_SET_COUNT): """ Validates the format of the BedInput parameter and ensures the number of entries does not exceed the maximum count. The BedInput should be a string containing lines with specific chromosomes (chr1 to chr22, chrX, chrY), start, and end positions, separated by tabs or spaces and ending with a newline character. Regular expression matches lines with "chr[specific chromosome][spaces/tabs][number][spaces/tabs][number][newline]" :param bed_input: The input string in BED format to validate. :param max_count: The maximum number of entries allowed. :return: A tuple containing a boolean indicating if the BedInput format is correct and the processed BedInput. """ logger.info("Validating the BedInput format.") bed_lines = bed_input.splitlines() if len(bed_lines) > max_count: logger.warning( f"BedInput contains more than {max_count} entries. Only the first {max_count} will be processed.") bed_lines = bed_lines[:max_count] # Keep only the first 20 entries bed_input_pattern = re.compile(r'chr(?:[1-9]|1\d|2[0-2]|X|Y)\s+(\d+)\s+(\d+)(\r?\n|$)') for line in bed_lines: match = bed_input_pattern.match(line) if not match: logger.error( f"Line does not match the expected format: {line}. (Expected format: 'chr[specific chromosome][spaces/tabs][number][spaces/tabs][number][newline]').") return False, bed_input # Return original bed_input for further reference start, end = map(int, match.groups()[:2]) if start >= end: logger.error( f"Starting position is greater than or equal to the ending position in the line: {line}. (The ending position must be greater than the starting position by at least 1 base pair (bp).)") return False, bed_input processed_bed_input = "\n".join(bed_lines) # Reconstruct the BedInput with potentially fewer lines return True, processed_bed_input def build_data_dict(token, bed_input, custom_params=None, default_params=None): """ Builds a data dictionary using default and custom parameters. :param token: The authentication token required for the POST request. :param bed_input: The BED format input containing chromosome, start, and end positions. :param custom_params: A dictionary of parameters provided by the user to override defaults. :param default_params: A dictionary of default parameters for the POST request. :return: A dictionary containing the combined default and custom parameters. """ logger.info("Building data dictionary with parameters.") data = default_params.copy() if default_params else {} data.update(custom_params if custom_params else {}) data['_xsrf'] = token data['BedInput'] = bed_input return data def prepare_post_data(token, bed_input, custom_params=None, default_params=PRIMER_PARAMS, constraints=PARAMS_CONSTRAINTS): """ Prepares the data for a POST request by validating parameters and constructing a data dictionary. 
:param token: The authentication token required for the POST request. :param bed_input: The BED format input containing chromosome, start, and end positions. :param custom_params: Optional; A dictionary of parameters provided by the user to override defaults. :param default_params: Optional; A dictionary of default parameters for the POST request. :param constraints: Optional; A dictionary of constraints for parameter validation. :return: A dictionary ready to be sent in a POST request if all validations pass. :raises ValueError: If token is empty, BedInput format is incorrect, custom_params is not a dictionary, or if any parameter is out of its constraint range. """ logger.info("Preparing post data.") if not token: logger.error("Token parameter cannot be empty.") raise ValueError("Token parameter cannot be empty.") valid, processed_bed_input = validate_bed_input(bed_input) if not valid: logger.error("BedInput format is incorrect.") raise ValueError("BedInput format is incorrect.") if custom_params is not None and not isinstance(custom_params, dict): logger.error("Custom_params must be a dictionary.") raise ValueError("Custom_params must be a dictionary.") valid_keys = constraints.keys() for key in custom_params or {}: if key not in valid_keys: valid_keys_str = ', '.join(valid_keys) logger.error(f"Invalid parameter: {key}. Valid keys are: {valid_keys_str}") raise ValueError(f"Invalid parameter: {key}. Valid keys are: {valid_keys_str}") data = build_data_dict(token, processed_bed_input, custom_params, default_params) for key, value in data.items(): if not validate_parameter(key, value, constraints.get(key, [])): logger.error(f"Parameter {key} with value {value} is out of constraint range.") raise ValueError(f"Parameter {key} with value {value} is out of constraint range.") logger.info("Data prepared successfully.") return data def submit_and_track(data, headers, cookies, url=PRIMER_URL, root_url=MFE_PRIMER, max_retries=MAX_RETRIES,
logger = logging.getLogger(__name__) def validate_parameter(parameter, value, constraints): """ Validates if a single parameter value is within its constraint range. :param parameter: The name of the parameter to validate. :param value: The value of the parameter to validate. :param constraints: A list or tuple containing the allowed range or set of values for the parameter. :return: Boolean indicating whether the parameter value is valid. """ logger.info(f"Validating parameter {parameter} with value {value}.") if parameter in ['DB', 'SnpFilter']: if value not in constraints: logger.error(f"Value for {parameter} is not within the allowed constraints.") return value in constraints elif parameter in ['PrimerMinSize', 'PrimerOptSize', 'PrimerMaxSize', 'PrimerMinTm', 'PrimerOptTm', 'PrimerMaxTm', 'ProdMinSize', 'ProdMaxSize', 'DimerScore', 'HairpinScore', 'Tm', 'SpecMinSize', 'SpecMaxSize']: if not (constraints[0] <= int(value) <= constraints[1]): logger.error(f"Value for {parameter} is not within the allowed range of {constraints}.") return constraints[0] <= int(value) <= constraints[1] else: return True def validate_bed_input(bed_input, max_count=PRIMER_SET_COUNT): """ Validates the format of the BedInput parameter and ensures the number of entries does not exceed the maximum count. The BedInput should be a string containing lines with specific chromosomes (chr1 to chr22, chrX, chrY), start, and end positions, separated by tabs or spaces and ending with a newline character. Regular expression matches lines with "chr[specific chromosome][spaces/tabs][number][spaces/tabs][number][newline]" :param bed_input: The input string in BED format to validate. :param max_count: The maximum number of entries allowed. :return: A tuple containing a boolean indicating if the BedInput format is correct and the processed BedInput. """ logger.info("Validating the BedInput format.") bed_lines = bed_input.splitlines() if len(bed_lines) > max_count: logger.warning( f"BedInput contains more than {max_count} entries. Only the first {max_count} will be processed.") bed_lines = bed_lines[:max_count] # Keep only the first 20 entries bed_input_pattern = re.compile(r'chr(?:[1-9]|1\d|2[0-2]|X|Y)\s+(\d+)\s+(\d+)(\r?\n|$)') for line in bed_lines: match = bed_input_pattern.match(line) if not match: logger.error( f"Line does not match the expected format: {line}. (Expected format: 'chr[specific chromosome][spaces/tabs][number][spaces/tabs][number][newline]').") return False, bed_input # Return original bed_input for further reference start, end = map(int, match.groups()[:2]) if start >= end: logger.error( f"Starting position is greater than or equal to the ending position in the line: {line}. (The ending position must be greater than the starting position by at least 1 base pair (bp).)") return False, bed_input processed_bed_input = "\n".join(bed_lines) # Reconstruct the BedInput with potentially fewer lines return True, processed_bed_input def build_data_dict(token, bed_input, custom_params=None, default_params=None): """ Builds a data dictionary using default and custom parameters. :param token: The authentication token required for the POST request. :param bed_input: The BED format input containing chromosome, start, and end positions. :param custom_params: A dictionary of parameters provided by the user to override defaults. :param default_params: A dictionary of default parameters for the POST request. :return: A dictionary containing the combined default and custom parameters. 
""" logger.info("Building data dictionary with parameters.") data = default_params.copy() if default_params else {} data.update(custom_params if custom_params else {}) data['_xsrf'] = token data['BedInput'] = bed_input return data def prepare_post_data(token, bed_input, custom_params=None, default_params=PRIMER_PARAMS, constraints=PARAMS_CONSTRAINTS): """ Prepares the data for a POST request by validating parameters and constructing a data dictionary. :param token: The authentication token required for the POST request. :param bed_input: The BED format input containing chromosome, start, and end positions. :param custom_params: Optional; A dictionary of parameters provided by the user to override defaults. :param default_params: Optional; A dictionary of default parameters for the POST request. :param constraints: Optional; A dictionary of constraints for parameter validation. :return: A dictionary ready to be sent in a POST request if all validations pass. :raises ValueError: If token is empty, BedInput format is incorrect, custom_params is not a dictionary, or if any parameter is out of its constraint range. """ logger.info("Preparing post data.") if not token: logger.error("Token parameter cannot be empty.") raise ValueError("Token parameter cannot be empty.") valid, processed_bed_input = validate_bed_input(bed_input) if not valid: logger.error("BedInput format is incorrect.") raise ValueError("BedInput format is incorrect.") if custom_params is not None and not isinstance(custom_params, dict): logger.error("Custom_params must be a dictionary.") raise ValueError("Custom_params must be a dictionary.") valid_keys = constraints.keys() for key in custom_params or {}: if key not in valid_keys: valid_keys_str = ', '.join(valid_keys) logger.error(f"Invalid parameter: {key}. Valid keys are: {valid_keys_str}") raise ValueError(f"Invalid parameter: {key}. Valid keys are: {valid_keys_str}") data = build_data_dict(token, processed_bed_input, custom_params, default_params) for key, value in data.items(): if not validate_parameter(key, value, constraints.get(key, [])): logger.error(f"Parameter {key} with value {value} is out of constraint range.") raise ValueError(f"Parameter {key} with value {value} is out of constraint range.") logger.info("Data prepared successfully.") return data def submit_and_track(data, headers, cookies, url=PRIMER_URL, root_url=MFE_PRIMER, max_retries=MAX_RETRIES,
retry_interval=RETRY_INTERVAL):
2
2023-12-25 14:12:46+00:00
8k
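The record above revolves around validate_bed_input and prepare_post_data for a primer-design POST request. A minimal, self-contained sketch of the BED-line check used there might look like the following; the regular expression is copied from the record, while max_count=20 and the sample coordinates are illustrative assumptions rather than the record's PRIMER_SET_COUNT constant or real data.

import re

# Hedged sketch of the BED-line validation pattern from validate_bed_input above.
BED_LINE = re.compile(r'chr(?:[1-9]|1\d|2[0-2]|X|Y)\s+(\d+)\s+(\d+)(\r?\n|$)')

def check_bed(bed_input, max_count=20):
    lines = bed_input.splitlines()[:max_count]        # keep only the first max_count entries
    for line in lines:
        match = BED_LINE.match(line)
        if not match:
            return False, bed_input                   # malformed chromosome/start/end line
        start, end = map(int, match.groups()[:2])
        if start >= end:
            return False, bed_input                   # end must exceed start by at least 1 bp
    return True, "\n".join(lines)

print(check_bed("chr7 55019017 55019278"))                   # (True, 'chr7 55019017 55019278')
print(check_bed("chr7 55019017 55019278\nchrX 1000 900"))    # (False, ...) because start >= end

The two failure modes the sketch trips are the same ones the record reports: a line that does not match the chromosome/start/end pattern, and a start coordinate that is not strictly smaller than the end coordinate.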
Wangyuhao06/2022-adhoc
src/env.py

[ { "identifier": "random_waypoint", "path": "pymobility/models/mobility.py", "snippet": "def random_waypoint(*args, **kwargs):\n return iter(RandomWaypoint(*args, **kwargs))" }, { "identifier": "Node", "path": "src/node.py", "snippet": "class Node(object):\n def __init__(self,id_node):\n super(Node, self).__init__()\n #multi-agent sys setting\n self.node_max=36\n self.act_range=self.node_max-1 #最大邻居范围\n # current agent-property setting\n self.id=id_node#该节点id\n # 1 - packets\n self.packets_ToSend_id=[]#该节点当前待传的包\n self.packets_id_list=[]#该节点至今为止保存过的包id\n \n self.sending_flag=0\n self.rec_flag=0\n \n self.trans_task_send=Queue(maxsize=1)#该节点当前传输的任务\n self.trans_taskID_rec=[]#该节点当前接收的任务\n # 2 - energy\n self.current_amp_send=0#节点当前发送增益--------动作\n #self.current_amp_receive=0#节点当前接收增益--------动作\n \n self.current_power_send=0#节点当前发送功率\n self.current_power_receive=0#节点当前接收功率\n self.power_list=[]#节点使用能量记录\n \n self.energy_consumption=0#截至现在能量消耗\n # 3 - freq\n self.current_freqB=[1]#当前选用频谱块--------动作\n self.freqB_list=[1]#频谱块历史\n # 4 - topology\n self.neibor_idlist=[]\n self.next_hop_id=-1#下一条节点id--------动作\n # 5 - observation\n #self.ob_send=[]\n \n # def observation_rec(self,send_node):\n # if len(self.ob_send)==0 or len(send_node.ob_send)==0 :\n # raise ValueError(\"send observation unfinished\")\n # self.ob_rec.append(self.ob_send[-1])\n # self.ob_rec.append(send_node.ob_send[-1])\n # return self.ob_rec\n \n \n def get_send_action(self,ob,action_space):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_send,self.current_freqB,self.next_hop_id\n \n def get_rec_action(self,ob):\n \n ###缺省决策###\n \n #改变属性\n return self.current_amp_receive " }, { "identifier": "Packet", "path": "src/packet.py", "snippet": "class Packet(object):\n def __init__(self,id_packet,packet_size,ori_node_id,dst_node_id,time_start_0):\n super(Packet, self).__init__()\n self.id=id_packet\n self.size=packet_size\n #节点属性\n self.ori_node_id=ori_node_id\n self.cur_node_id=ori_node_id\n self.dst_node_id=dst_node_id\n self.node_list=[ori_node_id]\n #T-T属性\n self.cur_trans_task_id=-100\n self.in_TR=0\n self.trans_task_IDlist=[]\n #路由属性\n self.time_start=time_start_0\n self.time_use=0\n self.arrive_flag=0\n \n def packet_trans_update(self,trans_task):\n if trans_task.trans_property[2]!=self.id:\n raise ValueError('trans_task not matched')\n self.cur_trans_task_id=trans_task.id" }, { "identifier": "Trans_task", "path": "src/transtask.py", "snippet": "class Trans_task(object):\n def __init__(self,trans_id,node_send,node_rec,packet):\n self.id=trans_id\n self.trans_property=(node_send.id,node_rec.id,packet.id)#基本属性\n self.packsize=packet.size\n ####frequency block info####\n self.FreqB_occup=node_send.current_freqB #占用频谱块id\n ####SINR and Capacity####\n self.SNR_C=([],1)#Y(SNR,Capacity)-----------------[X(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...]\n ####time of trans####\n self.time_use=1#int(self.packsize/self.SNR_C[1])+1\n self.time_cnt=0\n self.finish_flag=0\n ####energy setting####\n self.energy_property = (node_send.current_amp_send,RECAMP)\n self.energy_consume=(node_send.current_amp_send*packet.size*PACKENERGY,RECAMP*packet.size*PACKENERGY)\n self.power_consume=(round(node_send.current_amp_send*packet.size*PACKENERGY/self.time_use,6),round(RECAMP*packet.size*PACKENERGY/self.time_use,6))\n \n def show_info(self):\n return self.trans_property[0],self.trans_property[1],self.trans_property[2]\n \n def Trans_task_update(self):\n if self.finish_flag:\n return 1\n if self.time_cnt>=self.time_use:\n self.finish_flag=1\n 
return 1\n elif self.time_cnt<self.time_use:\n self.time_cnt+=1\n return 0\n \n \n #trans_task=tuple([],{},(node_send_id,node_send_amp,node_rec_id,node_rec_amp,packet_id),0)\n #tuple:([占用频谱块id],{(timeslot1:SNR,Capacity),(timeslot2:SNR,Capacity),...},(基本属性:发送节点id,发送增益,接收节点id,接收增益,包id),完成标志位)" } ]
import random
import numpy as np
from math import log2, log10
from queue import Queue
from pymobility.models.mobility import random_waypoint
from src.node import Node
from src.packet import Packet
from src.parameter import *
from src.transtask import Trans_task
4,746
all_ob.append(pw+dgr+pcs+dn) #self.node_list[node_id].ob_send=neibor_vector return np.array(all_ob) # def generate_trans_task(self,trans_id,send_node,rec_node,packet): # trans_task_temp=Trans_task(trans_id,send_node,rec_node,packet) # return trans_task_temp def env_check_right(self): for node_id in self.live_node_ID_list: if self.node_list[node_id].trans_task_send.empty(): assert self.node_list[node_id].sending_flag == 0 elif not self.node_list[node_id].trans_task_send.empty(): assert self.node_list[node_id].sending_flag == 1 st_temp=self.node_list[node_id].trans_task_send.get() self.node_list[node_id].trans_task_send.put(st_temp)#无损使用队列内容 s_node_send_id,s_node_rec_id,s_packet_id=st_temp.show_info() assert node_id==s_node_send_id # assert self.node_list[node_id].next_hop_id==s_node_rec_id assert self.node_list[node_id].packets_ToSend_id[0]==s_packet_id elif self.node_list[node_id].trans_task_rec.empty(): assert self.rec_flag == 0 elif not self.node_list[node_id].trans_task_rec.empty(): assert self.node_list[node_id].rec_flag == 1 rt_temp=self.node_list[node_id].trans_task_rec.get() self.node_list[node_id].trans_task_rec.put(rt_temp)#无损使用队列内容 r_node_send_id,r_node_rec_id,r_packet_id=rt_temp.show_info() assert node_id==r_node_rec_id # assert self.node_list[node_id].next_hop_id==s_node_rec_id assert self.node_list[node_id].packets_ToSend_id[0] != r_packet_id return 0 def topology_update(self,cur_time,rand_change): self.topology = np.zeros((NODE_MAX,NODE_MAX)) ################--------随机更改拓扑结构--------################ if rand_change: positions=next(self.geo_area) self.position = positions for a in range(NODE_MAX): for b in range(NODE_MAX): if np.linalg.norm(positions[a]-positions[b]) <= COM_RANGE: self.topology[a,b]=1 self.topology[b,a]=1 else: self.topology[a,b]=0 self.topology[b,a]=0 # if np.random.rand()<DELTA and cur_time%30==0: # for i in np.random.randint(0,self.node_max,np.random.randint(3)+1): # self.topology[i,:]=np.random.randint(0,2,self.node_max) # self.topology[i,i] = 1 # for j in range(self.node_max): # #构建双向图 # if self.topology[i,j] == 1: # self.topology[j,i] = 1 # print(positions) # print("****************") # print(self.topology) # print("------------------------------------") ################--------更新邻域--------################ self.live_node_ID_list=[] self.topology_actSpace=[] for i in range(self.topology.shape[0]): if any(self.topology[i,:]): TPtemp = np.nonzero(self.topology[i,:]) # self.node_list[i].neibor_idlist=TPtemp self.topology_actSpace.append(TPtemp) self.live_node_ID_list.append(i) else: TPtemp = -1 self.topology_actSpace.append(TPtemp) return self.topology def get_state_reward(self): return self.topology,self.all_ob,self.reward def time_step(self,cur_time,action): self.packet_arrive_success=[] self.agent_arrive=[] for i in range(NODE_MAX): self.packet_arrive_success.append(0) self.agent_arrive.append(0) self.arrive_success=0 # self.env_check_right() topology_now=self.topology_update(cur_time,1) self.generate_packet(cur_time) self.all_ob=self.all_agent_observe() self.trans_task_update(cur_time) for node_index in self.live_node_ID_list : if len(self.node_list[node_index].packets_ToSend_id)>0 and self.node_list[node_index].sending_flag!=1: packet_toSend_id=self.node_list[node_index].packets_ToSend_id[0] #包未到达且非在传----->生成trans_task if self.packets_list[packet_toSend_id].arrive_flag==0 and self.packets_list[packet_toSend_id].in_TR==0: #传输和接收节点决策 send_node=self.node_list[node_index] Action=action[node_index]####################################################### 
next_hop_id,current_freqB,current_amp_send=Action[0],Action[1:N_ACTION_C],Action[N_ACTION_C] send_node.next_hop_id=next_hop_id rec_node=self.node_list[next_hop_id] current_amp_rec=RECAMP self.node_list[node_index].current_freqB=current_freqB self.node_list[node_index].next_hop_id=next_hop_id self.node_list[node_index].current_amp_send=current_amp_send #频谱环境更新 freqB_ID_now=0 for fB_ocp in current_freqB: if node_index!=next_hop_id and fB_ocp: self.freqB_list[freqB_ID_now].append(node_index) self.freqB_use_history[freqB_ID_now].append(node_index) freqB_ID_now+=1 #T-T生成与T-T环境更新
class Environment(): #初始化环境 def __init__(self): #初始数据-最大节点数 self.node_max=NODE_MAX self.node_space_size=NODE_MAX self.node_moving_area=MOV_AREA #初始化二维平面 self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0) self.position=0 #初始化随机相邻矩阵 self.topology = np.zeros((self.node_space_size,self.node_space_size)) self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max)) for i in range(self.node_max): self.topology[i,i] = 1 for j in range(self.node_max): #构建双向图 if self.topology[i,j] == 1: self.topology[j,i] = 1 #初始化节点动作空间 self.topology_actSpace=[] #初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表]) self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[])) self.freqB_use_history=([],[],[],[],[],[],[],[],[],[]) #初始化传输事件列表 self.trans_task_ID_inTR=[] self.trans_task_list=[] self.trans_task_cnt=0 # id计数器 #初始化包列表 self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化 self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化 self.pack_use_cnt=0#包序号计数器 self.packets_list=[]#包列表 self.packets_live_id=[] #初始化节点列表 self.node_list=[] self.live_node_ID_list=[] for i in range(self.node_max): locals()['node_'+str(i)] = Node(i) self.node_list.append(locals()['node_'+str(i)]) self.live_node_ID_list.append(i) #噪声系数 self.noise_list = np.random.rayleigh(1,MAX_TIME*2)#*NOISE_CONST/2 #统计参数 self.envTr_time=0 self.allNode_pw=0 self.allNode_delay=0 self.time_avg=0 self.arrive_time=1 self.end=0 self.terminate=0 self.packet_arrive_success=[] self.agent_arrive=[] for i in range(NODE_MAX): self.packet_arrive_success.append(0)#节点作为 |源节点| 发的包成功到达数 self.agent_arrive.append(0)#节点作为 |最后一个中间节点| 发的包成功到达数 # self.sum_packet_done_rate=0 #四元组 self.all_ob=np.array([[0]*OBS_LEN]*NODE_MAX) self.reward=np.array([1]*self.node_max) self.para_reward=np.array([1]*self.node_max) def generate_packet(self,cur_time): packetsList_temp=[] packets_cnt=self.amount_poisson_list[cur_time] for i in range(packets_cnt): nodes_temp = random.sample(self.live_node_ID_list,2) locals()['packet_'+str(self.pack_use_cnt)]=Packet(self.pack_use_cnt,abs(self.size_normal_list[self.pack_use_cnt])+8,nodes_temp[0],nodes_temp[1],cur_time) self.packets_list.append(locals()['packet_'+str(self.pack_use_cnt)]) self.packets_live_id.append(self.pack_use_cnt) packetsList_temp.append(locals()['packet_'+str(self.pack_use_cnt)]) self.node_list[nodes_temp[0]].packets_ToSend_id.append(self.pack_use_cnt) self.node_list[nodes_temp[0]].packets_id_list.append(self.pack_use_cnt) self.pack_use_cnt+=1 return packetsList_temp #传输任务更新 def trans_task_update(self,cur_time): if len(self.trans_task_ID_inTR)>0 and len(self.trans_task_list)>0: #所有在传传输任务 for trans_temp_id in self.trans_task_ID_inTR: task_finish=self.trans_task_list[trans_temp_id].Trans_task_update() node_send_id,node_rec_id,packet_id=self.trans_task_list[trans_temp_id].show_info() #包传输更新 self.packets_list[packet_id].time_use+=1 #节点更新 # self.node_list[node_send_id].next_hop_id=node_rec_id if node_send_id!=node_rec_id: self.node_list[node_send_id].power_list.append(self.trans_task_list[trans_temp_id].power_consume[0]) self.node_list[node_send_id].current_power_send=self.trans_task_list[trans_temp_id].power_consume[0] self.node_list[node_send_id].energy_consumption+=self.trans_task_list[trans_temp_id].power_consume[0] self.node_list[node_rec_id].power_list.append(self.trans_task_list[trans_temp_id].power_consume[1]) 
self.node_list[node_rec_id].current_power_receive=self.trans_task_list[trans_temp_id].power_consume[1] self.node_list[node_rec_id].energy_consumption+=self.trans_task_list[trans_temp_id].power_consume[1] #统计参数更新 self.envTr_time+=1 #trans任务完成更新 if task_finish and self.topology[node_send_id,node_rec_id]==1 : #更新包与节点 # T-T清除 self.trans_task_ID_inTR.remove(trans_temp_id) # 包属性清除 self.packets_list[packet_id].in_TR=0 self.packets_list[packet_id].cur_trans_task_id=0 self.packets_list[packet_id].cur_node_id=node_rec_id # 发送节点属性清除 self.node_list[node_send_id].packets_ToSend_id.remove(packet_id) self.node_list[node_send_id].trans_task_send.get() self.node_list[node_send_id].sending_flag=0 self.node_list[node_send_id].current_amp_send=0 self.node_list[node_send_id].current_power_send=0 # 接收节点属性清除 self.node_list[node_rec_id].trans_taskID_rec.remove(trans_temp_id) if len(self.node_list[node_rec_id].trans_taskID_rec)==0: self.node_list[node_rec_id].rec_flag=0 # self.node_list[node_rec_id].current_amp_receive=0 self.node_list[node_rec_id].current_power_receive=0 # 频谱环境更新(频谱块release) freqB_ID_now=0 for freqB_ocp_now in self.trans_task_list[trans_temp_id].FreqB_occup: if freqB_ocp_now and node_send_id!=node_rec_id: self.freqB_list[freqB_ID_now].remove(node_send_id) freqB_ID_now+=1 #判断是否到达目的地 if self.packets_list[packet_id].cur_node_id==self.packets_list[packet_id].dst_node_id and self.topology[node_send_id,node_rec_id]==1: # 可通信到达 self.packets_list[packet_id].arrive_flag=1 self.packets_live_id.remove(packet_id) ### 记录接受节点和发出节点的奖励 ### self.packet_arrive_success[self.packets_list[packet_id].ori_node_id]+=1 self.agent_arrive[node_send_id]+=1 # self.arrive_time += self.trans_task_list[trans_temp_id].time_use # datacheck3 self.arrive_success += 1 elif self.topology[node_send_id,node_rec_id]==1 : #可通信没到达 self.node_list[node_rec_id].packets_ToSend_id.append(packet_id) # self.arrive_time += (cur_time - self.packets_list[packet_id].time_start) # datacheck3 else: #不可通信 self.trans_task_list[trans_temp_id].time_cnt=0 self.trans_task_list[trans_temp_id].finish_flag=0 # for packet_id in self.packets_live_id: # #判断是否到达目的地 # if self.packets_list[packet_id].cur_node_id==self.packets_list[packet_id].dst_node_id or self.packets_list[packet_id].arrive_flag==1: # #到达 # continue # # self.arrive_time += self.trans_task_list[trans_temp_id].time_use # else:#没到达 # self.arrive_time += 1 self.arrive_time += len(self.packets_live_id) def all_agent_observe(self): all_ob=[] # fBlst=[0,0,0,0,0,0,0,0,0,0] degree=0 pack_storage=0 pw_avg_all=0 dst_node=-1 # for node_id in range(self.node_max): # if len (self.node_list[node_id].packets_ToSend_id): # packet_toSend_id=self.node_list[node_id].packets_ToSend_id[0] # dst_node=self.packets_list[packet_toSend_id].dst_node_id # else: # dst_node=-1 # for node_id in self.live_node_ID_list: # for node_id in range(self.node_max): # fb_tp_id=0 # for fb_tp in self.node_list[node_id].current_freqB: # fBlst[fb_tp_id]=fb_tp # fb_tp_id+=1 # for node_id in self.live_node_ID_list: #neibor_idlist=self.node_list[node_id].neibor_idlist[:]#深复制 #receive ob? 
#neibor_idlist.append(node_id) #neibor_vector=[] #for i in neibor_idlist: # for node_id in range(self.node_max): # pwl=self.node_list[node_id].power_list # if len(pwl)>=BACKTIME: # pwlst=pwl[len(pwl)-BACKTIME:len(pwl)] # else: # pwlst=pwl # if len(pwlst)>0: # pw_avg=sum(pwlst)/len(pwlst) # else: # pw_avg=0 # pw_avg_all+=pw_avg for node_id in range(self.node_max): pwl=self.node_list[node_id].power_list if len(pwl)>=BACKTIME: pwlst=pwl[len(pwl)-BACKTIME:len(pwl)] else: pwlst=pwl if len(pwlst)>0: pw_avg=sum(pwlst)/len(pwlst) else: pw_avg=0 if len (self.node_list[node_id].packets_ToSend_id)>0: packet_toSend_id=self.node_list[node_id].packets_ToSend_id[0] dst_node=self.packets_list[packet_toSend_id].dst_node_id else: dst_node=-1 pw=[] pw.append(pw_avg) dgr=[] degree=len(self.topology_actSpace[node_id][0])-1 dgr.append(degree) pcs=[] pack_storage=len(self.node_list[node_id].packets_ToSend_id) pcs.append(pack_storage) dn=[] dn.append(dst_node) all_ob.append(pw+dgr+pcs+dn) #self.node_list[node_id].ob_send=neibor_vector return np.array(all_ob) # def generate_trans_task(self,trans_id,send_node,rec_node,packet): # trans_task_temp=Trans_task(trans_id,send_node,rec_node,packet) # return trans_task_temp def env_check_right(self): for node_id in self.live_node_ID_list: if self.node_list[node_id].trans_task_send.empty(): assert self.node_list[node_id].sending_flag == 0 elif not self.node_list[node_id].trans_task_send.empty(): assert self.node_list[node_id].sending_flag == 1 st_temp=self.node_list[node_id].trans_task_send.get() self.node_list[node_id].trans_task_send.put(st_temp)#无损使用队列内容 s_node_send_id,s_node_rec_id,s_packet_id=st_temp.show_info() assert node_id==s_node_send_id # assert self.node_list[node_id].next_hop_id==s_node_rec_id assert self.node_list[node_id].packets_ToSend_id[0]==s_packet_id elif self.node_list[node_id].trans_task_rec.empty(): assert self.rec_flag == 0 elif not self.node_list[node_id].trans_task_rec.empty(): assert self.node_list[node_id].rec_flag == 1 rt_temp=self.node_list[node_id].trans_task_rec.get() self.node_list[node_id].trans_task_rec.put(rt_temp)#无损使用队列内容 r_node_send_id,r_node_rec_id,r_packet_id=rt_temp.show_info() assert node_id==r_node_rec_id # assert self.node_list[node_id].next_hop_id==s_node_rec_id assert self.node_list[node_id].packets_ToSend_id[0] != r_packet_id return 0 def topology_update(self,cur_time,rand_change): self.topology = np.zeros((NODE_MAX,NODE_MAX)) ################--------随机更改拓扑结构--------################ if rand_change: positions=next(self.geo_area) self.position = positions for a in range(NODE_MAX): for b in range(NODE_MAX): if np.linalg.norm(positions[a]-positions[b]) <= COM_RANGE: self.topology[a,b]=1 self.topology[b,a]=1 else: self.topology[a,b]=0 self.topology[b,a]=0 # if np.random.rand()<DELTA and cur_time%30==0: # for i in np.random.randint(0,self.node_max,np.random.randint(3)+1): # self.topology[i,:]=np.random.randint(0,2,self.node_max) # self.topology[i,i] = 1 # for j in range(self.node_max): # #构建双向图 # if self.topology[i,j] == 1: # self.topology[j,i] = 1 # print(positions) # print("****************") # print(self.topology) # print("------------------------------------") ################--------更新邻域--------################ self.live_node_ID_list=[] self.topology_actSpace=[] for i in range(self.topology.shape[0]): if any(self.topology[i,:]): TPtemp = np.nonzero(self.topology[i,:]) # self.node_list[i].neibor_idlist=TPtemp self.topology_actSpace.append(TPtemp) self.live_node_ID_list.append(i) else: TPtemp = -1 
self.topology_actSpace.append(TPtemp) return self.topology def get_state_reward(self): return self.topology,self.all_ob,self.reward def time_step(self,cur_time,action): self.packet_arrive_success=[] self.agent_arrive=[] for i in range(NODE_MAX): self.packet_arrive_success.append(0) self.agent_arrive.append(0) self.arrive_success=0 # self.env_check_right() topology_now=self.topology_update(cur_time,1) self.generate_packet(cur_time) self.all_ob=self.all_agent_observe() self.trans_task_update(cur_time) for node_index in self.live_node_ID_list : if len(self.node_list[node_index].packets_ToSend_id)>0 and self.node_list[node_index].sending_flag!=1: packet_toSend_id=self.node_list[node_index].packets_ToSend_id[0] #包未到达且非在传----->生成trans_task if self.packets_list[packet_toSend_id].arrive_flag==0 and self.packets_list[packet_toSend_id].in_TR==0: #传输和接收节点决策 send_node=self.node_list[node_index] Action=action[node_index]####################################################### next_hop_id,current_freqB,current_amp_send=Action[0],Action[1:N_ACTION_C],Action[N_ACTION_C] send_node.next_hop_id=next_hop_id rec_node=self.node_list[next_hop_id] current_amp_rec=RECAMP self.node_list[node_index].current_freqB=current_freqB self.node_list[node_index].next_hop_id=next_hop_id self.node_list[node_index].current_amp_send=current_amp_send #频谱环境更新 freqB_ID_now=0 for fB_ocp in current_freqB: if node_index!=next_hop_id and fB_ocp: self.freqB_list[freqB_ID_now].append(node_index) self.freqB_use_history[freqB_ID_now].append(node_index) freqB_ID_now+=1 #T-T生成与T-T环境更新
trans_task_now=Trans_task(self.trans_task_cnt,send_node,rec_node,self.packets_list[packet_toSend_id])
3
2023-12-30 09:35:30+00:00
8k
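The env.py record above builds its adjacency matrix in topology_update by connecting every pair of nodes whose random_waypoint positions lie within COM_RANGE of each other. A small sketch of that distance-threshold rule follows; NODE_MAX, MOV_AREA and COM_RANGE are made-up stand-ins for the constants the record imports from src.parameter, and the positions are sampled once rather than drawn from the pymobility generator.

import numpy as np

# Hedged sketch of the distance-threshold adjacency used by topology_update above.
NODE_MAX, MOV_AREA, COM_RANGE = 6, 100.0, 40.0

rng = np.random.default_rng(0)
positions = rng.uniform(0, MOV_AREA, size=(NODE_MAX, 2))      # one (x, y) per node

topology = np.zeros((NODE_MAX, NODE_MAX), dtype=int)
for a in range(NODE_MAX):
    for b in range(NODE_MAX):
        if np.linalg.norm(positions[a] - positions[b]) <= COM_RANGE:
            topology[a, b] = topology[b, a] = 1               # bidirectional link

# Neighbour lists, analogous to topology_actSpace / live_node_ID_list in the record.
neighbours = [np.nonzero(topology[i])[0].tolist() for i in range(NODE_MAX)]
print(topology)
print(neighbours)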
davidsvy/fractal_video
src/transform/compose.py
[ { "identifier": "Transform_Camera", "path": "src/transform/camera.py", "snippet": "class Transform_Camera(nn.Module):\n\n def __init__(self, prob_shift, prob_zoom, prob_shake, n_steps):\n super(Transform_Camera, self).__init__()\n assert 0 <= prob_shift <= 1\n assert 0 <= prob_zoom <= 1\n assert 0 <= prob_shake <= 1\n assert n_steps > 0\n\n self.prob_shift, self.prob_zoom = prob_shift, prob_zoom\n self.prob_shake, self.n_steps = prob_shake, n_steps\n\n self.ampl_min = (1, 2)\n self.ampl_max = (2, 4)\n\n self.slope_ampl_min = (self.ampl_min[1] - self.ampl_min[0]) / n_steps\n self.slope_ampl_max = (self.ampl_max[1] - self.ampl_max[0]) / n_steps\n\n self.time = (0.1, 1.0)\n self.slope_time = (self.time[1] - self.time[0]) / n_steps\n\n self.speed_shift = (0.2, 1.0)\n self.slope_speed_shift = (\n self.speed_shift[1] - self.speed_shift[0]) / n_steps\n\n self.speed_zoom = (0.3, 1.0)\n self.slope_speed_zoom = (\n self.speed_zoom[1] - self.speed_zoom[0]) / n_steps\n\n ####################################\n # SHIFT\n ####################################\n\n def sample_params_shift(self, n_frames, step, res):\n if step >= self.n_steps:\n speed_max = self.speed_shift[1]\n time_max = self.time[1]\n\n else:\n speed_max = self.speed_shift[0] + step * self.slope_speed_shift\n time_max = self.time[0] + step * self.slope_time\n\n speed_max = max(1, speed_max * res / n_frames)\n speed_min = max(1, self.speed_shift[0] * res / n_frames)\n speed = random.uniform(speed_min, speed_max)\n\n time_max = min(n_frames, max(2, int(time_max * n_frames)))\n time_min = min(n_frames, max(2, int(self.time[0] * n_frames)))\n time = random.randint(time_min, time_max)\n\n angle = random.uniform(0, 2 * math.pi)\n\n return speed, time, angle\n\n def forward_shift(self, video, step):\n # video -> [B, C, T, H, W]\n B, C, T, H, W = video.shape\n device = video.device\n\n speed, time, angle = self.sample_params_shift(\n n_frames=T, res=H, step=step)\n time_before = random.randint(0, T - time)\n # angle, speed, time -> [1]\n\n curve = torch.arange(time, dtype=torch.float32, device=device) * speed\n curve_h = (math.sin(angle) * curve).to(torch.int32)\n curve_w = (math.cos(angle) * curve).to(torch.int32)\n # curve_h, curve_w, curve_t -> [n_time]\n\n end_h = curve_h[-1].item()\n end_w = curve_w[-1].item()\n\n min_h = min(0, end_h)\n min_w = min(0, end_w)\n\n diff_h = abs(end_h) + 1\n diff_w = abs(end_w) + 1\n diff = max(diff_h, diff_w)\n\n size_interp = (T, H + diff, W + diff)\n video = F.interpolate(video, size=size_interp, mode='trilinear')\n # enlarged -> [B, C, T, H + D, W + D]\n\n curve_h = curve_h - min_h + (diff - diff_h) // 2\n curve_w = curve_w - min_w + (diff - diff_w) // 2\n\n pad = (time_before, T - time_before - time)\n curve = torch.stack([curve_h, curve_w], dim=0).to(torch.float32)\n curve_h, curve_w = F.pad(\n curve, pad=pad, mode='replicate').to(torch.int32)\n\n video = torch.stack(\n [video[..., t, h: h + H, w: w + W]\n for t, (h, w) in enumerate(zip(curve_h, curve_w))],\n dim=-3,\n )\n # video -> [B, C, T, H, W]\n\n return video\n\n ####################################\n # ZOOM\n ####################################\n\n def sample_params_zoom(self, step, n_frames):\n if step >= self.n_steps:\n speed_max = self.speed_zoom[1]\n time_max = self.time[1]\n\n else:\n speed_max = self.speed_zoom[0] + step * self.slope_speed_zoom\n time_max = self.time[0] + step * self.slope_time\n\n speed_max = speed_max / n_frames\n speed_min = self.speed_zoom[0] / n_frames\n speed = random.uniform(speed_min, speed_max)\n\n time_max = 
min(n_frames, max(2, int(time_max * n_frames)))\n time_min = min(n_frames, max(2, int(self.time[0] * n_frames)))\n time = random.randint(time_min, time_max)\n\n return speed, time\n\n def forward_zoom(self, video, step):\n # video -> [B, C, T, H, W]\n B, C, T, H, W = video.shape\n device = video.device\n\n speed, time = self.sample_params_zoom(step=step, n_frames=T)\n time_before = random.randint(0, T - time)\n\n scale = 1 + torch.arange(\n time, device=device, dtype=torch.float32) * speed\n if random.random() < 0.5:\n scale = torch.flip(scale, dims=(0,))\n\n pad = (time_before, T - time_before - time)\n scale = F.pad(scale[None, :], pad=pad, mode='replicate')[0]\n # scale -> [T]\n\n video = torch.stack(\n [\n TF.center_crop(F.interpolate(\n video[..., t, :, :], scale_factor=_scale.item(), mode='bilinear'), (H, W))\n for t, _scale in enumerate(scale)\n ],\n dim=-3\n )\n # video -> [B, C, T, H, W]\n\n return video\n\n ####################################\n # SHAKE\n ####################################\n\n def sample_params_shake(self, n_frames, ampl, device, res):\n n_sin = random.randint(2, 5)\n freq = sample_uniform(0.1, 1.2, size=n_sin, device=device)\n # freq -> [n_sin]\n phase = sample_uniform(0, 2 * math.pi, size=1, device=device)\n # phase -> [1]\n noise = sample_uniform(-0.3, 0.3, size=n_frames, device=device)\n # noise -> [n_frames]\n #noise = torch.cumsum(noise, dim=0)\n\n time = torch.arange(n_frames, dtype=torch.float32, device=device)\n # time -> [n_frames]\n weight = 1 / torch.arange(\n 1, 1 + n_sin, dtype=torch.float32, device=device)[None, ...]\n # weight -> [1, n_sin]\n shake = (\n weight * torch.sin(torch.outer(time, freq) + phase)).sum(dim=1) + noise\n shake = shake * ampl / shake.std() * res / 112\n shake = torch.round(shake).to(torch.int32)\n # shake -> [n_frames]\n\n return shake\n\n def sample_ampl_shake(self, step, device):\n if step >= self.n_steps:\n min_ = self.ampl_min[1]\n max_ = self.ampl_max[1]\n\n else:\n min_ = self.ampl_min[0] + step * self.slope_ampl_min\n max_ = self.ampl_max[0] + step * self.slope_ampl_max\n\n ampl = sample_uniform(min_, max_, size=1, device=device)\n\n return ampl\n\n def forward_shake(self, video, step):\n \"\"\"Based on:\n https://ieeexplore.ieee.org/document/6706422\n https://www.shaopinglu.net/index.files/ICIP2018.pdf\n \"\"\"\n # video -> [B, C, T, H, W]\n T, H, W = video.shape[-3:]\n device = video.device\n\n ampl = self.sample_ampl_shake(step=step, device=device)\n\n shake_h = self.sample_params_shake(\n n_frames=T, ampl=ampl, device=device, res=H)\n shake_w = self.sample_params_shake(\n n_frames=T, ampl=ampl, device=device, res=H)\n # shake_h, shake_w -> [T]\n\n min_h, max_h = shake_h.min().item(), shake_h.max().item()\n min_w, max_w = shake_w.min().item(), shake_w.max().item()\n diff_h, diff_w = max_h - min_h + 1, max_w - min_w + 1\n diff = max(diff_h, diff_w)\n\n size_interp = (T, H + diff, W + diff)\n enlarged = F.interpolate(\n video, size=size_interp, mode='trilinear')\n # enlarged -> [B, C, T, H + D, W + D]\n enlarged = torch.clip(enlarged, 0, 1).permute(2, 0, 1, 3, 4)\n # enlarged -> [T, B, C, H + D, W + D]\n\n shake_h = shake_h - min_h + (diff - diff_h) // 2\n shake_w = shake_w - min_w + (diff - diff_w) // 2\n\n video = torch.stack(\n [v[..., h: h + H, w: w + W]\n for v, h, w in zip(enlarged, shake_h, shake_w)],\n dim=-3,\n )\n # video -> [B, C, T, H, W]\n\n return video\n\n def forward(self, video, step):\n # video -> [B, C, T, H, W]\n B, device = video.shape[0], video.device\n mask_shift = torch.rand(B, device=device) 
< self.prob_shift\n mask_zoom = torch.rand(B, device=device) < self.prob_zoom\n mask_shake = torch.rand(B, device=device) < self.prob_shake\n\n B_shift = mask_shift.sum().item()\n B_zoom = mask_zoom.sum().item()\n B_shake = mask_shake.sum().item()\n\n if B_shift > 0:\n video[mask_shift] = self.forward_shift(\n video=video[mask_shift], step=step)\n\n if B_zoom > 0:\n video[mask_zoom] = self.forward_zoom(\n video=video[mask_zoom], step=step)\n\n if B_shake > 0:\n video[mask_shake] = self.forward_shake(\n video=video[mask_shake], step=step)\n\n return video" }, { "identifier": "IMAGENET_DEFAULT_MEAN", "path": "src/transform/constants.py", "snippet": "IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)" }, { "identifier": "IMAGENET_DEFAULT_STD", "path": "src/transform/constants.py", "snippet": "IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)" }, { "identifier": "Crop_Center", "path": "src/transform/spatial.py", "snippet": "class Crop_Center(nn.Module):\n\n def __init__(self, crop_size, interpolation='bicubic', resize=True, crop_inc=True):\n super(Crop_Center, self).__init__()\n self.crop_size, self.interpolation = crop_size, interpolation\n self.resize_dim = int(\n (256 / 224) * crop_size) if crop_inc else crop_size\n self.resize = resize\n\n def forward(self, x):\n # x -> [C, T, H, W]\n if self.resize:\n x = short_side_scale(\n x, size=self.resize_dim, interpolation=self.interpolation)\n x = T.functional.center_crop(x, output_size=self.crop_size)\n\n else:\n height, width = x.shape[-2:]\n start_h = int(math.ceil((height - self.crop_size) / 2))\n start_w = int(math.ceil((width - self.crop_size) / 2))\n x = x[\n ..., start_h: start_h + self.crop_size, start_w: start_w + self.crop_size]\n\n return x" }, { "identifier": "Random_Perspective", "path": "src/transform/spatial.py", "snippet": "class Random_Perspective(T.RandomPerspective):\n\n def sample_points(self, width, height):\n h_half = height // 2\n w_half = width // 2\n\n h_start = int(self.distortion_scale * h_half)\n w_start = int(self.distortion_scale * w_half)\n\n h_end = height - int(self.distortion_scale * h_half) - 1\n w_end = width - int(self.distortion_scale * w_half) - 1\n\n topleft = [\n random.randint(0, w_start), random.randint(0, h_start)]\n topright = [\n random.randint(w_end, width - 1), random.randint(0, h_start)]\n botright = [\n random.randint(w_end, width - 1), random.randint(h_end, height - 1)]\n botleft = [\n random.randint(0, w_start), random.randint(h_end, height - 1)]\n\n points_end = [topleft, topright, botright, botleft]\n points_start = [\n [0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]\n\n return points_start, points_end\n\n def forward(self, video):\n # video -> [B, C, T, H, W]\n B, C, T, H, W = video.shape\n device = video.device\n mask = torch.rand(B, device=device) < self.p\n B_tr = mask.sum().item()\n\n if B_tr == 0:\n return video\n\n video_tr = torch.flatten(video[mask], start_dim=1, end_dim=2)\n # video_tr -> [B_tr, C * T, H, W]\n\n points_start, points_end = self.sample_points(width=W, height=H)\n video_tr = TF.perspective(\n video_tr, startpoints=points_start, endpoints=points_end,\n interpolation=self.interpolation, fill=0.0)\n # video_tr -> [B_tr, C * T, H, W]\n video_tr = F.interpolate(video_tr, scale_factor=1.4, mode='bilinear')\n video_tr = TF.center_crop(video_tr, output_size=(H, W))\n video_tr = video_tr.reshape(B_tr, C, T, H, W)\n # video_tr -> [B_tr, C, T, H, W]\n\n video[mask] = video_tr\n\n return video" } ]
import pytorchvideo.transforms as Tv
import torch
import torch.nn as nn
import torchvision.transforms as T
from src.transform.camera import Transform_Camera
from src.transform.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from src.transform.spatial import Crop_Center, Random_Perspective
3,969
def transform_inner_train(crop_size=112, min_scale=0.5, interp='bicubic'):
    crop_tr = Tv.RandomResizedCrop(
        target_height=crop_size,
        target_width=crop_size,
        scale=(min_scale, 1),
        aspect_ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation=interp,
    )

    transform = T.Compose([
        Tv.ConvertUint8ToFloat(),
        crop_tr,
    ])

    return transform


def transform_inner_val(crop_size=112, resize=True, interp='bicubic', crop_inc=True):
    return T.Compose([
        Tv.ConvertUint8ToFloat(),
        Crop_Center(
            crop_size=crop_size,
            interpolation=interp,
            resize=resize,
            crop_inc=crop_inc,
        ),
        Tv.Normalize(
            mean=IMAGENET_DEFAULT_MEAN,
def transform_inner_train(crop_size=112, min_scale=0.5, interp='bicubic'):
    crop_tr = Tv.RandomResizedCrop(
        target_height=crop_size,
        target_width=crop_size,
        scale=(min_scale, 1),
        aspect_ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation=interp,
    )

    transform = T.Compose([
        Tv.ConvertUint8ToFloat(),
        crop_tr,
    ])

    return transform


def transform_inner_val(crop_size=112, resize=True, interp='bicubic', crop_inc=True):
    return T.Compose([
        Tv.ConvertUint8ToFloat(),
        Crop_Center(
            crop_size=crop_size,
            interpolation=interp,
            resize=resize,
            crop_inc=crop_inc,
        ),
        Tv.Normalize(
            mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
2
2023-12-27 19:43:45+00:00
8k
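The compose.py record above assembles its validation pipeline from pytorchvideo and torchvision transforms: uint8-to-float conversion, a centre crop via Crop_Center, then normalisation with the ImageNet statistics. A rough, plain-torch sketch of those three steps might read as follows; it skips the optional short-side resize, reuses the record's default 112-pixel crop and ImageNet mean/std constants, and the dummy clip shape is an assumption.

import torch

# Hedged sketch of the val-time steps composed by transform_inner_val above,
# written with plain torch ops so it runs without pytorchvideo installed.
MEAN = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1, 1)
STD = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1, 1)

def preprocess_val(video, crop=112):
    # video -> [C, T, H, W], uint8
    x = video.float() / 255.0                          # ConvertUint8ToFloat
    h, w = x.shape[-2:]
    top, left = (h - crop) // 2, (w - crop) // 2       # centre crop, no resize
    x = x[..., top:top + crop, left:left + crop]
    return (x - MEAN) / STD                            # per-channel normalisation

clip = torch.randint(0, 256, (3, 8, 160, 160), dtype=torch.uint8)
print(preprocess_val(clip).shape)                      # torch.Size([3, 8, 112, 112])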
Simuoss/THE-TTF2HEX_Extractor
extractor.py
[ { "identifier": "char_in_font", "path": "in_font.py", "snippet": "def char_in_font(char: str, cfont: TTFont) -> bool:\n \"\"\"判断字符是否在字体里\n\n Args:\n char (str): 单字符文本\n fontfile (str): 字体文件\n\n Returns:\n bool: 是否在字体里\n \"\"\"\n code = char.encode(\"unicode-escape\").decode()\n if \"\\\\u\" in code:\n code = \"uni\" + code[2:].upper()\n \n glyf = cfont[\"glyf\"]\n if not glyf.has_key(code):\n return False\n return len(glyf[code].getCoordinates(0)[0]) > 0" }, { "identifier": "Worker", "path": "woker.py", "snippet": "class Worker(QThread):\n update_main_progress = Signal(int, str)\n\n\n def __init__(self, font_path, font_size, canvas_size_width, canvas_size_height, offset_x, offset_y, flip, mirror, rotate, reverse_color, extract_range, parent=None):\n super().__init__(parent)\n self.font_path = font_path\n self.font_size = font_size\n self.canvas_size_width = canvas_size_width\n self.canvas_size_height = canvas_size_height\n self.offset_x = offset_x\n self.offset_y = offset_y\n self.flip = flip\n self.mirror = mirror\n self.rotate = rotate\n self.reverse_color = reverse_color\n self.extract_range = extract_range\n\n \n def run(self):\n # 字体文件路径\n font_path = self.font_path\n # 画布大小\n canvas_size_width = self.canvas_size_width\n canvas_size_height = self.canvas_size_height\n # 字体大小\n font_size = self.font_size\n # 反色\n reverse_color = self.reverse_color\n # 字符偏移量\n character_offset_x = self.offset_x\n character_offset_y = self.offset_y\n # 是否让字符上下翻转\n flip_character = self.flip\n # 是否让字符左右翻转\n mirror_character = self.mirror\n # 顺时针旋转多少度\n rotate_degree = self.rotate\n # 要提取的字符列表\n ori_characters = self.extract_range\n\n\n # 开启进度条\n total = len(ori_characters)\n step = 0\n not_in = 0\n self.update_main_progress.emit(0,\"开始处理\")\n\n # 使用PIL加载字体文件\n font = ImageFont.truetype(font_path, size=font_size)\n cfont = TTFont(font_path)\n # 创建一个空白画布来绘制字符\n image = Image.new(\"1\", (canvas_size_width, canvas_size_height), reverse_color)\n draw = ImageDraw.Draw(image)\n\n # 生成类似C语言数组的输出\n base_font_name = os.path.basename(font_path)\n # 使用正则表达式替换非字母、数字和下划线的字符为下划线\n base_font_name = re.sub(r'[^a-zA-Z0-9_]', '_', base_font_name)\n\n # save\n with open(f\"{base_font_name}.h\", \"w\", encoding=\"utf-8\") as f:\n f.writelines(f\"#ifndef __{base_font_name.upper()}_FONT_H__\\n#define __{base_font_name.upper()}_FONT_H__\\n\\n\")\n f.writelines(\"const unsigned char code F_zh_cn_8x8[][8] = {\\n\")\n for char in ori_characters:\n if not char_in_font(char, cfont):\n not_in += 1\n continue\n\n\n # 清空图像内容\n draw.rectangle([0, 0, canvas_size_width, canvas_size_height], fill=reverse_color)#参数为左上角和右下角的坐标\n # 在图像上绘制字符\n draw.text((character_offset_x,character_offset_y), char, font=font, fill= not reverse_color)\n \n # 获取字符的点阵表示\n pixels = list(image.getdata())\n bitmap = [pixels[i:i+canvas_size_width] for i in range(0, len(pixels), canvas_size_width)]\n \n # 把每个char的每一行都翻转一下\n if flip_character:\n bitmap.reverse()\n # 把每个char的每一列都翻转一下\n if mirror_character:\n bitmap = [row[::-1] for row in bitmap]\n \n # 顺时针旋转\n if rotate_degree == '90°':\n bitmap = list(zip(*bitmap[::-1]))\n elif rotate_degree == '180°':\n bitmap = [row[::-1] for row in bitmap[::-1]]\n elif rotate_degree == '270°':\n bitmap = list(zip(*bitmap))[::-1]\n\n # 以UTF-8写入文件\n try:\n f.writelines(f\" // {char.encode('unicode-escape').decode()}: {char}\\n\")\n except:\n f.writelines(f\" // 无法显示该字符\\n\")\n f.writelines(\" {\")\n for row in bitmap:\n # 所以row是一个长度为8的数组,每个元素是一个bit。现在要把这个数组转换成一个byte,所以要把这8个bit拼接成一个byte。\n row_hex = 0\n for i in range(8):\n row_hex 
|= row[i] << i\n # 用十六进制表示byte\n f.writelines(f\"0x{row_hex:02X}, \")\n \n f.writelines(\"},\\n\")\n\n step += 1\n self.update_main_progress.emit(step / total * 100, f\"{step / total * 100:.1f}% 已处理{step}/{total}个字符\")\n f.writelines(\"};\\n\\n#endif\\n\")\n f.writelines(f\"// 提取完成,应提取{total}个字符,实际提取{total - not_in}个字符,有{not_in}个字符不在字体里。\")\n\n # 关闭进度条\n self.update_main_progress.emit(100, f\"完成!应提{total}字,实提{total - not_in}字,{not_in}字符不存在。已保存在同级目录。\")" } ]
from PySide6.QtWidgets import QMainWindow, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, \
    QFileDialog, QCheckBox, QLineEdit, QSpinBox, QComboBox, QProgressBar, QTextEdit
from PySide6.QtGui import QImage, QPixmap
from PySide6.QtCore import Qt
from PIL import Image, ImageFont, ImageDraw
from fontTools.ttLib import TTFont
from in_font import char_in_font
from woker import Worker
4,464
if not preview_text.isprintable(): self.update_main_progress(0, "这个字符显示不出呢") return # 如果整个preview_text存在字符不在字体里,则不显示 if not all(char_in_font(c, TTFont(font_path)) for c in preview_text): self.update_main_progress(0, "字体里没有这个字呢") return # 创建一个空白画布来绘制字符 canvas = Image.new("1", (canvas_size_width, canvas_size_height), reverse_color) # 使用PIL加载字体文件 font = ImageFont.truetype(font_path, size=font_size) draw = ImageDraw.Draw(canvas) # 清空图像内容(不一定是8*8) draw.rectangle([0, 0, canvas_size_width, canvas_size_height], fill=reverse_color)#参数为左上角和右下角的坐标 # 在图像上绘制字符 draw.text((offset_x,offset_y), preview_text, font=font, fill= not reverse_color) # 上下翻转 if flip: canvas = canvas.transpose(Image.FLIP_TOP_BOTTOM) # 左右翻转 if mirror: canvas = canvas.transpose(Image.FLIP_LEFT_RIGHT) # 顺时针旋转 if rotate == "90°": canvas = canvas.transpose(Image.ROTATE_270)# 逆时针旋转270°,即顺时针旋转90° elif rotate == "180°": canvas = canvas.transpose(Image.ROTATE_180) elif rotate == "270°": canvas = canvas.transpose(Image.ROTATE_90) # 获取字符的点阵表示 pixels = list(canvas.getdata()) bitmap = [pixels[i:i+canvas_size_width] for i in range(0, len(pixels), canvas_size_width)] # 将点阵转换为硬缩放不大于400*400的图像 zoom = 300 // canvas_size_width canvas = Image.new("1", (canvas_size_width*zoom, canvas_size_height*zoom)) draw = ImageDraw.Draw(canvas) for y in range(canvas_size_height): for x in range(canvas_size_width): if bitmap[y][x]: draw.rectangle([x*zoom, y*zoom, (x+1)*zoom, (y+1)*zoom], fill=0) else: draw.rectangle([x*zoom, y*zoom, (x+1)*zoom, (y+1)*zoom], fill=1) # 在将 PIL.Image 转换为 QImage 对象时,需要将其模式转换为 RGB888 或 ARGB32 canvas = canvas.convert("RGB") # 将 PIL.Image 转换为 QImage 对象 qimage = QImage(canvas.tobytes(), canvas.width, canvas.height, QImage.Format_RGB888) # 然后再将 QImage 对象传递给 QPixmap.fromImage 方法 self.preview_image_label.setPixmap(QPixmap.fromImage(qimage)) def extract_font(self): font_path = self.file_path_textbox.text() font_size = self.font_size_spinbox.value() canvas_size_width = self.canvas_size_width_spinbox.value() canvas_size_height = self.canvas_size_height_spinbox.value() offset_x = self.offset_x_spinbox.value() offset_y = self.offset_y_spinbox.value() flip = self.flip_checkbox.isChecked() mirror = self.mirror_checkbox.isChecked() rotate = self.rotate_combobox.currentText() reverse_color = self.invert_checkbox.isChecked() input_text = self.extract_from_text_input.toPlainText() if not font_path: self.update_main_progress(0, "还没选字体呢") return # 如果没选中任何提取范围,则默认提取 ASCII 可见字符集 if not self.ascii_checkbox.isChecked() and not self.chinese_checkbox.isChecked() \ and not self.common_chinese_checkbox.isChecked() and not self.chinese_punctuation_checkbox.isChecked() \ and not self.custom_range_checkbox.isChecked() and not self.extract_from_text_checkbox.isChecked(): self.ascii_checkbox.setChecked(True) # 如果选择了自选区域,则检查输入是否合法 if self.custom_range_checkbox.isChecked() and not self.select_all_checkbox.isChecked(): range_from = self.range_from_input.text() range_to = self.range_to_input.text() if not range_from or not range_to: self.update_main_progress(0,"自选区域想选什么呢?") return if not range_from.startswith("0x") or not range_to.startswith("0x"): self.update_main_progress(0,"自选区域要0x开头哦") return range_from = int(range_from, 16) range_to = int(range_to, 16) if range_from > range_to: self.update_main_progress(0,"自选区域要从小到大哦") return # 确定提取范围 extract_range = "" if self.extract_from_text_checkbox.isChecked():# 从文本提取 extract_range = input_text elif self.select_all_checkbox.isChecked():# 全选 extract_range = [chr(i) for i in range(0x0000, 0xFFFF + 1)] else: if 
self.ascii_checkbox.isChecked():# ASCII 可见字符集 extract_range += "".join([chr(i) for i in range(0x0020, 0x007E + 1)]) if self.chinese_checkbox.isChecked():# 所有汉字 extract_range += "".join([chr(i) for i in range(0x4E00, 0x9FFF + 1)]) if self.common_chinese_checkbox.isChecked():# 常用汉字 extract_range += "".join([chr(i) for i in range(0x4E00, 0x9FA5 + 1)]) if self.chinese_punctuation_checkbox.isChecked():# 汉字标点符号 extract_range += "".join([chr(i) for i in range(0x3000, 0x303F + 1)]) if self.custom_range_checkbox.isChecked():# 自选区域 extract_range += "".join([chr(i) for i in range(range_from, range_to + 1)]) #print(f"提取范围:{extract_range}") # 创建 Worker 对象
class FontExtractorApp(QMainWindow): def __init__(self): super().__init__() self.initUI() def initUI(self): self.setWindowTitle("TTF2HEX 字体字库转换器 --- By 司沐_Simuoss @ https://github.com/Simuoss") # 主窗口包含一个水平布局 main_layout = QHBoxLayout() left_layout = QVBoxLayout() right_layout = QVBoxLayout() self.setupLeftWidgets(left_layout) self.setupRightWidgets(right_layout) main_layout.addLayout(left_layout) main_layout.addLayout(right_layout) central_widget = QWidget() central_widget.setLayout(main_layout) self.setCentralWidget(central_widget) def setupLeftWidgets(self, layout): self.setupFileSelection(layout) self.setupCanvasFontInputs(layout) self.setupCharacterOffsetInputs(layout) self.setupFlipMirrorCheckboxes(layout) self.setupRotateInvertOptions(layout) self.setupExtractFontOptions(layout) def setupRightWidgets(self, layout): self.setupMintArea(layout) self.setupPreviewArea(layout) self.setupExtractArea(layout) # 下面是拆分出来的小功能块 def setupFileSelection(self, layout): # 创建选择文件相关部件和布局 # 创建一个水平布局以容纳文件选择按钮和文件路径文本框 file_layout = QHBoxLayout() # 添加标题和选择文件标签 title_label = QLabel("选择字体文件:") file_layout.addWidget(title_label) # 创建一个不可编辑的文本框用于显示文件路径 self.file_path_textbox = QLineEdit() self.file_path_textbox.setReadOnly(True) file_layout.addWidget(self.file_path_textbox) # 创建一个按钮用于选择文件 select_file_button = QPushButton("选择文件") select_file_button.clicked.connect(self.select_font_file) file_layout.addWidget(select_file_button) # 将文件选择部件添加到主布局中 layout.addLayout(file_layout) def setupCanvasFontInputs(self, layout): # 创建画布大小、字体大小输入相关部件和布局 # 添加画布大小、字体大小输入 canvas_font_layout = QHBoxLayout() canvas_label = QLabel("画布大小(宽:高):") canvas_font_layout.addWidget(canvas_label) self.canvas_size_width_spinbox = QSpinBox() self.canvas_size_width_spinbox.setMinimum(1) self.canvas_size_width_spinbox.setValue(12) # 默认值为8 canvas_font_layout.addWidget(self.canvas_size_width_spinbox) self.canvas_size_height_spinbox = QSpinBox() self.canvas_size_height_spinbox.setMinimum(1) self.canvas_size_height_spinbox.setValue(12) canvas_font_layout.addWidget(self.canvas_size_height_spinbox) font_label = QLabel("字体大小:") canvas_font_layout.addWidget(font_label) self.font_size_spinbox = QSpinBox() self.font_size_spinbox.setMinimum(1) self.font_size_spinbox.setValue(12) # 默认值为8 canvas_font_layout.addWidget(self.font_size_spinbox) layout.addLayout(canvas_font_layout) def setupCharacterOffsetInputs(self, layout): # 创建字符偏移量输入相关部件和布局 # 添加字符偏移量输入框及方向提示标签 offset_layout = QHBoxLayout() offset_label = QLabel("字符偏移量(可为负):") offset_layout.addWidget(offset_label) self.offset_x_spinbox = QSpinBox() self.offset_x_spinbox.setMinimum(-10) self.offset_x_spinbox.setMaximum(10) offset_layout.addWidget(self.offset_x_spinbox) self.offset_x_direction_label = QLabel("向右→ ") offset_layout.addWidget(self.offset_x_direction_label) self.offset_y_spinbox = QSpinBox() self.offset_y_spinbox.setMinimum(-10) self.offset_y_spinbox.setMaximum(10) offset_layout.addWidget(self.offset_y_spinbox) self.offset_y_direction_label = QLabel("↓向下") offset_layout.addWidget(self.offset_y_direction_label) layout.addLayout(offset_layout) def setupFlipMirrorCheckboxes(self, layout): # 创建上下翻转和左右翻转复选框相关部件和布局 # 添加上下翻转和左右翻转复选框 flip_mirror_layout = QHBoxLayout() flip_label = QLabel("上下翻转:") flip_mirror_layout.addWidget(flip_label) self.flip_checkbox = QCheckBox() flip_mirror_layout.addWidget(self.flip_checkbox) mirror_label = QLabel("左右翻转:") flip_mirror_layout.addWidget(mirror_label) self.mirror_checkbox = QCheckBox() flip_mirror_layout.addWidget(self.mirror_checkbox) 
layout.addLayout(flip_mirror_layout) def setupRotateInvertOptions(self, layout): # 创建旋转角度和反色选项相关部件和布局 # 并排添加旋转角度和反色选项 rotate_and_invert_layout = QHBoxLayout() # 添加旋转角度选项 rotate_layout = QHBoxLayout() rotate_label = QLabel("顺时针旋转角度:") rotate_layout.addWidget(rotate_label) self.rotate_combobox = QComboBox() self.rotate_combobox.addItem("0°") self.rotate_combobox.addItem("90°") self.rotate_combobox.addItem("180°") self.rotate_combobox.addItem("270°") rotate_layout.addWidget(self.rotate_combobox) rotate_and_invert_layout.addLayout(rotate_layout) # 添加反色选项 invert_label = QLabel(" 反色:") rotate_and_invert_layout.addWidget(invert_label) self.invert_checkbox = QCheckBox() rotate_and_invert_layout.addWidget(self.invert_checkbox) layout.addLayout(rotate_and_invert_layout) def setupExtractFontOptions(self, layout): # 创建提取字体选项相关部件和布局 # 添加提取字体选项 # 创建布局 extract_range_layout = QVBoxLayout() self.extract_font_label = QLabel("选择提取字体范围:") extract_range_layout.addWidget(self.extract_font_label) # 创建全选复选框 self.select_all_checkbox = QCheckBox("所有字体(很大,慎用)") self.select_all_checkbox.stateChanged.connect(self.toggle_select_all) extract_range_layout.addWidget(self.select_all_checkbox) # 创建其他提取范围复选框 self.ascii_checkbox = QCheckBox("ASCII可见字符集(0x0020, 0x007E)") self.chinese_checkbox = QCheckBox("所有汉字(0x4E00, 0x9FFF)") self.common_chinese_checkbox = QCheckBox("常用汉字(0x4E00, 0x9FA5)") self.chinese_punctuation_checkbox = QCheckBox("汉字标点符号(0x3000, 0x303F)") extract_range_layout.addWidget(self.ascii_checkbox) extract_range_layout.addWidget(self.chinese_checkbox) extract_range_layout.addWidget(self.common_chinese_checkbox) extract_range_layout.addWidget(self.chinese_punctuation_checkbox) # 创建自选区域复选框 self.custom_range_checkbox = QCheckBox("自选区域") self.custom_range_checkbox.stateChanged.connect(self.toggle_custom_range) extract_range_layout.addWidget(self.custom_range_checkbox) # 创建水平布局用于放置自选区域输入框 custom_range_layout = QHBoxLayout() # 创建自选区域输入框 self.range_from_input = QLineEdit() self.range_from_input.setPlaceholderText("开始(0x0000)") self.range_to_input = QLineEdit() self.range_to_input.setPlaceholderText("结束(0xFFFF)") self.range_from_input.setEnabled(False) self.range_to_input.setEnabled(False) custom_range_layout.addWidget(self.range_from_input) custom_range_layout.addWidget(self.range_to_input) extract_range_layout.addLayout(custom_range_layout) # 创建从文本复选框,后面跟一个文本框 self.extract_from_text_checkbox = QCheckBox("从文本提取(一般用这个)") self.extract_from_text_checkbox.stateChanged.connect(self.toggle_extract_from_text) extract_range_layout.addWidget(self.extract_from_text_checkbox) self.extract_from_text_input = QTextEdit() self.extract_from_text_input.setPlaceholderText("输入要提取的字符") self.extract_from_text_input.setEnabled(False) extract_range_layout.addWidget(self.extract_from_text_input) layout.addLayout(extract_range_layout) # 标志变量来控制复选框状态 self.custom_range_enabled = False def setupMintArea(self, layout): # 预览区域标题 preview_label = QLabel("预览区域:") layout.addWidget(preview_label) # 提示标签 preview_tip_label1 = QLabel("有些字体标的是12px,但实际上调到15px才是正常的12px状态,需要多微调参数试试") layout.addWidget(preview_tip_label1) preview_tip_label2 = QLabel("有些屏幕显示点阵是旋转过的,比如1306的oled屏就需要上下翻转+旋转90°才是正的") layout.addWidget(preview_tip_label2) def setupPreviewArea(self, layout): # 创建预览图像展示框 self.preview_image_label = QLabel() self.preview_image_label.setAlignment(Qt.AlignCenter) self.preview_image_label.setFixedSize(400, 400) self.preview_image_label.setStyleSheet("border: 1px solid black;") layout.addWidget(self.preview_image_label) # 创建预览操作区域 preview_options_layout = 
QHBoxLayout() # 创建预览字符输入框 self.preview_input = QLineEdit() self.preview_input.setPlaceholderText("输入字符或 Unicode 编码") # 默认是A self.preview_input.setText("A") preview_options_layout.addWidget(self.preview_input) # 创建预览按钮 preview_button = QPushButton("预览") preview_button.clicked.connect(self.preview_font) preview_options_layout.addWidget(preview_button) layout.addLayout(preview_options_layout) def setupExtractArea(self, layout): # 创建提取字体按钮 extract_button = QPushButton("提取字体") extract_button.clicked.connect(self.extract_font) layout.addWidget(extract_button) # 创建主任务的进度条 self.main_progress_bar = QProgressBar() self.main_progress_bar.setRange(0, 100) self.main_progress_bar.setValue(0) self.main_progress_bar.setFormat("等待开始") layout.addWidget(self.main_progress_bar) def update_main_progress(self, progress, text): self.main_progress_bar.setValue(progress) self.main_progress_bar.setFormat(text) def select_font_file(self): options = QFileDialog.Options() file_dialog = QFileDialog() file_path, _ = file_dialog.getOpenFileName(self, "选择字体文件", "", "TrueType 字体文件 (*.ttf)", options=options) if file_path: self.file_path_textbox.setText(file_path) #print(f"已选择字体文件:{file_path}") def toggle_extract_from_text(self, state): # 如果选中了从文本提取,则禁用其他提取范围复选框 if state: self.select_all_checkbox.setChecked(False) self.select_all_checkbox.setEnabled(False) self.ascii_checkbox.setChecked(False) self.ascii_checkbox.setEnabled(False) self.chinese_checkbox.setChecked(False) self.chinese_checkbox.setEnabled(False) self.common_chinese_checkbox.setChecked(False) self.common_chinese_checkbox.setEnabled(False) self.chinese_punctuation_checkbox.setChecked(False) self.chinese_punctuation_checkbox.setEnabled(False) self.custom_range_checkbox.setChecked(False) self.custom_range_checkbox.setEnabled(False) self.extract_from_text_input.setEnabled(True) else: self.select_all_checkbox.setEnabled(True) self.ascii_checkbox.setEnabled(True) self.chinese_checkbox.setEnabled(True) self.common_chinese_checkbox.setEnabled(True) self.chinese_punctuation_checkbox.setEnabled(True) self.custom_range_checkbox.setEnabled(True) self.extract_from_text_input.setEnabled(False) def toggle_select_all(self, state): self.ascii_checkbox.setChecked(state) self.ascii_checkbox.setEnabled(not state) self.chinese_checkbox.setChecked(state) self.chinese_checkbox.setEnabled(not state) self.common_chinese_checkbox.setChecked(state) self.common_chinese_checkbox.setEnabled(not state) self.chinese_punctuation_checkbox.setChecked(state) self.chinese_punctuation_checkbox.setEnabled(not state) self.custom_range_checkbox.setChecked(state) self.custom_range_checkbox.setEnabled(not state) self.extract_from_text_checkbox.setChecked(False) self.extract_from_text_checkbox.setEnabled(not state) self.toggle_custom_range(state) # 切换自选区域输入框的状态 def toggle_custom_range(self, state): if not self.custom_range_enabled: self.range_from_input.setEnabled(True) self.range_to_input.setEnabled(True) self.custom_range_enabled = True else: self.range_from_input.setEnabled(False) self.range_to_input.setEnabled(False) self.custom_range_enabled = False def preview_font(self): font_path = self.file_path_textbox.text() font_size = self.font_size_spinbox.value() canvas_size_width = self.canvas_size_width_spinbox.value() canvas_size_height = self.canvas_size_height_spinbox.value() offset_x = self.offset_x_spinbox.value() offset_y = self.offset_y_spinbox.value() flip = self.flip_checkbox.isChecked() mirror = self.mirror_checkbox.isChecked() rotate = self.rotate_combobox.currentText() preview_text = 
self.preview_input.text() reverse_color = self.invert_checkbox.isChecked() if not font_path: self.update_main_progress(0, "还没选字体呢") return if not preview_text: self.update_main_progress(0, "还没输入预览字符呢") return if not preview_text.isprintable(): self.update_main_progress(0, "这个字符显示不出呢") return # If any character of preview_text is missing from the font, do not display it if not all(char_in_font(c, TTFont(font_path)) for c in preview_text): self.update_main_progress(0, "字体里没有这个字呢") return # Create a blank canvas to draw the character on canvas = Image.new("1", (canvas_size_width, canvas_size_height), reverse_color) # Load the font file with PIL font = ImageFont.truetype(font_path, size=font_size) draw = ImageDraw.Draw(canvas) # Clear the image content (not necessarily 8*8) draw.rectangle([0, 0, canvas_size_width, canvas_size_height], fill=reverse_color)# arguments are the top-left and bottom-right coordinates # Draw the character on the image draw.text((offset_x,offset_y), preview_text, font=font, fill= not reverse_color) # Flip vertically if flip: canvas = canvas.transpose(Image.FLIP_TOP_BOTTOM) # Flip horizontally if mirror: canvas = canvas.transpose(Image.FLIP_LEFT_RIGHT) # Rotate clockwise if rotate == "90°": canvas = canvas.transpose(Image.ROTATE_270)# rotate 270° counter-clockwise, i.e. 90° clockwise elif rotate == "180°": canvas = canvas.transpose(Image.ROTATE_180) elif rotate == "270°": canvas = canvas.transpose(Image.ROTATE_90) # Get the dot-matrix representation of the character pixels = list(canvas.getdata()) bitmap = [pixels[i:i+canvas_size_width] for i in range(0, len(pixels), canvas_size_width)] # Hard-scale the dot matrix into an image no larger than 400*400 zoom = 300 // canvas_size_width canvas = Image.new("1", (canvas_size_width*zoom, canvas_size_height*zoom)) draw = ImageDraw.Draw(canvas) for y in range(canvas_size_height): for x in range(canvas_size_width): if bitmap[y][x]: draw.rectangle([x*zoom, y*zoom, (x+1)*zoom, (y+1)*zoom], fill=0) else: draw.rectangle([x*zoom, y*zoom, (x+1)*zoom, (y+1)*zoom], fill=1) # To convert the PIL.Image to a QImage, its mode must first be converted to RGB888 or ARGB32 canvas = canvas.convert("RGB") # Convert the PIL.Image to a QImage object qimage = QImage(canvas.tobytes(), canvas.width, canvas.height, QImage.Format_RGB888) # Then pass the QImage object to QPixmap.fromImage self.preview_image_label.setPixmap(QPixmap.fromImage(qimage)) def extract_font(self): font_path = self.file_path_textbox.text() font_size = self.font_size_spinbox.value() canvas_size_width = self.canvas_size_width_spinbox.value() canvas_size_height = self.canvas_size_height_spinbox.value() offset_x = self.offset_x_spinbox.value() offset_y = self.offset_y_spinbox.value() flip = self.flip_checkbox.isChecked() mirror = self.mirror_checkbox.isChecked() rotate = self.rotate_combobox.currentText() reverse_color = self.invert_checkbox.isChecked() input_text = self.extract_from_text_input.toPlainText() if not font_path: self.update_main_progress(0, "还没选字体呢") return # If no extraction range is selected, default to the ASCII printable character set if not self.ascii_checkbox.isChecked() and not self.chinese_checkbox.isChecked() \ and not self.common_chinese_checkbox.isChecked() and not self.chinese_punctuation_checkbox.isChecked() \ and not self.custom_range_checkbox.isChecked() and not self.extract_from_text_checkbox.isChecked(): self.ascii_checkbox.setChecked(True) # If the custom range is selected, check that the input is valid if self.custom_range_checkbox.isChecked() and not self.select_all_checkbox.isChecked(): range_from = self.range_from_input.text() range_to = self.range_to_input.text() if not range_from or not range_to: self.update_main_progress(0,"自选区域想选什么呢?") return if not range_from.startswith("0x") or not range_to.startswith("0x"): self.update_main_progress(0,"自选区域要0x开头哦") return range_from = int(range_from, 16) range_to = int(range_to, 16) if range_from > range_to: self.update_main_progress(0,"自选区域要从小到大哦") return # Determine the extraction range extract_range = "" if
self.extract_from_text_checkbox.isChecked():# extract from text extract_range = input_text elif self.select_all_checkbox.isChecked():# select all extract_range = [chr(i) for i in range(0x0000, 0xFFFF + 1)] else: if self.ascii_checkbox.isChecked():# ASCII printable character set extract_range += "".join([chr(i) for i in range(0x0020, 0x007E + 1)]) if self.chinese_checkbox.isChecked():# all CJK characters extract_range += "".join([chr(i) for i in range(0x4E00, 0x9FFF + 1)]) if self.common_chinese_checkbox.isChecked():# common Chinese characters extract_range += "".join([chr(i) for i in range(0x4E00, 0x9FA5 + 1)]) if self.chinese_punctuation_checkbox.isChecked():# CJK punctuation extract_range += "".join([chr(i) for i in range(0x3000, 0x303F + 1)]) if self.custom_range_checkbox.isChecked():# custom range extract_range += "".join([chr(i) for i in range(range_from, range_to + 1)]) #print(f"Extraction range: {extract_range}") # Create the Worker object
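The extract_font logic above unions the Unicode code-point ranges chosen via the checkboxes into a single string of characters. A minimal, GUI-free sketch of that range-union step follows; the function and parameter names are illustrative and not part of the repository:

def build_extract_range(ascii_visible=True, all_cjk=False, common_cjk=False,
                        cjk_punct=False, custom=None):
    # Each entry is an inclusive (start, end) pair of code points,
    # matching the ranges used by the checkboxes above.
    ranges = []
    if ascii_visible:
        ranges.append((0x0020, 0x007E))
    if all_cjk:
        ranges.append((0x4E00, 0x9FFF))
    if common_cjk:
        ranges.append((0x4E00, 0x9FA5))
    if cjk_punct:
        ranges.append((0x3000, 0x303F))
    if custom is not None:
        ranges.append(custom)
    return "".join(chr(cp) for start, end in ranges for cp in range(start, end + 1))

# Example: ASCII printable characters plus the degree sign
chars = build_extract_range(ascii_visible=True, custom=(0x00B0, 0x00B0))
print(len(chars))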
self.worker = Worker(font_path, font_size, canvas_size_width, canvas_size_height, offset_x, offset_y, flip, mirror, rotate, reverse_color, extract_range)
1
2023-12-30 06:38:36+00:00
8k
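The row above extracts per-character dot matrices for small displays (the hint labels mention 1306-type OLED panels that need a vertical flip plus a 90° rotation). The Worker class that performs the batch extraction is not shown here, so the byte layout it emits is unknown; the sketch below only illustrates one common horizontal, MSB-first packing of a 1-bit bitmap, with hypothetical names:

def pack_bitmap_rows(bitmap):
    # bitmap: list of rows, each row a list of 0/1 pixels (as built in preview_font).
    packed = bytearray()
    for row in bitmap:
        byte, bits = 0, 0
        for pixel in row:
            byte = (byte << 1) | (1 if pixel else 0)
            bits += 1
            if bits == 8:
                packed.append(byte)
                byte, bits = 0, 0
        if bits:
            packed.append(byte << (8 - bits))  # pad the last partial byte of the row
    return bytes(packed)

# 8x8 checkerboard test pattern
demo = [[(x + y) % 2 for x in range(8)] for y in range(8)]
print(pack_bitmap_rows(demo).hex())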
ysyBrenda/Transformer-For-Geochemical-Anomaly-Detection
train.py
[ { "identifier": "Transformer", "path": "transformer/Models.py", "snippet": "class Transformer(nn.Module):\n ''' A sequence to sequence model with attention mechanism. '''\n\n def __init__(\n self, src_pad_idx, trg_pad_idx,\n d_word_vec=38, d_model=38, d_inner=2048,\n n_layers=6, n_head=8, d_k=38, d_v=38, dropout=0.1, n_position=2000,\n ):\n\n super().__init__()\n\n self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx\n\n self.scale_prj = False #True\n self.d_model = d_model\n\n self.encoder = Encoder(\n n_position=n_position,\n d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,\n n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,\n pad_idx=src_pad_idx, dropout=dropout)\n\n self.decoder = Decoder(\n n_position=n_position,\n d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,\n n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,\n pad_idx=trg_pad_idx, dropout=dropout)\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n assert d_model == d_word_vec, \\\n 'To facilitate the residual connections, \\\n the dimensions of all module outputs shall be the same.'\n\n def forward(self, src_seq, trg_seq):\n\n src_mask=get_pad_mask(src_seq[:,:,0], self.src_pad_idx)\n trg_mask=trg_seq[:, :,0] #.unsqueeze(1)\n trg_mask = get_pad_mask(trg_mask, self.trg_pad_idx) & get_subsequent_mask(trg_mask)\n\n enc_output,enc_slf_attn_list = self.encoder(src_seq, src_mask,return_attns=True)\n dec_output, dec_slf_attn_list, dec_enc_attn_list= self.decoder(trg_seq, trg_mask, enc_output, src_mask,return_attns=True)\n\n seq_logit=dec_output\n\n return seq_logit.view(-1, seq_logit.size(2)),enc_slf_attn_list,dec_enc_attn_list" }, { "identifier": "ScheduledOptim", "path": "transformer/Optim.py", "snippet": "class ScheduledOptim():\n '''A simple wrapper class for learning rate scheduling'''\n\n def __init__(self, optimizer, lr_mul, d_model, n_warmup_steps):\n self._optimizer = optimizer\n self.lr_mul = lr_mul\n self.d_model = d_model\n self.n_warmup_steps = n_warmup_steps\n self.n_steps = 0\n\n\n def step_and_update_lr(self):\n \"Step with the inner optimizer\"\n self._update_learning_rate()\n self._optimizer.step()\n\n\n def zero_grad(self):\n \"Zero out the gradients with the inner optimizer\"\n self._optimizer.zero_grad()\n\n\n def _get_lr_scale(self):\n d_model = self.d_model\n n_steps, n_warmup_steps = self.n_steps, self.n_warmup_steps\n return (d_model ** -0.5) * min(n_steps ** (-0.5), n_steps * n_warmup_steps ** (-1.5))\n\n\n def _update_learning_rate(self):\n ''' Learning rate scheduling per step '''\n\n self.n_steps += 1\n lr = self.lr_mul * self._get_lr_scale()\n\n for param_group in self._optimizer.param_groups:\n param_group['lr'] = lr" } ]
import argparse import time import dill as pickle import numpy as np import random import os import torch import torch.nn.functional as F import torch.optim as optim import torch.utils.data as Data from tqdm import tqdm from transformer.Models import Transformer from transformer.Optim import ScheduledOptim from tensorboardX import SummaryWriter
3,887
torch.save(checkpoint, os.path.join(opt.output_dir, opt.fileHead, model_name)) print(' - [Info] The checkpoint file has been updated.') with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf: log_tf.write('{epoch},{loss: 8.5f},{lr:8.2e}\n'.format( epoch=epoch_i, loss=train_loss, lr=lr)) log_vf.write('{epoch},{loss: 8.5f},{lr:8.2e}\n'.format( epoch=epoch_i, loss=valid_loss, lr=lr)) if opt.use_tb: tb_writer.add_scalars('loss', {'train': train_loss, 'val': valid_loss}, epoch_i) tb_writer.add_scalar('learning_rate', lr, epoch_i) # auto break if valid_loss < best: best = valid_loss bad_counter = 0 else: bad_counter += 1 if bad_counter == patience: break log_opt_file = 'opt_file_log.log' # add with open(log_opt_file, 'a') as log_f: log_f.write(str(opt.fileHead) + '__loss__{:8.5f}\n'.format(valid_loss)) def main(): ''' Usage: python train.py -data_pkl ./data/pre_data.pkl -output_dir output -epoch 150 -b 16 -use_tb -save_mode all ''' parser = argparse.ArgumentParser() parser.add_argument('-data_pkl', default=None) # all-in-1 data pickle or bpe field parser.add_argument('-train_path', default=None) # bpe encoded data parser.add_argument('-val_path', default=None) # bpe encoded data parser.add_argument('-epoch', type=int, default=10) parser.add_argument('-b', '--batch_size', type=int, default=2048) parser.add_argument('-d_model', type=int, default=38) # 38;8 #todo parser.add_argument('-d_inner_hid', type=int, default=2048) # 64 #todo parser.add_argument('-d_k', type=int, default=38) parser.add_argument('-d_v', type=int, default=38) parser.add_argument('-n_head', type=int, default=2) parser.add_argument('-n_layers', type=int, default=4) # 6 parser.add_argument('-warmup', '--n_warmup_steps', type=int, default=4000) parser.add_argument('-lr_mul', type=float, default=2.0) # 2.0 parser.add_argument('-seed', type=int, default=None) parser.add_argument('-dropout', type=float, default=0.1) parser.add_argument('-output_dir', type=str, default=None) parser.add_argument('-use_tb', action='store_true') parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best') parser.add_argument('-no_cuda', action='store_true') parser.add_argument('-unmask', type=float, default=0.3) parser.add_argument('-l2', type=float, default=0.0) # weight_dacay parser.add_argument('-lambda_con', type=float, default=0.01) # contrast loss lambda parser.add_argument('-T', type=int, default=1) # the times of mask parser.add_argument('-isContrastLoss', action='store_true') parser.add_argument('-isRandMask', action='store_true') opt = parser.parse_args() # # ++++++++++++++++ opt.d_k = opt.d_model opt.d_v = opt.d_model opt.cuda = not opt.no_cuda opt.d_word_vec = opt.d_model # 512 ==>38 # ------Output fileHead---- opt.fileHead = 'T' + str(opt.T) + '_unmask' + str(opt.unmask) + '_h' + str(opt.n_head) + 'L' + str( opt.n_layers) + '_hid' + str(opt.d_inner_hid) + '_d' + str(opt.d_model) + '_b' + str( opt.batch_size) + '_warm' + str(opt.n_warmup_steps) + '_lrm' + str(opt.lr_mul) + '_seed' + \ str(opt.seed) + '_dr' + str(opt.dropout) +'_isCL'+str(opt.isContrastLoss)+ '_lamb'+str(opt.lambda_con) +'_ismask'+str(opt.isRandMask) if os.path.exists(os.path.join(opt.output_dir, opt.fileHead)): print('the output file is rewriting....', opt.fileHead) else: os.mkdir(os.path.join(opt.output_dir, opt.fileHead)) print('The output filename is generated: ', opt.fileHead) # https://pytorch.org/docs/stable/notes/randomness.html # For reproducibility if opt.seed is not None: torch.manual_seed(opt.seed) 
torch.backends.cudnn.benchmark = False # torch.set_deterministic(True) np.random.seed(opt.seed) random.seed(opt.seed) if not opt.output_dir: print('No experiment result will be saved.') raise if not os.path.exists(opt.output_dir): os.makedirs(opt.output_dir) if opt.batch_size < 2048 and opt.n_warmup_steps <= 4000: print('[Warning] The warmup steps may be not enough.\n' \ '(sz_b, warmup) = (2048, 4000) is the official setting.\n' \ 'Using smaller batch w/o longer warmup may cause ' \ 'the warmup stage ends with only little data trained.') device = torch.device('cuda' if opt.cuda else 'cpu') # ========= Loading Dataset =========# training_data, validation_data = prepare_dataloaders(opt, device) print("training data size:{}, validation data size:{}".format(training_data.__len__(),validation_data.__len__())) print(opt) log_opt_file = os.path.join(opt.output_dir, opt.fileHead, 'opt.log') with open(log_opt_file, 'w') as log_f: log_f.write(str(opt))
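The lr value written to the train and validation logs above comes from ScheduledOptim (included in this row's context field), which implements the standard Transformer warmup schedule. A compact restatement of that schedule, using the argparse defaults from main():

def noam_lr(step, d_model=38, lr_mul=2.0, n_warmup_steps=4000):
    # Mirrors ScheduledOptim._get_lr_scale: linear warmup, then ~step**-0.5 decay.
    scale = (d_model ** -0.5) * min(step ** -0.5, step * n_warmup_steps ** -1.5)
    return lr_mul * scale

for s in (1, 1000, 4000, 20000):
    print(s, format(noam_lr(s), ".2e"))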
''' This script handles the training process. author: ysyBrenda run in env:torch1.3.0 ''' # to use tensorboard,input following in terminal: # $ tensorboard --logdir=output --port 6006 # if【was already in use】:lsof -i:6006, kill -9 PID def train_epoch(model, training_data, optimizer, opt, device): ''' Epoch operation in training''' model.train() total_loss = 0 iter = 0 desc = ' - (Training) ' for batch in tqdm(training_data, mininterval=2, desc=desc, leave=False): # todo # prepare data if opt.isContrastLoss: temp= batch[0].to(device) a = torch.chunk(temp, 3, dim=1) src_seq=torch.cat([a[0],a[1],a[2]],0) else: src_seq = batch[0].to(device) gold = batch[1][:, 2:].unsqueeze(1) trg_seq, gold = map(lambda x: x.to(device), [batch[1].unsqueeze(1), gold.contiguous().view(-1)]) # transpose、unsqueeze vector if opt.isContrastLoss: trg_seq=torch.cat([trg_seq,trg_seq,trg_seq],0) # forward optimizer.zero_grad() pred, *_ = model(src_seq, trg_seq) # backward and update parameters if opt.isContrastLoss: a = torch.chunk(pred, 3, dim=0) contras_loss = F.l1_loss(a[1].contiguous().view(-1), a[2].contiguous().view(-1), reduction='mean') loss = F.l1_loss(a[0].contiguous().view(-1), gold, reduction='mean') + opt.lambda_con * contras_loss else: loss = F.l1_loss(pred.contiguous().view(-1), gold, reduction='mean') # F.l1_loss,F_mse_loss loss.backward() optimizer.step_and_update_lr() total_loss += loss.item() iter += 1 print('total_train loss: {:8.5f},iter:{},average_train loss:{:8.5f} '.format(total_loss,iter,total_loss/iter)) #optimizer.n_steps=iter return total_loss/iter def eval_epoch(model, validation_data, device, opt): ''' Epoch operation in evaluation ''' model.eval() total_loss = 0 iter=0 desc = ' - (Validation) ' with torch.no_grad(): for batch in tqdm(validation_data, mininterval=2, desc=desc, leave=False): if opt.isContrastLoss: temp = batch[0].to(device) a = torch.chunk(temp, 3, dim=1) src_seq = torch.cat([a[0], a[1], a[2]], 0) else: src_seq = batch[0].to(device) gold = batch[1][:, 2:].unsqueeze(1) trg_seq, gold = map(lambda x: x.to(device), [batch[1].unsqueeze(1), gold.contiguous().view(-1)]) if opt.isContrastLoss: trg_seq = torch.cat([trg_seq, trg_seq, trg_seq], 0) # forward pred, *_ = model(src_seq, trg_seq) # ============= loss========================== if opt.isContrastLoss: a = torch.chunk(pred, 3, dim=0) contras_loss = F.l1_loss(a[1].contiguous().view(-1), a[2].contiguous().view(-1), reduction='mean') loss = F.l1_loss(a[0].contiguous().view(-1), gold, reduction='mean') + opt.lambda_con * contras_loss else: loss = F.l1_loss(pred.contiguous().view(-1), gold, reduction='mean') # reduction="mean" total_loss += loss.item() iter +=1 print('total_val loss:{:8.5f} ,iter:{},average_val loss:{:8.5f}'.format(total_loss,iter,total_loss/iter)) return total_loss/iter def train(model, training_data, validation_data, optimizer, device, opt): """ Start training """ # Use tensorboard to plot curves, e.g. 
loss, learning rate if opt.use_tb: print("[Info] Use Tensorboard") # from torch.utils.tensorboard import SummaryWriter tb_writer = SummaryWriter(log_dir=os.path.join(opt.output_dir, 'tensorboard' + opt.fileHead)) log_train_file = os.path.join(opt.output_dir, opt.fileHead, 'train.log') log_valid_file = os.path.join(opt.output_dir, opt.fileHead, 'valid.log') print('[Info] Training performance will be written to file: {} and {}'.format(log_train_file, log_valid_file)) with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf: log_tf.write('epoch,loss,lr\n') log_vf.write('epoch,loss,lr\n') def print_performances(header, loss, start_time, lr): print(' - {header:12} loss: {loss: 8.5f}, lr: {lr: 8.2e}, ' \ 'elapse: {elapse:3.3f} min'.format( header=f"({header})", loss=loss, elapse=(time.time() - start_time) / 60, lr=lr)) # lr: {lr:8.5f} 8.2e valid_losses = [] bad_counter = 0 best = 50000 patience = 5 # 5 for epoch_i in range(opt.epoch): print('[ Epoch', epoch_i, ']') start = time.time() train_loss = train_epoch( model, training_data, optimizer, opt, device) # todo # Current learning rate lr = optimizer._optimizer.param_groups[0]['lr'] print_performances('Training', train_loss, start, lr) # start = time.time() valid_loss = eval_epoch(model, validation_data, device, opt) # todo print_performances('Validation', valid_loss, start, lr) valid_losses += [valid_loss] checkpoint = {'epoch': epoch_i, 'settings': opt, 'model': model.state_dict()} if opt.save_mode == 'all': # if epoch_i % 10 == 9: model_name = 'model_{epoch:d}_vloss_{vloss:.4f}.chkpt'.format(epoch=epoch_i, vloss=valid_loss) torch.save(checkpoint, os.path.join(opt.output_dir, opt.fileHead, model_name)) elif opt.save_mode == 'best': model_name = 'model_best.chkpt' torch.save(checkpoint, os.path.join(opt.output_dir, opt.fileHead, model_name)) print(' - [Info] The checkpoint file has been updated.') with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf: log_tf.write('{epoch},{loss: 8.5f},{lr:8.2e}\n'.format( epoch=epoch_i, loss=train_loss, lr=lr)) log_vf.write('{epoch},{loss: 8.5f},{lr:8.2e}\n'.format( epoch=epoch_i, loss=valid_loss, lr=lr)) if opt.use_tb: tb_writer.add_scalars('loss', {'train': train_loss, 'val': valid_loss}, epoch_i) tb_writer.add_scalar('learning_rate', lr, epoch_i) # auto break if valid_loss < best: best = valid_loss bad_counter = 0 else: bad_counter += 1 if bad_counter == patience: break log_opt_file = 'opt_file_log.log' # add with open(log_opt_file, 'a') as log_f: log_f.write(str(opt.fileHead) + '__loss__{:8.5f}\n'.format(valid_loss)) def main(): ''' Usage: python train.py -data_pkl ./data/pre_data.pkl -output_dir output -epoch 150 -b 16 -use_tb -save_mode all ''' parser = argparse.ArgumentParser() parser.add_argument('-data_pkl', default=None) # all-in-1 data pickle or bpe field parser.add_argument('-train_path', default=None) # bpe encoded data parser.add_argument('-val_path', default=None) # bpe encoded data parser.add_argument('-epoch', type=int, default=10) parser.add_argument('-b', '--batch_size', type=int, default=2048) parser.add_argument('-d_model', type=int, default=38) # 38;8 #todo parser.add_argument('-d_inner_hid', type=int, default=2048) # 64 #todo parser.add_argument('-d_k', type=int, default=38) parser.add_argument('-d_v', type=int, default=38) parser.add_argument('-n_head', type=int, default=2) parser.add_argument('-n_layers', type=int, default=4) # 6 parser.add_argument('-warmup', '--n_warmup_steps', type=int, default=4000) parser.add_argument('-lr_mul', 
type=float, default=2.0) # 2.0 parser.add_argument('-seed', type=int, default=None) parser.add_argument('-dropout', type=float, default=0.1) parser.add_argument('-output_dir', type=str, default=None) parser.add_argument('-use_tb', action='store_true') parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best') parser.add_argument('-no_cuda', action='store_true') parser.add_argument('-unmask', type=float, default=0.3) parser.add_argument('-l2', type=float, default=0.0) # weight_dacay parser.add_argument('-lambda_con', type=float, default=0.01) # contrast loss lambda parser.add_argument('-T', type=int, default=1) # the times of mask parser.add_argument('-isContrastLoss', action='store_true') parser.add_argument('-isRandMask', action='store_true') opt = parser.parse_args() # # ++++++++++++++++ opt.d_k = opt.d_model opt.d_v = opt.d_model opt.cuda = not opt.no_cuda opt.d_word_vec = opt.d_model # 512 ==>38 # ------Output fileHead---- opt.fileHead = 'T' + str(opt.T) + '_unmask' + str(opt.unmask) + '_h' + str(opt.n_head) + 'L' + str( opt.n_layers) + '_hid' + str(opt.d_inner_hid) + '_d' + str(opt.d_model) + '_b' + str( opt.batch_size) + '_warm' + str(opt.n_warmup_steps) + '_lrm' + str(opt.lr_mul) + '_seed' + \ str(opt.seed) + '_dr' + str(opt.dropout) +'_isCL'+str(opt.isContrastLoss)+ '_lamb'+str(opt.lambda_con) +'_ismask'+str(opt.isRandMask) if os.path.exists(os.path.join(opt.output_dir, opt.fileHead)): print('the output file is rewriting....', opt.fileHead) else: os.mkdir(os.path.join(opt.output_dir, opt.fileHead)) print('The output filename is generated: ', opt.fileHead) # https://pytorch.org/docs/stable/notes/randomness.html # For reproducibility if opt.seed is not None: torch.manual_seed(opt.seed) torch.backends.cudnn.benchmark = False # torch.set_deterministic(True) np.random.seed(opt.seed) random.seed(opt.seed) if not opt.output_dir: print('No experiment result will be saved.') raise if not os.path.exists(opt.output_dir): os.makedirs(opt.output_dir) if opt.batch_size < 2048 and opt.n_warmup_steps <= 4000: print('[Warning] The warmup steps may be not enough.\n' \ '(sz_b, warmup) = (2048, 4000) is the official setting.\n' \ 'Using smaller batch w/o longer warmup may cause ' \ 'the warmup stage ends with only little data trained.') device = torch.device('cuda' if opt.cuda else 'cpu') # ========= Loading Dataset =========# training_data, validation_data = prepare_dataloaders(opt, device) print("training data size:{}, validation data size:{}".format(training_data.__len__(),validation_data.__len__())) print(opt) log_opt_file = os.path.join(opt.output_dir, opt.fileHead, 'opt.log') with open(log_opt_file, 'w') as log_f: log_f.write(str(opt))
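When -isContrastLoss is set, train_epoch above stacks three views of each batch and combines a reconstruction L1 term with a contrastive L1 term weighted by lambda_con. A self-contained sketch of that composite loss, with shapes chosen only for illustration:

import torch
import torch.nn.functional as F

def composite_l1_loss(pred, gold, lambda_con=0.01):
    # pred stacks three views along the batch dimension, as in train_epoch.
    a = torch.chunk(pred, 3, dim=0)
    recon = F.l1_loss(a[0].contiguous().view(-1), gold, reduction='mean')
    contrast = F.l1_loss(a[1].contiguous().view(-1),
                         a[2].contiguous().view(-1), reduction='mean')
    return recon + lambda_con * contrast

pred = torch.randn(6, 38)      # 2 samples x 3 views, 38 features (the d_model default)
gold = torch.randn(2 * 38)     # flattened target for the first view
print(composite_l1_loss(pred, gold).item())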
transformer = Transformer(
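The next_line above opens the model construction; based only on the Transformer.__init__ signature shown in this row's context and the argparse defaults, the call presumably continues along these lines (the pad indices are assumptions, not taken from the repository):

from transformer.Models import Transformer

model = Transformer(
    src_pad_idx=1, trg_pad_idx=1,          # assumed pad indices
    d_word_vec=38, d_model=38, d_inner=2048,
    n_layers=4, n_head=2, d_k=38, d_v=38,
    dropout=0.1,
)
print(sum(p.numel() for p in model.parameters()))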
0
2023-12-22 13:22:58+00:00
8k
camenduru/MotionCtrl-hf
lvdm/models/ddpm3d.py
[ { "identifier": "disabled_train", "path": "lvdm/basics.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "default", "path": "lvdm/common.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "lvdm/common.py", "snippet": "def exists(val):\n return val is not None" }, { "identifier": "extract_into_tensor", "path": "lvdm/common.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "lvdm/common.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "lvdm/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self, noise=None):\n if noise is None:\n noise = torch.randn(self.mean.shape)\n \n x = self.mean + self.std * noise.to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "lvdm/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "lvdm/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "DDIMSampler", "path": "lvdm/models/samplers/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.counter = 0\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n schedule_verbose=False,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n \n # check condition bs\n if conditioning is not None:\n if isinstance(conditioning, dict):\n try:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n except:\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=schedule_verbose)\n \n # make shape\n if len(shape) == 3:\n C, H, W = shape\n size = (batch_size, C, H, W)\n elif len(shape) == 4:\n C, T, H, W = shape\n size = (batch_size, C, T, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n \n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n verbose=verbose,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, verbose=True,\n **kwargs):\n device = self.model.betas.device \n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n \n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n \n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n if verbose:\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n else:\n iterator = time_range\n\n clean_cond = kwargs.pop(\"clean_cond\", False)\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n # use mask to blend noised original latent (img_orig) & new sampled latent (img)\n if mask is not None:\n assert x0 is not None\n if clean_cond:\n img_orig = x0\n else:\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? <ddim inversion>\n img = img_orig * mask + (1. 
- mask) * img # keep original & modify use img\n \n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n \n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n uc_type=None, conditional_guidance_scale_temporal=None, **kwargs):\n b, *_, device = *x.shape, x.device\n if x.dim() == 5:\n is_video = True\n else:\n is_video = False\n # f=open('/apdcephfs_cq2/share_1290939/yingqinghe/code/LVDM-private/cfg_range_s5noclamp.txt','a')\n # print(f't={t}, model input, min={torch.min(x)}, max={torch.max(x)}',file=f)\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c, **kwargs) # unet denoiser\n else:\n # with unconditional condition\n if isinstance(c, torch.Tensor):\n un_kwargs = kwargs.copy()\n if isinstance(unconditional_conditioning, dict):\n for uk, uv in unconditional_conditioning.items():\n if uk in un_kwargs:\n un_kwargs[uk] = uv\n unconditional_conditioning = unconditional_conditioning['uc']\n if 'cond_T' in kwargs and t < kwargs['cond_T']:\n if 'features_adapter' in kwargs:\n kwargs.pop('features_adapter')\n un_kwargs.pop('features_adapter')\n # kwargs['features_adapter'] = None\n # un_kwargs['features_adapter'] = None\n # if 'pose_emb' in kwargs:\n # kwargs.pop('pose_emb')\n # un_kwargs.pop('pose_emb')\n # kwargs['pose_emb'] = None\n # un_kwargs['pose_emb'] = None\n e_t = self.model.apply_model(x, t, c, **kwargs)\n # e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **un_kwargs)\n elif isinstance(c, dict):\n e_t = self.model.apply_model(x, t, c, **kwargs)\n e_t_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n else:\n raise NotImplementedError\n # text cfg\n if uc_type is None:\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n else:\n if uc_type == 'cfg_original':\n e_t = e_t + unconditional_guidance_scale * (e_t - e_t_uncond)\n elif uc_type == 'cfg_ours':\n e_t = e_t + unconditional_guidance_scale * (e_t_uncond - e_t)\n else:\n raise NotImplementedError\n # temporal guidance\n if conditional_guidance_scale_temporal is not None:\n e_t_temporal = self.model.apply_model(x, t, c, **kwargs)\n e_t_image = self.model.apply_model(x, t, c, no_temporal_attn=True, **kwargs)\n e_t = e_t + conditional_guidance_scale_temporal * (e_t_temporal - e_t_image)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps 
else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n \n if is_video:\n size = (b, 1, 1, 1, 1)\n else:\n size = (b, 1, 1, 1)\n a_t = torch.full(size, alphas[index], device=device)\n a_prev = torch.full(size, alphas_prev[index], device=device)\n sigma_t = torch.full(size, sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(size, sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n # print(f't={t}, pred_x0, min={torch.min(pred_x0)}, max={torch.max(pred_x0)}',file=f)\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n # # norm pred_x0\n # p=2\n # s=()\n # pred_x0 = pred_x0 - torch.max(torch.abs(pred_x0))\n\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n \n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n\n return x_prev, pred_x0" }, { "identifier": "make_beta_schedule", "path": "lvdm/models/utils_diffusion.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "instantiate_from_config", "path": "utils/utils.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" } ]
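The p_sample_ddim method in the context above performs the usual eps-parameterized DDIM update. Stripped of the guidance and masking logic, one step reduces to the sketch below (shapes and broadcasting simplified):

import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t):
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()   # current estimate of x_0
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t     # direction pointing to x_t
    noise = sigma_t * torch.randn_like(x)
    return a_prev.sqrt() * pred_x0 + dir_xt + noise

x = torch.randn(1, 4, 16, 32, 32)        # (b, c, t, h, w) video latent
e_t = torch.randn_like(x)
print(ddim_step(x, e_t, torch.tensor(0.6), torch.tensor(0.7), torch.tensor(0.0)).shape)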
import logging import os import random import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager from functools import partial from einops import rearrange, repeat from tqdm import tqdm from pytorch_lightning.utilities import rank_zero_only from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR from torchvision.utils import make_grid from lvdm.basics import disabled_train from lvdm.common import default, exists, extract_into_tensor, noise_like from lvdm.distributions import DiagonalGaussianDistribution, normal_kl from lvdm.ema import LitEma from lvdm.models.samplers.ddim import DDIMSampler from lvdm.models.utils_diffusion import make_beta_schedule from utils.utils import instantiate_from_config
6,339
""" wild mixture of https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/CompVis/taming-transformers -- merci """ mainlogger = logging.getLogger('mainlogger') __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor=None, use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization mainlogger.info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.channels = channels self.temporal_length = unet_config.params.temporal_length self.image_size = image_size # try conv? if isinstance(self.image_size, int): self.image_size = [self.image_size, self.image_size] self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) #count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
""" wild mixture of https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/CompVis/taming-transformers -- merci """ mainlogger = logging.getLogger('mainlogger') __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor=None, use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization mainlogger.info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.channels = channels self.temporal_length = unet_config.params.temporal_length self.image_size = image_size # try conv? if isinstance(self.image_size, int): self.image_size = [self.image_size, self.image_size] self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) #count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
self.model_ema = LitEma(self.model)
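At evaluation time the EMA weights are typically swapped in and out with LitEma's store / copy_to / restore methods (all visible in the context above); the DDPM helper that wraps this is outside this excerpt, so the following is only an illustrative sketch:

import torch.nn as nn
from lvdm.ema import LitEma

def evaluate_with_ema(model, model_ema, eval_fn):
    model_ema.store(model.parameters())        # keep the live training weights
    model_ema.copy_to(model)                   # swap in the EMA shadow weights
    try:
        return eval_fn(model)
    finally:
        model_ema.restore(model.parameters())  # put the training weights back

net = nn.Linear(4, 4)
ema = LitEma(net)
print(evaluate_with_ema(net, ema, lambda m: sum(p.abs().sum().item() for p in m.parameters())))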
7
2023-12-27 19:32:03+00:00
8k
bitstuffing/pychat
tests/pychat.py
[ { "identifier": "OpenChat", "path": "core/openchat.py", "snippet": "class OpenChat(Browser):\n\n main_origin = \"https://openchat.team\"\n main_referer = \"https://openchat.team/es\"\n\n history = {}\n\n def __init__(self):\n super().__init__()\n self.url = \"https://openchat.team/\"\n self.url_api = \"https://openchat.team/api/chat\"\n self.url_api_models = \"https://openchat.team/api/models\"\n self.headers = {\n 'User-Agent': Browser.USER_AGENT,\n 'Accept': '*/*',\n 'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Referer' : self.main_referer,\n 'Content-Type' : 'application/json',\n 'Origin': self.main_origin,\n 'Connection': 'keep-alive',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site' : 'same-origin',\n 'TE': 'trailers'\n }\n\n def obtain_models(self):\n return self.session.post(self.url_api_models, json={'key': ''}, headers=Browser.headers)\n\n def send_message(self, message, stream=True, queue=queue.Queue()):\n\n if self.history == {}:\n self.history = self.build_history()\n \n response = None\n self.history[\"messages\"].append(self.build_message(message))\n \n if stream:\n response = self.session.post(self.url_api, json=self.history, stream=True, headers=self.headers)\n\n if response.status_code == 200:\n stringResponse = \"\"\n for line in response.iter_content(Browser.STEAM_BUFFER_SIZE):\n if line:\n try:\n resp = (line.decode('utf-8'))\n except UnicodeDecodeError:\n for encoding in ['ISO-8859-1', 'latin1', 'cp1252', 'cp437', 'big5', 'gb2312', 'euc-kr', 'windows-1252']:\n try:\n resp = (line.decode(encoding))\n break\n except UnicodeDecodeError:\n continue\n else:\n raise UnicodeDecodeError(\"Not possible to decode it :'(\")\n stringResponse += resp\n queue.put(resp)\n print(resp, end='')\n print(\"\")\n return stringResponse\n else:\n print(f\"Error obtaining information: {response.status_code}\")\n else:\n response = self.session.post(self.url_api, json=self.history, headers=self.headers)\n return response.text\n\n\n def build_history(self):\n return {\n \"model\": {\n \"id\": \"openchat_v3.2_mistral\",\n \"name\": \"OpenChat Aura\",\n \"maxLength\": 24576,\n \"tokenLimit\": 8192\n },\n \"messages\": [ ],\n \"key\": \"\",\n \"prompt\": \"\",\n \"temperature\": 0.5\n }\n\n def build_message(self, message = \"hi\"):\n return {\n \"role\": \"user\",\n \"content\": message\n }" }, { "identifier": "Bing", "path": "core/bing.py", "snippet": "class Bing(Browser):\n VERSION = \"1.1381.12\"\n\n conversationId = ''\n clientId = ''\n conversationSignature = ''\n\n WS_BING_HEADERS = {\n \"Pragma\": \"no-cache\",\n \"Origin\": \"https://www.bing.com\",\n \"Accept-Language\": 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',\n \"User-Agent\": Browser.USER_AGENT,\n \"Cache-Control\": \"no-cache\",\n \"Connection\": \"Upgrade\",\n }\n \n DELIMITER = \"\\x1e\"\n\n def __init__(self):\n super().__init__()\n self.url = \"https://www.bing.com/search\"\n self.form_url = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'\n self.turning_url = 'https://www.bing.com/turing/api/suggestions/v1/zeroinputstarter?lang=es&region=*&tone=Balanced&enablePersonalizedSuggestions=undefined&enableMarketplaceSuggestions=undefined'\n self.voice_service_url = 'wss://sr.bing.com/opaluqu/speech/recognition/dictation/cognitiveservices/v1'\n self.invocation_id = 0\n self.headers = {\n 'User-Agent': Browser.USER_AGENT, \n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',\n 'Accept-Language': 
'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Upgrade-Insecure-Requests': '1',\n 'Connection': 'keep-alive',\n 'Sec-Fetch-Dest': 'document',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'none',\n 'Sec-Fetch-User': '?1'\n }\n self.ws_headers = {\n 'User-Agent': Browser.USER_AGENT,\n 'Accept': '*/*',\n 'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Sec-WebSocket-Version': '13',\n 'Origin': 'https://www.bing.com',\n 'Host': 'sydney.bing.com',\n 'Sec-WebSocket-Extensions': 'permessage-deflate',\n 'Connection': 'keep-alive, Upgrade',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'websocket',\n 'Sec-Fetch-Site': 'same-site',\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Upgrade': 'websocket',\n }\n '''\n try:\n self.getCID()\n except Exception as e:\n print(e)\n print(\"Error getting IG and CID from bing.com, trying normal way...\")\n self.session.get(self.url, headers=self.headers)\n pass\n '''\n # set MUID, _EDGE_S, _EDGE_V, SRCHD, SRCHUID, SRCHUSR, SRCHHPGUSR, _SS, _HPVN cookies\n response = self.session.get(\"https://www.bing.com/\", headers=self.headers)\n for cookie in response.cookies:\n #print(cookie.name+\"=\"+cookie.value)\n self.session.cookies.set(cookie.name, cookie.value)\n\n # set MUIDB cookie\n response = self.session.get(\"https://www.bing.com/geolocation/write?isDevLoc=false\", headers=self.headers)\n for cookie in response.cookies:\n #print(cookie.name+\"=\"+cookie.value)\n self.session.cookies.set(cookie.name, cookie.value)\n \n\n def getCID(self):\n response1 = self.session.get(self.form_url, headers=self.headers)\n\n html = response1.text\n self.ig = re.search(r'IG:\"(.*?)\"', html).group(1)\n print(f\"IG: {self.ig}\")\n\n self.cid = re.search(r'CID:\"(.*?)\"', html).group(1)\n print(f\"cid: {self.cid}\")\n\n def init_conversation(self, cmd=\"hello\", queue = queue.Queue()):\n asyncio.run(self.init_conversation_async2(cmd, queue))\n\n async def init_conversation_async2(self, prompt, queue = queue.Queue()):\n try:\n prompt = prompt.encode('ascii', 'ignore').decode('ascii')\n except:\n pass\n cookies = \"\"\n cookies = self.extractFirefoxCookies()\n if True:\n #cookies = self.launch_captcha_solver()\n for cookie in cookies.split(\"; \"):\n if \"=\" in cookie:\n cookie = cookie.split(\"=\")\n #print(\"updating cookie: \"+cookie[0]+\"=\"+cookie[1])\n self.session.cookies.set(cookie[0], cookie[1])\n\n for cookie in self.session.cookies:\n cookies += cookie.name+\"=\"+cookie.value+\"; \"\n #print(\"PRE cookies: \"+cookies)\n coroutine = self.run_init_conversation(prompt, cookies=cookies, queue=queue)\n #response = asyncio.run(coroutine)\n response = await coroutine\n if \"CaptchaChallenge\" in response.data:\n cookies = self.launch_captcha_solver()\n # updates self.session cookies using normal string cookies, parsing it and creating a cookie object for each one\n \n for cookie in cookies.split(\"; \"):\n if \"=\" in cookie:\n cookie = cookie.split(\"=\")\n #print(\"updating cookie: \"+cookie[0]+\"=\"+cookie[1])\n self.session.cookies.set(cookie[0], cookie[1])\n\n self.run_init_conversation(prompt, cookies, queue)\n\n\n def launch_captcha_solver(self):\n # set valid cct cookie\n url = \"https://www.bing.com/turing/captcha/challenge?q=&iframeid=local-gen-\"+str(uuid.uuid4())\n cookie = self.extractCookiesFromRealFirefox(url)\n return cookie\n \n def cookiesToDict(self, cookies: str) -> dict:\n cookies = {\n key_value.strip().split(\"=\")[0]: \"=\".join(key_value.split(\"=\")[1:])\n for key_value in cookies.split(\";\")\n }\n cookies2 = {}\n for key 
in cookies:\n if cookies[key] == '':\n #del cookies[key]\n pass\n else:\n cookies2[key] = cookies[key].strip()\n return cookies2\n\n async def init_conversation_async(self):\n response = self.session.get(f\"https://www.bing.com/turing/conversation/create?bundleVersion={Bing.VERSION}\", headers=self.headers)\n data = response.json()\n conversationId = data.get('conversationId')\n clientId = data.get('clientId')\n conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')\n conversationSignature2 = response.headers.get('X-Sydney-Conversationsignature')\n #print(f\"CONVERSATION-ID: {conversationId}\")\n return conversationId, clientId , conversationSignature, conversationSignature2\n\n async def run_init_conversation(self, prompt=\"hello world!\", cookies = '', queue = queue.Queue()):\n #print(\"init_conversation: cookies: \"+cookies)\n if cookies != '':\n self.headers['Cookie'] = cookies\n if self.conversationId == '' or self.clientId == '' or self.conversationSignature == '' or self.conversationSignature2 == '':\n \n #tasks = [asyncio.create_task(self.init_conversation_async())]\n #await asyncio.gather(*tasks)\n #self.conversationId, self.clientId, self.conversationSignature, self.conversationSignature2 = tasks[0].result()\n self.conversationId, self.clientId, self.conversationSignature, self.conversationSignature2 = await self.init_conversation_async()\n\n async with ClientSession(headers=self.ws_headers, cookies=self.cookiesToDict(cookies), timeout=aiohttp.ClientTimeout(total=60)) as session:\n async with session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', autoping=False, params={'sec_access_token': self.conversationSignature}) as wss:\n #print(\"starting conversation...\")\n await wss.send_str(self.format_message({'protocol': 'json', 'version': 1}))\n response = await wss.receive(timeout=10)\n #print(\"response: \"+response.data)\n await wss.send_str(self.create_message(self.conversationId, self.clientId, self.conversationSignature, self.conversationSignature2, prompt))\n self.invocation_id += 1\n response2 = await wss.receive(timeout=10)\n #print(\"response2: \"+response2.data)\n if \"CaptchaChallenge\" in response2.data:\n return response2\n #else:\n # print(response2.data)\n #print(\"done!\")\n # get all responses until disconnected (TODO handler out of this function)\n while True:\n response = await wss.receive(timeout=10)\n if response.type == aiohttp.WSMsgType.CLOSED:\n print(\"CLOSED!\")\n break\n elif response.type == aiohttp.WSMsgType.ERROR:\n print(\"ERROR!\")\n break\n else:\n json_response = response.data\n if json_response != \"\":\n \n try:\n #print(len(json_response))\n #print(json_response)\n if json_response[-1] == Bing.DELIMITER:\n json_response = json_response[:-1] # remove the 'custom' end delimiter\n if \"}\\x1e\" in json_response:\n discarted = json_response[json_response.find(\"}\\x1e{\")+2:]\n json_response = json_response[:json_response.find(\"}\\x1e{\")+1]\n discarted_json = json.loads(discarted)\n discartedType = BingMessageType(discarted_json.get('type'),discarted_json.get('invocationId'))\n\n print(f\"Discarted: type: {discartedType.type}, invocationId: {discartedType.invocationId}\")\n\n data = json.loads(json_response)\n\n # ChatData object\n bingResponse = BingResponse(data)\n # if bingResponse.chatmessage exists, it's a chat message\n if hasattr(bingResponse, 'chatmessage'):\n if isinstance(bingResponse.chatmessage ,BingMessageType1):\n print(f\"BingMessageType1, author: 
{bingResponse.chatmessage.arguments.messages[0].author}, message: {bingResponse.chatmessage.arguments.messages[0].text}\")\n elif isinstance(bingResponse.chatmessage ,BingMessageType2):\n print(f\"BingMessageType2, author: {bingResponse.chatmessage.item.messages[0].author}, message: {bingResponse.chatmessage.item.messages[0].text}\")\n queue.put(bingResponse)\n else:\n print(\"not a chat message:\")\n print(json_response)\n\n except Exception as e:\n traceback_str = traceback.format_exc()\n print(f\"Error: {e}\")\n print(traceback_str)\n print(f\"json_response.encode('utf-8'): {json_response.encode('utf-8')}\")\n pass\n \n \n def create_message(self, conversationId: str, clientId: str, conversationSignature: str, conversationSignature2: str, prompt: str):\n request_id = str(uuid.uuid4())\n struct = {\n \"arguments\": [\n {\n \"source\": \"cib\",\n \"optionsSets\": [\n \"nlu_direct_response_filter\",\n \"deepleo\",\n \"disable_emoji_spoken_text\",\n \"responsible_ai_policy_235\",\n \"enablemm\",\n \"dv3sugg\",\n \"iyxapbing\",\n \"iycapbing\",\n \"galileo\",\n \"saharagenconv5\"\n ],\n \"allowedMessageTypes\": [\n \"ActionRequest\",\n \"Chat\",\n \"ConfirmationCard\",\n \"Context\",\n \"InternalSearchQuery\",\n \"InternalSearchResult\",\n \"Disengaged\",\n \"InternalLoaderMessage\",\n \"InvokeAction\",\n \"Progress\",\n \"RenderCardRequest\",\n \"RenderContentRequest\",\n \"AdsQuery\",\n \"SemanticSerp\",\n \"GenerateContentQuery\",\n \"SearchQuery\"\n ],\n \"sliceIds\": [],\n \"verbosity\": \"verbose\",\n \"scenario\": \"SERP\",\n \"spokenTextMode\": \"None\",\n \"traceId\": ''.join(random.choice(string.hexdigits.upper()) for _ in range(32)),\n \"conversationHistoryOptionsSets\": [\n \"autosave\",\n \"savemem\",\n \"uprofupd\",\n \"uprofgen\"\n ],\n \"isStartOfSession\": self.invocation_id == 0,\n 'requestId': request_id,\n \"message\": {\n #\"userIpAddress\": self.getInternetIpAddress(),\n \"timestamp\": self.getTimeStamp(),\n \"author\": \"user\",\n \"inputMethod\": \"Keyboard\",\n \"text\": prompt,\n \"messageType\": \"SearchQuery\",\n \"requestId\": request_id,\n \"messageId\": request_id\n },\n \"tone\": \"Balanced\", # Creative, Precise, Balanced\n \"spokenTextMode\": \"None\",\n \"conversationSignature\": conversationSignature2,\n \"conversationId\": conversationId,\n \"participant\": {\n \"id\": clientId\n }\n }\n ],\n \"invocationId\": str(self.invocation_id),\n \"target\": \"chat\",\n \"type\": 4\n }\n return self.format_message(struct)\n\n def format_message(self, msg: dict) -> str:\n return json.dumps(msg, ensure_ascii=False) + Bing.DELIMITER\n \n def build_speech_message(self, path, request_id = str(uuid.uuid4()),content = {}):\n timestamp = datetime.datetime.now(tz=tzutc()).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n content_type=\"application/json\"\n\n message = f\"Path: {path}\\r\\n\"\n message += f\"X-RequestId: {request_id}\\r\\n\"\n message += f\"X-Timestamp: {timestamp}\\r\\n\"\n if content_type == \"application/json\":\n message += f\"Content-Type: {content_type}\\r\\n\\r\\n\"\n message += json.dumps(content, ensure_ascii=True)\n else:\n if content_type == \"audio/x-wav\":\n message += f\"Content-Type: {content_type}\\r\\n\\r\\n\"\n message += content\n #print(message)\n return message\n\n def speech_to_text(self):\n asyncio.run(self.speech_to_text_async())\n\n def build_audio_message_header(self, request_id = str(uuid.uuid4())):\n timestamp = datetime.datetime.now(tz=tzutc()).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n\n content_type=\"audio/x-wav\"\n message = 
bytes.fromhex('007e506174683a20617564696f') + b'\\r\\n' # ~Path: audio\\n\n message += b'X-RequestId: '+request_id.encode('utf-8')+ b'\\r\\n' # X-RequestId: 534885F5FF347068C0CD34C5A66F6EFE\\n\n message += b'X-Timestamp: '+timestamp.encode('utf-8')+ b'\\r\\n'\n message += b'Content-Type: '+content_type.encode('utf-8')+ b'\\r\\n\\r\\n'\n message += bytes.fromhex('524946460000000057415645666d74201000000001000100803e0000007d0000020010006461746100000000') # RIFF...WAVEfmt .............data....\n return message\n \n def build_audio_message_intro(self, request_id = str(uuid.uuid4())):\n timestamp = datetime.datetime.now(tz=tzutc()).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n message = bytes.fromhex('0063506174683a20617564696f') + b'\\r\\n' # Path: audio\\n\n message += bytes.fromhex('582d5265717565737449643a20') + request_id.encode('utf-8') + b'\\r\\n' \n message += bytes.fromhex('582d54696d657374616d703a20') + timestamp.encode('utf-8') + b'\\r\\n\\r\\n' \n \n return message\n \n def build_audio_message_content(self, request_id = str(uuid.uuid4()), content = b''):\n timestamp = datetime.datetime.now(tz=tzutc()).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n print(f'timestamp: {timestamp}')\n message = bytes.fromhex('0063506174683a20617564696f') + b'\\r\\n' \n message += bytes.fromhex('582d5265717565737449643a20') + request_id.encode('utf-8') + b'\\r\\n' \n message += bytes.fromhex('582d54696d657374616d703a20') + timestamp.encode('utf-8') + b'\\r\\n\\r\\n' \n message += content\n return message\n\n async def speech_to_text_async(self):\n \n '''\n if( self.cid == '' or self.ig == ''):\n self.session = requests.Session()\n self.getCID()\n '''\n\n response = self.session.get(url = self.turning_url, headers=self.headers)\n\n cookies = \"\"\n for cookie in response.cookies:\n cookies += cookie.name+\"=\"+cookie.value+\"; \"\n\n for cookie in self.session.cookies:\n cookies += cookie.name+\"=\"+cookie.value+\"; \"\n\n if cookies != '':\n self.headers['Cookie'] = cookies\n\n print(\"extracted cookies: \"+cookies)\n\n request_id = str(uuid.uuid4())\n print(f'request_id: {request_id}')\n\n connection_key = ''.join(random.choice(string.hexdigits.upper()) for _ in range(32)) \n print(f'connection_key: {connection_key}')\n #connection_key = str(uuid.uuid4())\n paramsDic = {\n 'clientbuild': 'sydney',\n 'referer': urllib.parse.quote_plus(self.form_url),\n 'uqurequestid': request_id,\n 'surface': 'desktop',\n 'autodetect': 1,\n 'uquclientversion': 0,\n 'sroptions' : 'cdxrwss,cdxsydoroff,cdxwhisr,cdxwsnnc,lidprimary,cdxdlid,autotts',\n 'language': 'xx-yy',\n 'format': 'simple',\n 'Ocp-Apim-Subscription-Key': 'key',\n 'X-ConnectionId': connection_key\n }\n\n async with ClientSession(headers=self.headers, timeout=aiohttp.ClientTimeout(total=60)) as session:\n async with session.ws_connect(url = self.voice_service_url, autoping=False, params=paramsDic) as wss:\n request_id = ''.join(random.choice(string.hexdigits.upper()) for _ in range(32)) \n message = {\"context\":{\"system\":{\"name\":\"SpeechSDK\",\"version\":\"1.15.0-alpha.0.1\",\"build\":\"JavaScript\",\"lang\":\"JavaScript\"},\"os\":{\"platform\":\"Browser/Linux x86_64\",\"name\":Browser.USER_AGENT,\"version\":\"5.0 (X11)\"},\"audio\":{\"source\":{\"bitspersample\":16,\"channelcount\":1,\"connectivity\":\"Unknown\",\"manufacturer\":\"Speech SDK\",\"model\":\"HD Webcam C910\",\"samplerate\":16000,\"type\":\"Microphones\"}}},\"recognition\":\"conversation\"}\n content1 = self.build_speech_message(path='speech.config', request_id=request_id, 
content=message)\n #print(content1)\n await wss.send_str(content1)\n print(\"speech.config sent\")\n await wss.send_str(self.build_speech_message(path='speech.context', request_id=request_id, content={}))\n print(\"speech.context sent\")\n\n response = await wss.receive(timeout=10)\n if response.data != '':\n print(response.data)\n\n response = await wss.receive(timeout=10)\n if response.data != '':\n print(response.data)\n\n response = await wss.receive(timeout=10)\n if response.data != '':\n print(response.data)\n if \"speech.startDetected\" in response.data:\n print(\"ok, lets go!\")\n\n await wss.send_bytes(self.build_audio_message_header(request_id=request_id))\n\n await wss.send_bytes(self.build_audio_message_intro(request_id=request_id))\n\n print(\"audio header sent, reading response...\")\n\n import time\n #sleep 100 ms\n time.sleep(0.101)\n \n \n chunk_size = 6400\n call = 0\n recorder = AudioRecorder()\n recorder.start()\n import time\n time.sleep(1)\n while not recorder.getExit():\n if not recorder.getQueue().empty():\n audio_data = recorder.getQueue().get()\n # 6400 bytes per chunk\n num_chunks = len(audio_data) // chunk_size\n for i in range(num_chunks):\n start_index = i * chunk_size\n end_index = start_index + chunk_size\n chunk_audio_data = audio_data[start_index:end_index]\n\n if len(chunk_audio_data) == chunk_size:\n await wss.send_bytes(self.build_audio_message_content(request_id=request_id, content=chunk_audio_data))\n call += 1\n\n if call % 12 == 0:\n print('show partial text content')\n response = await wss.receive(timeout=10)\n if response.type == aiohttp.WSMsgType.CLOSED:\n print(\"CLOSED!\")\n elif response.type == aiohttp.WSMsgType.ERROR:\n print(\"ERROR!\")\n else:\n json_response = response.data\n if json_response != \"\":\n print(json_response)\n try:\n resp = json.loads(json_response[json_response.find(\"\\r\\n\\r\\n\")+4:])\n text, offset, duration, recognitionStatus, displayText, primaryLanguage = None, None, None, None, None, None\n if hasattr(resp, 'Text'):\n text = resp.get('Text')\n if hasattr(resp, 'Offset'):\n offset = resp.get('Offset')\n if hasattr(resp, 'Duration'):\n duration = resp.get('Duration')\n if hasattr(resp, 'RecognitionStatus'):\n recognitionStatus = resp.get('RecognitionStatus')\n if hasattr(resp, 'DisplayText'):\n displayText = resp.get('DisplayText')\n if hasattr(resp, 'PrimaryLanguage'):\n primaryLanguage = resp.get('PrimaryLanguage')\n\n bingResponse = BingTextResponse(text = text, offset = offset, duration = duration, recognitionStatus = recognitionStatus, displayText = displayText, primaryLanguage = primaryLanguage)\n if '{\"RecognitionStatus\":\"Success\",' in json_response:\n # TODO review why this line is not working, it should be but... 
probably tomorrow will be a better day to check it\n #if recognitionStatus is not None and (recognitionStatus == \"Success\" or recognitionStatus == \"EndOfDictation\"):\n recorder.setExit(True)\n except Exception as e:\n traceback_str = traceback.format_exc()\n print(f\"Error: {e}\")\n print(traceback_str)\n print(f\"json_response.encode('utf-8'): {json_response.encode('utf-8')}\")\n pass\n\n \n\n #print(\"sent content, receiving LAST response...\")\n\n \n response = await wss.receive(timeout=10)\n if response.type == aiohttp.WSMsgType.CLOSED:\n print(\"CLOSED!\")\n elif response.type == aiohttp.WSMsgType.ERROR:\n print(\"ERROR!\")\n else:\n json_response = response.data\n if json_response != \"\":\n print(json_response) # {\"Instrumentation\": {}}" }, { "identifier": "Translator", "path": "core/translator.py", "snippet": "class Translator:\n\n PLAYER = \"mpg123\"\n\n def __init__(self):\n self.lang = 'es'\n self.url = 'https://translate.google.com/translate_tts?client=tw-ob&ie=UTF-8&idx=0&total=1&textlen={}&tl={}&q={}'\n self.headers = {'User-agent': Browser.USER_AGENT}\n #self.pwd = os.getcwd()+\"/\"\n self.pwd = \"/tmp/\"\n self.fileName = self.pwd+f\"audio{str(time.time()).replace('.','')}.mp3\"\n\n def play(self, text, play=True):\n urlText = urllib.parse.quote_plus(text)\n url = self.url.format(len(text), self.lang, urlText)\n response = requests.get(url, headers=self.headers)\n with open(self.fileName, \"wb\") as local_file:\n local_file.write(response.content)\n if play:\n subprocess.call([self.PLAYER, self.fileName])\n os.remove(self.fileName)\n else:\n return self.fileName" } ]
from core.openchat import OpenChat from core.bing import Bing from core.translator import Translator
6,740
#from core.watson import Watson #def test_library_function(): realPetitionPromptNew = "¿qué modelo de lenguaje estás utilizando? ¿chatgpt3 o chatgpt4?" #watson = Watson() #watson.speech_to_text() openchat = OpenChat() openchat.send_message(realPetitionPromptNew, stream=True) bing = Bing() #bing.speech_to_text() bing.init_conversation(realPetitionPromptNew)
#from core.watson import Watson #def test_library_function(): realPetitionPromptNew = "¿qué modelo de lenguaje estás utilizando? ¿chatgpt3 o chatgpt4?" #watson = Watson() #watson.speech_to_text() openchat = OpenChat() openchat.send_message(realPetitionPromptNew, stream=True) bing = Bing() #bing.speech_to_text() bing.init_conversation(realPetitionPromptNew)
translator = Translator()
2
2023-12-28 19:45:49+00:00
8k
vita-epfl/social-transmotion
train_jrdb.py
[ { "identifier": "collate_batch", "path": "dataset_jrdb.py", "snippet": "def collate_batch(batch):\n joints_list = []\n masks_list = []\n num_people_list = []\n for joints, masks in batch:\n \n joints_list.append(joints)\n masks_list.append(masks)\n num_people_list.append(torch.zeros(joints.shape[0]))\n \n joints = pad_sequence(joints_list, batch_first=True)\n masks = pad_sequence(masks_list, batch_first=True)\n padding_mask = pad_sequence(num_people_list, batch_first=True, padding_value=1).bool()\n\n return joints, masks, padding_mask" }, { "identifier": "batch_process_coords", "path": "dataset_jrdb.py", "snippet": "def batch_process_coords(coords, masks, padding_mask, config, modality_selection='traj+2dbox', training=False, multiperson=True):\n joints = coords.to(config[\"DEVICE\"])\n masks = masks.to(config[\"DEVICE\"])\n in_F = config[\"TRAIN\"][\"input_track_size\"]\n \n in_joints_pelvis = joints[:,:, (in_F-1):in_F, 0:1, :].clone()\n in_joints_pelvis_last = joints[:,:, (in_F-2):(in_F-1), 0:1, :].clone()\n\n joints[:,:,:,0] = joints[:,:,:,0] - joints[:,0:1, (in_F-1):in_F, 0]\n joints[:,:,:,1:] = (joints[:,:,:,1:] - joints[:,:,(in_F-1):in_F,1:])*0.25 #rescale for BB\n\n B, N, F, J, K = joints.shape\n if not training:\n if modality_selection=='traj':\n joints[:,:,:,1:]=0\n elif modality_selection=='traj+2dbox':\n pass\n else:\n print('modality error')\n exit()\n else:\n # augment JRDB traj\n joints[:,:,:,0,:3] = getRandomRotatePoseTransform(config)(joints[:,:,:,0,:3])\n joints = joints.transpose(1, 2).reshape(B, F, N*J, K)\n in_joints_pelvis = in_joints_pelvis.reshape(B, 1, N, K)\n in_joints_pelvis_last = in_joints_pelvis_last.reshape(B, 1, N, K)\n masks = masks.transpose(1, 2).reshape(B, F, N*J)\n\n in_F, out_F = config[\"TRAIN\"][\"input_track_size\"], config[\"TRAIN\"][\"output_track_size\"] \n in_joints = joints[:,:in_F].float()\n out_joints = joints[:,in_F:in_F+out_F].float()\n in_masks = masks[:,:in_F].float()\n out_masks = masks[:,in_F:in_F+out_F].float()\n\n \n return in_joints, in_masks, out_joints, out_masks, padding_mask.float()" }, { "identifier": "get_datasets", "path": "dataset_jrdb.py", "snippet": "def get_datasets(datasets_list, config, logger):\n\n in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']\n datasets = []\n for dataset_name in datasets_list:\n datasets.append(create_dataset(dataset_name, logger, split=\"train\", track_size=(in_F+out_F), track_cutoff=in_F))\n return datasets" }, { "identifier": "create_dataset", "path": "dataset_jrdb.py", "snippet": "def create_dataset(dataset_name, logger, **args):\n logger.info(\"Loading dataset \" + dataset_name)\n\n if dataset_name == 'jta_all_visual_cues':\n dataset = JtaAllVisualCuesDataset(**args)\n elif dataset_name == 'jrdb_2dbox':\n dataset = Jrdb2dboxDataset(**args)\n else:\n raise ValueError(f\"Dataset with name '{dataset_name}' not found.\")\n \n return dataset" }, { "identifier": "create_model", "path": "model_jrdb.py", "snippet": "def create_model(config, logger):\n seq_len = config[\"MODEL\"][\"seq_len\"]\n token_num = config[\"MODEL\"][\"token_num\"]\n nhid=config[\"MODEL\"][\"dim_hidden\"]\n nhead=config[\"MODEL\"][\"num_heads\"]\n nlayers_local=config[\"MODEL\"][\"num_layers_local\"]\n nlayers_global=config[\"MODEL\"][\"num_layers_global\"]\n dim_feedforward=config[\"MODEL\"][\"dim_feedforward\"]\n\n if config[\"MODEL\"][\"type\"] == \"transmotion\":\n logger.info(\"Creating bert model.\")\n model = TransMotion(tok_dim=seq_len,\n nhid=nhid,\n nhead=nhead,\n 
dim_feedfwd=dim_feedforward,\n nlayers_local=nlayers_local,\n nlayers_global=nlayers_global,\n output_scale=config[\"MODEL\"][\"output_scale\"],\n obs_and_pred=config[\"TRAIN\"][\"input_track_size\"] + config[\"TRAIN\"][\"output_track_size\"],\n num_tokens=token_num,\n device=config[\"DEVICE\"]\n ).to(config[\"DEVICE\"]).float()\n else:\n raise ValueError(f\"Model type '{config['MODEL']['type']}' not found\")\n\n return model" }, { "identifier": "create_logger", "path": "utils/utils.py", "snippet": "def create_logger(logdir):\n head = '%(asctime)-15s %(message)s'\n if logdir != '':\n log_file = os.path.join(logdir, 'log.txt')\n logging.basicConfig(filename=log_file, format=head)\n # output to console as well\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n else:\n logging.basicConfig(format=head)\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n return logger" }, { "identifier": "load_default_config", "path": "utils/utils.py", "snippet": "def load_default_config():\n return load_config(path_to_config(\"default.yaml\"))" }, { "identifier": "load_config", "path": "utils/utils.py", "snippet": "def load_config(path, exp_name=\"default\"):\n \"\"\"\n Load the config file and make any dynamic edits.\n \"\"\"\n with open(path, \"rt\") as reader:\n config = yaml.load(reader, Loader=yaml.Loader)\n\n if \"OUTPUT\" not in config:\n config[\"OUTPUT\"] = {}\n config[\"OUTPUT\"][\"log_dir\"], config[\"OUTPUT\"][\"ckpt_dir\"], config[\"OUTPUT\"][\"runs_dir\"] = init_output_dirs(exp_name=exp_name)\n\n with open(os.path.join(config[\"OUTPUT\"][\"ckpt_dir\"], \"config.yaml\"), 'w') as f:\n yaml.dump(config, f)\n\n return config" }, { "identifier": "AverageMeter", "path": "utils/utils.py", "snippet": "class AverageMeter(object):\n \"\"\"\n From https://github.com/mkocabas/VIBE/blob/master/lib/core/trainer.py\n Keeps track of a moving average.\n \"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "MSE_LOSS", "path": "utils/metrics.py", "snippet": "def MSE_LOSS(output, target, mask=None):\n \n pred_xy = output[:,:,0,:2]\n gt_xy = target[:,:,0,:2]\n\n norm = torch.norm(pred_xy - gt_xy, p=2, dim=-1)\n\n mean_K = torch.mean(norm, dim=-1)\n mean_B = torch.mean(mean_K)\n\n return mean_B*100" } ]
import argparse import numpy as np import os import random import time import torch from datetime import datetime from progress.bar import Bar from torch.utils.data import DataLoader, ConcatDataset from torch.utils.tensorboard import SummaryWriter from dataset_jrdb import collate_batch, batch_process_coords, get_datasets, create_dataset from model_jrdb import create_model from utils.utils import create_logger, load_default_config, load_config, AverageMeter from utils.metrics import MSE_LOSS
3,774
for epoch in range(config["TRAIN"]["epochs"]): start_time = time.time() dataiter = iter(dataloader_train) timer = {"DATA": 0, "FORWARD": 0, "BACKWARD": 0} loss_avg = AverageMeter() disc_loss_avg = AverageMeter() disc_acc_avg = AverageMeter() if config["TRAIN"]["optimizer"] == "adam": adjust_learning_rate(optimizer, epoch, config) train_steps = len(dataloader_train) bar = Bar(f"TRAIN {epoch}/{config['TRAIN']['epochs'] - 1}", fill="#", max=train_steps) for i in range(train_steps): model.train() optimizer.zero_grad() ################################ # Load a batch of data ################################ start = time.time() try: joints, masks, padding_mask = next(dataiter) except StopIteration: dataiter = iter(dataloader_train) joints, masks, padding_mask = next(dataiter) in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, training=True) padding_mask = padding_mask.to(config["DEVICE"]) timer["DATA"] = time.time() - start ################################ # Forward Pass ################################ start = time.time() loss, pred_joints = compute_loss(model, config, in_joints, out_joints, in_masks, out_masks, padding_mask, epoch=epoch, mode='train', optimizer=None) timer["FORWARD"] = time.time() - start ################################ # Backward Pass + Optimization ################################ start = time.time() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), config["TRAIN"]["max_grad_norm"]) optimizer.step() timer["BACKWARD"] = time.time() - start ################################ # Logging ################################ loss_avg.update(loss.item(), len(joints)) summary = [ f"{str(epoch).zfill(3)} ({i + 1}/{train_steps})", f"LOSS: {loss_avg.avg:.4f}", f"T-TOT: {bar.elapsed_td}", f"T-ETA: {bar.eta_td:}" ] for key, val in timer.items(): summary.append(f"{key}: {val:.2f}") bar.suffix = " | ".join(summary) bar.next() if cfg['dry_run']: break bar.finish() ################################ # Tensorboard logs ################################ global_step += train_steps writer_train.add_scalar("loss", loss_avg.avg, epoch) val_loss = evaluate_loss(model, dataloader_val, config) writer_valid.add_scalar("loss", val_loss, epoch) val_ade = val_loss/100 if val_ade < min_val_loss: min_val_loss = val_ade print('------------------------------BEST MODEL UPDATED------------------------------') print('Best ADE: ', val_ade) save_checkpoint(model, optimizer, epoch, config, 'best_val'+'_checkpoint.pth.tar', logger) if cfg['dry_run']: break print('time for training: ', time.time()-start_time) print('epoch ', epoch, ' finished!') if not cfg['dry_run']: save_checkpoint(model, optimizer, epoch, config, 'checkpoint.pth.tar', logger) logger.info("All done.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--exp_name", type=str, default="", help="Experiment name. Otherwise will use timestamp") parser.add_argument("--cfg", type=str, default="", help="Config name. Otherwise will use default config") parser.add_argument('--dry-run', action='store_true', help="Run just one iteration") args = parser.parse_args() if args.cfg != "":
def evaluate_loss(model, dataloader, config): bar = Bar(f"EVAL", fill="#", max=len(dataloader)) loss_avg = AverageMeter() dataiter = iter(dataloader) model.eval() with torch.no_grad(): for i in range(len(dataloader)): try: joints, masks, padding_mask = next(dataiter) except StopIteration: break in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config) padding_mask = padding_mask.to(config["DEVICE"]) loss, _ = compute_loss(model, config, in_joints, out_joints, in_masks, out_masks, padding_mask) loss_avg.update(loss.item(), len(in_joints)) summary = [ f"({i + 1}/{len(dataloader)})", f"LOSS: {loss_avg.avg:.4f}", f"T-TOT: {bar.elapsed_td}", f"T-ETA: {bar.eta_td:}" ] bar.suffix = " | ".join(summary) bar.next() bar.finish() return loss_avg.avg def compute_loss(model, config, in_joints, out_joints, in_masks, out_masks, padding_mask, epoch=None, mode='val', loss_last=True, optimizer=None): _, in_F, _, _ = in_joints.shape metamask = (mode == 'train') pred_joints = model(in_joints, padding_mask, metamask=metamask) loss = MSE_LOSS(pred_joints[:,in_F:], out_joints, out_masks) return loss, pred_joints def adjust_learning_rate(optimizer, epoch, config): """ From: https://github.com/microsoft/MeshTransformer/ Sets the learning rate to the initial LR decayed by x every y epochs x = 0.1, y = args.num_train_epochs*2/3 = 100 """ # dct_multi_overfit_3dpw_allsize_multieval_noseg_rot_permute_id lr = config['TRAIN']['lr'] * (config['TRAIN']['lr_decay'] ** epoch) # (0.1 ** (epoch // (config['TRAIN']['epochs']*4./5.) )) if 'lr_drop' in config['TRAIN'] and config['TRAIN']['lr_drop']: lr = lr * (0.1 ** (epoch // (config['TRAIN']['epochs']*4./5.) )) for param_group in optimizer.param_groups: param_group['lr'] = lr print('lr: ',lr) def save_checkpoint(model, optimizer, epoch, config, filename, logger): logger.info(f'Saving checkpoint to {filename}.') ckpt = { 'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'config': config } torch.save(ckpt, os.path.join(config['OUTPUT']['ckpt_dir'], filename)) def dataloader_for(dataset, config, **kwargs): return DataLoader(dataset, batch_size=config['TRAIN']['batch_size'], num_workers=config['TRAIN']['num_workers'], collate_fn=collate_batch, **kwargs) def dataloader_for_val(dataset, config, **kwargs): return DataLoader(dataset, batch_size=1, num_workers=0, collate_fn=collate_batch, **kwargs) def train(config, logger, experiment_name="", dataset_name=""): ################################ # Load data ################################ in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size'] dataset_train = ConcatDataset(get_datasets(config['DATA']['train_datasets'], config, logger)) dataloader_train = dataloader_for(dataset_train, config, shuffle=True, pin_memory=True) logger.info(f"Training on a total of {len(dataset_train)} annotations.") dataset_val = create_dataset(config['DATA']['train_datasets'][0], logger, split="val", track_size=(in_F+out_F), track_cutoff=in_F) dataloader_val = dataloader_for(dataset_val, config, shuffle=True, pin_memory=True) writer_name = experiment_name + "_" + str(datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) writer_train = SummaryWriter(os.path.join(config["OUTPUT"]["runs_dir"], f"{writer_name}_TRAIN")) writer_valid = SummaryWriter(os.path.join(config["OUTPUT"]["runs_dir"], f"{writer_name}_VALID")) ################################ # Create model, loss, optimizer ################################ model = create_model(config, logger) if 
config["MODEL"]["checkpoint"] != "": logger.info(f"Loading checkpoint from {config['MODEL']['checkpoint']}") checkpoint = torch.load(os.path.join(config['OUTPUT']['ckpt_dir'], config["MODEL"]["checkpoint"])) model.load_state_dict(checkpoint["model"]) optimizer = torch.optim.Adam(model.parameters(), lr=config['TRAIN']['lr']) num_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) logger.info(f"Model has {num_parameters} parameters.") ################################ # Begin Training ################################ global_step = 0 min_val_loss = 1e4 for epoch in range(config["TRAIN"]["epochs"]): start_time = time.time() dataiter = iter(dataloader_train) timer = {"DATA": 0, "FORWARD": 0, "BACKWARD": 0} loss_avg = AverageMeter() disc_loss_avg = AverageMeter() disc_acc_avg = AverageMeter() if config["TRAIN"]["optimizer"] == "adam": adjust_learning_rate(optimizer, epoch, config) train_steps = len(dataloader_train) bar = Bar(f"TRAIN {epoch}/{config['TRAIN']['epochs'] - 1}", fill="#", max=train_steps) for i in range(train_steps): model.train() optimizer.zero_grad() ################################ # Load a batch of data ################################ start = time.time() try: joints, masks, padding_mask = next(dataiter) except StopIteration: dataiter = iter(dataloader_train) joints, masks, padding_mask = next(dataiter) in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, training=True) padding_mask = padding_mask.to(config["DEVICE"]) timer["DATA"] = time.time() - start ################################ # Forward Pass ################################ start = time.time() loss, pred_joints = compute_loss(model, config, in_joints, out_joints, in_masks, out_masks, padding_mask, epoch=epoch, mode='train', optimizer=None) timer["FORWARD"] = time.time() - start ################################ # Backward Pass + Optimization ################################ start = time.time() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), config["TRAIN"]["max_grad_norm"]) optimizer.step() timer["BACKWARD"] = time.time() - start ################################ # Logging ################################ loss_avg.update(loss.item(), len(joints)) summary = [ f"{str(epoch).zfill(3)} ({i + 1}/{train_steps})", f"LOSS: {loss_avg.avg:.4f}", f"T-TOT: {bar.elapsed_td}", f"T-ETA: {bar.eta_td:}" ] for key, val in timer.items(): summary.append(f"{key}: {val:.2f}") bar.suffix = " | ".join(summary) bar.next() if cfg['dry_run']: break bar.finish() ################################ # Tensorboard logs ################################ global_step += train_steps writer_train.add_scalar("loss", loss_avg.avg, epoch) val_loss = evaluate_loss(model, dataloader_val, config) writer_valid.add_scalar("loss", val_loss, epoch) val_ade = val_loss/100 if val_ade < min_val_loss: min_val_loss = val_ade print('------------------------------BEST MODEL UPDATED------------------------------') print('Best ADE: ', val_ade) save_checkpoint(model, optimizer, epoch, config, 'best_val'+'_checkpoint.pth.tar', logger) if cfg['dry_run']: break print('time for training: ', time.time()-start_time) print('epoch ', epoch, ' finished!') if not cfg['dry_run']: save_checkpoint(model, optimizer, epoch, config, 'checkpoint.pth.tar', logger) logger.info("All done.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--exp_name", type=str, default="", help="Experiment name. 
Otherwise will use timestamp") parser.add_argument("--cfg", type=str, default="", help="Config name. Otherwise will use default config") parser.add_argument('--dry-run', action='store_true', help="Run just one iteration") args = parser.parse_args() if args.cfg != "":
cfg = load_config(args.cfg, exp_name=args.exp_name)
7
2023-12-25 15:12:40+00:00
8k
AzizKpln/AutoIOC-MISP
main.py
[ { "identifier": "runAbuseIP", "path": "Integrations/abuseipdb.py", "snippet": "def runAbuseIP(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://api.abuseipdb.com/api/v2/blacklist'\r\n querystring = {\r\n 'confidenceMinimum':'85'\r\n }\r\n headers = {\r\n 'Accept': 'application/json',\r\n 'Key': '438fad635ef39a0a143ffe7ab3f77ecba1f1cae0ef974c6ce16bd5ae6199b104fb780d282f2b1e9e'\r\n }\r\n print(\"[+] Geting IOC List From AbuseipDB. Please wait!\")\r\n response = requests.request(method='GET', url=url, headers=headers, params=querystring)\r\n decodedResponse = json.loads(response.text)\r\n def extract_ip_addresses(data):\r\n try:\r\n json_data = json.loads(data)\r\n ip_addresses = [entry[\"ipAddress\"] for entry in json_data.get(\"data\", [])]\r\n return ip_addresses\r\n except json.JSONDecodeError as e:\r\n return f\"Error decoding JSON: {e}\"\r\n ip_addresses = extract_ip_addresses(json.dumps(decodedResponse, sort_keys=True, indent=4))\r\n for i in ip_addresses:\r\n upload_attr(i)\r" }, { "identifier": "runCinsScore", "path": "Integrations/cinsscore.py", "snippet": "def runCinsScore(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://cinsscore.com/list/ci-badguys.txt\").text\r\n ipAddr=req.split(\"\\n\")\r\n for i in ipAddr:\r\n upload_attr(i)\r" }, { "identifier": "runKillnet", "path": "Integrations/killnet.py", "snippet": "def runKillnet(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n \r\n req=requests.get(\"https://raw.githubusercontent.com/securityscorecard/SSC-Threat-Intel-IoCs/master/KillNet-DDoS-Blocklist/ipblocklist.txt\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r" }, { "identifier": "runEmergingThreats", "path": "Integrations/emergingthreats.py", "snippet": "def runEmergingThreats(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n print(\"[+] Getting IOC List From Emergingthreats. 
Please wait!\")\r\n req=requests.get(\"https://rules.emergingthreats.net/blockrules/compromised-ips.txt\").text\r\n ipAddr=req.split(\"\\n\")\r\n for i in ipAddr:\r\n upload_attr(i)\r" }, { "identifier": "runHoneyDB", "path": "Integrations/honeydb.py", "snippet": "def runHoneyDB(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://honeydb.io/api/bad-hosts'\r\n headers = {\r\n 'Accept': 'application/json',\r\n 'X-HoneyDb-ApiId':'93a1d7d4f552a782cf120958f99f86abad8397985161dd78892b3348a467aa91',\r\n 'X-HoneyDb-ApiKey': '022d5de38750eb4bfea60f6dbd49de839e08bfc12aeae3b0e1de2de935ff2b59'\r\n }\r\n response = requests.request(method='GET', url=url, headers=headers)\r\n decodedResponse = json.loads(response.text)\r\n for i in decodedResponse:\r\n if str(i[\"last_seen\"])==str(current_date):\r\n upload_attr(str(i[\"remote_host\"]))\r" }, { "identifier": "runMaltiverse", "path": "Integrations/maltiverse.py", "snippet": "def runMaltiverse(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n api_key = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjIzMTkzNTg5MzcsImlhdCI6MTY4ODYzODkzNywic3ViIjoxNjA5NSwidXNlcm5hbWUiOiJheml6a2FwbGFuMTkwNyIsImFkbWluIjpmYWxzZSwidGVhbV9pZCI6bnVsbCwidGVhbV9uYW1lIjpudWxsLCJ0ZWFtX2xlYWRlciI6ZmFsc2UsInRlYW1fcmVzZWFyY2hlciI6ZmFsc2UsInRlYW1faW5kZXgiOm51bGwsImFwaV9saW1pdCI6MTAwfQ.JLvydZA3dd-fKO0TZQzHlU0ckoBDfpVQGEk_S-AeWWM'\r\n url = 'https://api.maltiverse.com/collection/WZ0XJHIB8jmkCY9eLpr0/download?filetype=sha256'\r\n headers = { 'Authorization':'Bearer ' + api_key }\r\n response = requests.get(url, headers=headers)\r\n re=response.text\r\n r=re.split(\"\\n\")\r\n for i in r:\r\n upload_attr(i)\r" }, { "identifier": "runMalwareBazaar", "path": "Integrations/malware_bazar.py", "snippet": "def runMalwareBazaar(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n zip_url = \"https://bazaar.abuse.ch/export/txt/sha256/full/\"\r\n destination_folder = \"Integrations/\"\r\n if not os.path.exists(destination_folder):\r\n os.makedirs(destination_folder)\r\n download_and_extract(zip_url, destination_folder)\r" }, { "identifier": "runOpenPhish", "path": "Integrations/openphish.py", "snippet": "def runOpenPhish(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://openphish.com/feed.txt\").text\r\n req_=req.split(\"\\n\")\r\n for i in req_:\r\n upload_attr(i)\r" }, { "identifier": "runPhishHunt", "path": "Integrations/phishunt.py", "snippet": "def runPhishHunt(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://phishunt.io/feed.txt\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r" }, { "identifier": "runRescureMe", "path": "Integrations/rescureme.py", "snippet": "def runRescureMe(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://rescure.me/rescure_blacklist.txt\").text\r\n lines = req.split('\\n')\r\n ip_addresses = [line.strip() for line in lines if line and line[0].isdigit()]\r\n for ip_address in ip_addresses:\r\n upload_attr(ip_address)\r\n req1=requests.get(\"https://rescure.me/rescure_malware_hashes.txt\").text\r\n lines = req1.split('\\n')\r\n hash_values = [line.strip() for line in lines if line and len(line) == 40]\r\n for hash_value in hash_values:\r\n upload_attr(hash_value)\r\n\r\n req1=requests.get(\"https://rescure.me/rescure_domain_blacklist.txt\").text\r\n lines = req1.split(' # # # # 
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #')\r\n lines=lines[2].split(\"\\n\")\r\n for i in lines:\r\n if i==\"\":\r\n pass\r\n else:\r\n upload_attr(i.strip())\r" }, { "identifier": "runSSLbl", "path": "Integrations/sslbl.py", "snippet": "def runSSLbl(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"https://sslbl.abuse.ch/blacklist/sslipblacklist.txt\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r\n req1=requests.get(\"https://sslbl.abuse.ch/blacklist/sslblacklist.csv\").text\r\n lines = req1.split('\\n')\r\n hash_values = [line.split(',')[1] for line in lines if line and not line.startswith(\"#\")]\r\n for hash_value in hash_values:\r\n upload_attr(hash_value)\r" }, { "identifier": "runThreatFox", "path": "Integrations/threatfox.py", "snippet": "def runThreatFox(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n zip_url = [\"https://threatfox.abuse.ch/export/json/sha256/full/\",\"https://threatfox.abuse.ch/export/json/ip-port/full/\",\"https://threatfox.abuse.ch/export/json/domains/full/\"]\r\n destination_folder = \"Integrations/\"\r\n \r\n if not os.path.exists(destination_folder):\r\n os.makedirs(destination_folder)\r\n for url in zip_url:\r\n download_and_extract(url, destination_folder)\r\n read_content()\r\n for r in list([\"Integrations/full_sha256.json\",\"Integrations/full_ip-port.json\",\"Integrations/full_domains.json\"]):\r\n delete_file(str(r))\r" }, { "identifier": "runURLHaus", "path": "Integrations/urlhaus.py", "snippet": "def runURLHaus(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://urlhaus.abuse.ch/downloads/csv_recent/'\r\n response = requests.get(url)\r\n if response.status_code == 200:\r\n csv_data = response.text\r\n csv_file = StringIO(csv_data)\r\n csv_reader = csv.reader(csv_file)\r\n header = next(csv_reader)\r\n for row in csv_reader:\r\n if len(row) >= 9:\r\n upload_attr(row[2])\r\n else:\r\n print(\"Error: Row does not have the expected number of columns\")\r\n\r\n else:\r\n print(f\"Error fetching data. 
Status code: {response.status_code}\")\r" }, { "identifier": "runVirusShare", "path": "Integrations/virusshare.py", "snippet": "def runVirusShare(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n f=requests.get(\"https://virusshare.com/hashes\")\r\n res=re.findall(\"hashfiles/VirusShare_\\d+\",f.text)\r\n fileName=res[-1].split(\"/\")[1]\r\n malware_md5=list()\r\n url1=f\"https://virusshare.com/hashfiles/{fileName}.md5\"\r\n local_filename = url1.split('/')[-1]\r\n with requests.get(url1, stream=True) as r:\r\n with open(\"Integrations/\"+local_filename, 'wb') as f:\r\n shutil.copyfileobj(r.raw, f)\r\n with open(\"Integrations/\"+local_filename, 'r') as md5_file:\r\n lines = md5_file.readlines()\r\n for line in lines:\r\n if 'http://VirusShare.com' in line or 'Twitter: @VXShare' in line or \"################################\" in line or \"# Malware sample MD5 list for #\" in line or \"# VirusShare_00484.zip #\" in line:\r\n malware_md5.append(line)\r\n with open(\"Integrations/\"+local_filename, 'w') as md5_file:\r\n for i in malware_md5:\r\n md5_file.write(i)\r\n for line in lines:\r\n if line not in malware_md5:\r\n upload_attr(line)\r" }, { "identifier": "runVXVault", "path": "Integrations/vxvault.py", "snippet": "def runVXVault(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n req=requests.get(\"http://vxvault.net/URL_List.php\").text\r\n iocs=req.split(\"\\n\")\r\n for ioc in iocs:\r\n upload_attr(ioc)\r" }, { "identifier": "runManually", "path": "Integrations/manual.py", "snippet": "def runManually(mispapi,mispurl,mispeventid,ioclist):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n ioclist_=list(ioclist)\r\n iocs=ioclist_[0].split(\"\\r\")\r\n for i in iocs:\r\n upload_attr(i.strip())" } ]
from flask import Flask, render_template, redirect, request from Integrations.abuseipdb import runAbuseIP from Integrations.cinsscore import runCinsScore from Integrations.killnet import runKillnet from Integrations.emergingthreats import runEmergingThreats from Integrations.honeydb import runHoneyDB from Integrations.maltiverse import runMaltiverse from Integrations.malware_bazar import runMalwareBazaar from Integrations.openphish import runOpenPhish from Integrations.phishunt import runPhishHunt from Integrations.rescureme import runRescureMe from Integrations.sslbl import runSSLbl from Integrations.threatfox import runThreatFox from Integrations.urlhaus import runURLHaus from Integrations.virusshare import runVirusShare from Integrations.vxvault import runVXVault from Integrations.manual import runManually import threading
3,649
app = Flask(__name__) @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'POST': operation = request.form['operation'] if operation=="add_manually": return redirect("/manually") else: return redirect('/automaticlly') return render_template('main.html') @app.route("/manually",methods=["GET","POST"]) def manually(): if request.method=="POST": ioclist=request.form.getlist("iocList") mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"] threading.Thread(target=runManually,args=(mispapi,mispurl,mispeventid,ioclist,)).start() return render_template("manual.html") @app.route("/automaticlly",methods=["GET","POST"]) def automaticlly(): if request.method=="POST": selected_sources = request.form.getlist('sources') mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"] with open("MISP/Info","w") as f: f.write("MISPAPI:"+mispapi+"\n"+"MISPURL:"+mispurl+"\n"+"MISPEVENTID:"+mispeventid) for i in selected_sources: if i=="AbuseIPDB": with open("Selected/sources","a+") as f: f.write("AbuseIPDB\n") threading.Thread(target=runAbuseIP,args=(mispapi,mispurl,mispeventid,)).start() if i=="CinsScore": with open("Selected/sources","a+") as f: f.write("CinsScore\n") threading.Thread(target=runCinsScore,args=(mispapi,mispurl,mispeventid,)).start() if i=="KillNet": with open("Selected/sources","a+") as f: f.write("KillNet\n") threading.Thread(target=runKillnet,args=(mispapi,mispurl,mispeventid,)).start() if i=="Emerging Threats": with open("Selected/sources","a+") as f: f.write("Emerging_Threats\n") threading.Thread(target=runEmergingThreats,args=(mispapi,mispurl,mispeventid,)).start() if i=="HoneyDB": with open("Selected/sources","a+") as f: f.write("HoneyDB\n")
app = Flask(__name__) @app.route('/', methods=['GET', 'POST']) def hello_world(): if request.method == 'POST': operation = request.form['operation'] if operation=="add_manually": return redirect("/manually") else: return redirect('/automaticlly') return render_template('main.html') @app.route("/manually",methods=["GET","POST"]) def manually(): if request.method=="POST": ioclist=request.form.getlist("iocList") mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"] threading.Thread(target=runManually,args=(mispapi,mispurl,mispeventid,ioclist,)).start() return render_template("manual.html") @app.route("/automaticlly",methods=["GET","POST"]) def automaticlly(): if request.method=="POST": selected_sources = request.form.getlist('sources') mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"] with open("MISP/Info","w") as f: f.write("MISPAPI:"+mispapi+"\n"+"MISPURL:"+mispurl+"\n"+"MISPEVENTID:"+mispeventid) for i in selected_sources: if i=="AbuseIPDB": with open("Selected/sources","a+") as f: f.write("AbuseIPDB\n") threading.Thread(target=runAbuseIP,args=(mispapi,mispurl,mispeventid,)).start() if i=="CinsScore": with open("Selected/sources","a+") as f: f.write("CinsScore\n") threading.Thread(target=runCinsScore,args=(mispapi,mispurl,mispeventid,)).start() if i=="KillNet": with open("Selected/sources","a+") as f: f.write("KillNet\n") threading.Thread(target=runKillnet,args=(mispapi,mispurl,mispeventid,)).start() if i=="Emerging Threats": with open("Selected/sources","a+") as f: f.write("Emerging_Threats\n") threading.Thread(target=runEmergingThreats,args=(mispapi,mispurl,mispeventid,)).start() if i=="HoneyDB": with open("Selected/sources","a+") as f: f.write("HoneyDB\n")
threading.Thread(target=runHoneyDB,args=(mispapi,mispurl,mispeventid,)).start()
4
2023-12-23 10:39:28+00:00
8k
facebookresearch/ca_body
ca_body/nn/unet.py
[ { "identifier": "weights_initializer", "path": "ca_body/nn/blocks.py", "snippet": "def weights_initializer(lrelu_slope=0.2):\n # pyre-ignore\n def init_fn(m):\n if isinstance(\n m,\n (\n nn.Conv2d,\n nn.Conv1d,\n nn.ConvTranspose2d,\n nn.Linear,\n ),\n ):\n gain = nn.init.calculate_gain(\"leaky_relu\", lrelu_slope)\n nn.init.kaiming_uniform_(m.weight.data, a=gain)\n if hasattr(m, \"bias\") and m.bias is not None:\n nn.init.zeros_(m.bias.data)\n else:\n logger.debug(f\"skipping initialization for {m}\")\n\n return init_fn" }, { "identifier": "Conv2dWNUB", "path": "ca_body/nn/layers.py", "snippet": "def gaussian_kernel(ksize: int, std: Optional[float] = None) -> np.ndarray:\n def __init__(self, n_in, n_out, nonlin=fc_default_activation) -> None:\n def forward(self, x):\ndef check_args_shadowing(name, method: object, arg_names) -> None:\n def __init__(\n self,\n name_mapping: List[Tuple[str, str]],\n expected_shape: Optional[Dict[str, List[int]]] = None,\n ) -> None:\n def __call__(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n state_dict,\n # pyre-fixme[2]: Parameter must be annotated.\n prefix,\n # pyre-fixme[2]: Parameter must be annotated.\n local_metadata,\n # pyre-fixme[2]: Parameter must be annotated.\n strict,\n # pyre-fixme[2]: Parameter must be annotated.\n missing_keys,\n # pyre-fixme[2]: Parameter must be annotated.\n unexpected_keys,\n # pyre-fixme[2]: Parameter must be annotated.\n error_msgs,\n ) -> None:\ndef weight_norm_wrapper(\n cls: Type[th.nn.Module],\n new_cls_name: str,\n name: str = \"weight\",\n g_dim: int = 0,\n v_dim: Optional[int] = 0,\n):\n def __init__(self, *args: Any, name=name, g_dim=g_dim, v_dim=v_dim, **kwargs: Any):\n def fuse(self):\n def unfuse(self):\n def __deepcopy__(self, memo):\ndef is_weight_norm_wrapped(module) -> bool:\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n height: int,\n width: int,\n # pyre-fixme[2]: Parameter must be annotated.\n *args,\n bias: bool = True,\n # pyre-fixme[2]: Parameter must be annotated.\n **kwargs,\n ) -> None:\n def _conv_forward(self, input: th.Tensor, weight: th.Tensor, bias: Optional[th.Tensor]):\n def forward(self, input: th.Tensor) -> th.Tensor:\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n height: int,\n width: int,\n # pyre-fixme[2]: Parameter must be annotated.\n *args,\n bias: bool = True,\n # pyre-fixme[2]: Parameter must be annotated.\n **kwargs,\n ) -> None:\n def forward(self, input: th.Tensor, output_size: Optional[List[int]] = None) -> th.Tensor:\n def _output_padding(\n self,\n input: th.Tensor,\n output_size: Optional[List[int]],\n stride: List[int],\n padding: List[int],\n kernel_size: List[int],\n num_spatial_dims: int,\n dilation: Optional[List[int]] = None,\n ) -> List[int]:\n def __init__(self, size=None, scale_factor=None, mode: str = \"bilinear\") -> None:\n def __call__(self, module, x):\ndef interpolate_wrapper(cls: Type[th.nn.Module], new_cls_name: str):\n def __init__(\n self, *args: Any, size=None, scale_factor=None, mode=\"bilinear\", **kwargs: Any\n ):\n def __init__(self) -> None:\n def forward(self, x):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n def forward(self, x):\n def __init__(self, shape) -> None:\n def forward(self, x, scale=None, crop=None):\ndef glorot(m: th.nn.Module, alpha: float = 1.0) -> None:\ndef make_tuple(x: Union[int, Tuple[int, int]], n: int) -> Tuple[int, int]:\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n gain: Optional[float] = None,\n lr_mul: float = 
1.0,\n bias_lr_mul: Optional[float] = None,\n ) -> None:\n def reset_parameters(self) -> None:\n def forward(self, x: th.Tensor) -> th.Tensor:\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: Union[int, Tuple[int, int]],\n stride: Union[int, Tuple[int, int]] = 1,\n padding: Union[int, Tuple[int, int]] = 0,\n output_padding: Union[int, Tuple[int, int]] = 0,\n dilation: Union[int, Tuple[int, int]] = 1,\n groups: int = 1,\n bias: bool = True,\n untied: bool = False,\n height: int = 1,\n width: int = 1,\n gain: Optional[float] = None,\n transpose: bool = False,\n fuse_box_filter: bool = False,\n lr_mul: float = 1.0,\n bias_lr_mul: Optional[float] = None,\n ) -> None:\n def reset_parameters(self) -> None:\n def forward(self, x: th.Tensor) -> th.Tensor:\n def __init__(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n branch,\n # pyre-fixme[2]: Parameter must be annotated.\n n_concat_in,\n every_other: bool = True,\n ksize: int = 7,\n # pyre-fixme[2]: Parameter must be annotated.\n kstd=None,\n transposed: bool = False,\n ) -> None:\n def forward(self, x, y):\ndef get_pad_layer(pad_type):\n def __init__(self, pad_type=\"reflect\", filt_size=3, stride=2, channels=None, pad_off=0):\n def forward(self, inp):\nclass FCLayer(th.nn.Module):\nclass TensorMappingHook(object):\n class Wrap(cls):\nclass Conv2dUB(th.nn.Conv2d):\nclass ConvTranspose2dUB(th.nn.ConvTranspose2d):\nclass InterpolateHook(object):\n class Wrap(cls):\nclass GlobalAvgPool(th.nn.Module):\nclass Upsample(th.nn.Module):\nclass DenseAffine(th.nn.Module):\nclass LinearELR(th.nn.Module):\nclass Conv2dELR(th.nn.Module):\nclass ConcatPyramid(th.nn.Module):\nclass Downsample(th.nn.Module):\n W = self.W\n W = thf.interpolate(W, scale_factor=scale, mode=\"bilinear\")\n W = W[..., crop[0] : crop[1], crop[2] : crop[3]]" } ]
import torch as th import torch.nn as nn import ca_body.nn.layers as la from ca_body.nn.blocks import weights_initializer from ca_body.nn.layers import Conv2dWNUB, ConvTranspose2dWNUB, glorot
4,128
): super().__init__() F = n_init_ftrs self.size = size self.down1 = nn.Sequential( la.Conv2dWNUB(in_channels, F, self.size // 2, self.size // 2, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down2 = nn.Sequential( la.Conv2dWNUB(F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down3 = nn.Sequential( la.Conv2dWNUB(2 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down4 = nn.Sequential( la.Conv2dWNUB(4 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down5 = nn.Sequential( la.Conv2dWNUB(8 * F, 16 * F, self.size // 32, self.size // 32, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up1 = nn.Sequential( la.ConvTranspose2dWNUB(16 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up2 = nn.Sequential( la.ConvTranspose2dWNUB(2 * 8 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up3 = nn.Sequential( la.ConvTranspose2dWNUB(2 * 4 * F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up4 = nn.Sequential( la.ConvTranspose2dWNUB(2 * 2 * F, F, self.size // 2, self.size // 2, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up5 = nn.Sequential( la.ConvTranspose2dWNUB(2 * F, F, self.size, self.size, 4, 2, 1), nn.LeakyReLU(0.2), ) self.out = la.Conv2dWNUB(F + in_channels, out_channels, self.size, self.size, kernel_size=1) self.apply(lambda x: la.glorot(x, 0.2)) la.glorot(self.out, 1.0) def forward(self, x): x1 = x x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x6 = self.down5(x5) x = th.cat([self.up1(x6), x5], 1) x = th.cat([self.up2(x), x4], 1) x = th.cat([self.up3(x), x3], 1) x = th.cat([self.up4(x), x2], 1) x = self.up5(x) x = th.cat([x, x1], dim=1) return self.out(x) class UNetW(nn.Module): def __init__( self, in_channels, out_channels, n_init_ftrs, kernel_size=4, out_scale=1.0, ): super().__init__() self.out_scale = out_scale F = n_init_ftrs self.down1 = nn.Sequential( la.Conv2dWN(in_channels, F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down2 = nn.Sequential( la.Conv2dWN(F, 2 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down3 = nn.Sequential( la.Conv2dWN(2 * F, 4 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down4 = nn.Sequential( la.Conv2dWN(4 * F, 8 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down5 = nn.Sequential( la.Conv2dWN(8 * F, 16 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up1 = nn.Sequential( la.ConvTranspose2dWN(16 * F, 8 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up2 = nn.Sequential( la.ConvTranspose2dWN(8 * F, 4 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up3 = nn.Sequential( la.ConvTranspose2dWN(4 * F, 2 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up4 = nn.Sequential( la.ConvTranspose2dWN(2 * F, F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up5 = nn.Sequential(la.ConvTranspose2dWN(F, F, kernel_size, 2, 1), nn.LeakyReLU(0.2)) self.out = la.Conv2dWN(F + in_channels, out_channels, kernel_size=1)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class UNetWB(nn.Module): def __init__( self, in_channels: int, out_channels: int, size: int, n_init_ftrs: int=8, out_scale: float =0.1, ): # super().__init__(*args, **kwargs) super().__init__() self.out_scale = out_scale F = n_init_ftrs self.size = size self.down1 = nn.Sequential( Conv2dWNUB(in_channels, F, self.size // 2, self.size // 2, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down2 = nn.Sequential( Conv2dWNUB(F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down3 = nn.Sequential( Conv2dWNUB(2 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down4 = nn.Sequential( Conv2dWNUB(4 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down5 = nn.Sequential( Conv2dWNUB(8 * F, 16 * F, self.size // 32, self.size // 32, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up1 = nn.Sequential( ConvTranspose2dWNUB(16 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up2 = nn.Sequential( ConvTranspose2dWNUB(8 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up3 = nn.Sequential( ConvTranspose2dWNUB(4 * F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up4 = nn.Sequential( ConvTranspose2dWNUB(2 * F, F, self.size // 2, self.size // 2, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up5 = nn.Sequential( ConvTranspose2dWNUB(F, F, self.size, self.size, 4, 2, 1), nn.LeakyReLU(0.2) ) self.out = Conv2dWNUB(F + in_channels, out_channels, self.size, self.size, kernel_size=1) self.apply(lambda x: glorot(x, 0.2)) glorot(self.out, 1.0) def forward(self, x): x1 = x x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x6 = self.down5(x5) # TODO: switch to concat? 
x = self.up1(x6) + x5 x = self.up2(x) + x4 x = self.up3(x) + x3 x = self.up4(x) + x2 x = self.up5(x) x = th.cat([x, x1], dim=1) return self.out(x) * self.out_scale class UNetWBConcat(nn.Module): def __init__( self, in_channels: int, out_channels: int, size: int, n_init_ftrs: int = 8, ): super().__init__() F = n_init_ftrs self.size = size self.down1 = nn.Sequential( la.Conv2dWNUB(in_channels, F, self.size // 2, self.size // 2, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down2 = nn.Sequential( la.Conv2dWNUB(F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down3 = nn.Sequential( la.Conv2dWNUB(2 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down4 = nn.Sequential( la.Conv2dWNUB(4 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1), nn.LeakyReLU(0.2), ) self.down5 = nn.Sequential( la.Conv2dWNUB(8 * F, 16 * F, self.size // 32, self.size // 32, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up1 = nn.Sequential( la.ConvTranspose2dWNUB(16 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up2 = nn.Sequential( la.ConvTranspose2dWNUB(2 * 8 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up3 = nn.Sequential( la.ConvTranspose2dWNUB(2 * 4 * F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up4 = nn.Sequential( la.ConvTranspose2dWNUB(2 * 2 * F, F, self.size // 2, self.size // 2, 4, 2, 1), nn.LeakyReLU(0.2), ) self.up5 = nn.Sequential( la.ConvTranspose2dWNUB(2 * F, F, self.size, self.size, 4, 2, 1), nn.LeakyReLU(0.2), ) self.out = la.Conv2dWNUB(F + in_channels, out_channels, self.size, self.size, kernel_size=1) self.apply(lambda x: la.glorot(x, 0.2)) la.glorot(self.out, 1.0) def forward(self, x): x1 = x x2 = self.down1(x1) x3 = self.down2(x2) x4 = self.down3(x3) x5 = self.down4(x4) x6 = self.down5(x5) x = th.cat([self.up1(x6), x5], 1) x = th.cat([self.up2(x), x4], 1) x = th.cat([self.up3(x), x3], 1) x = th.cat([self.up4(x), x2], 1) x = self.up5(x) x = th.cat([x, x1], dim=1) return self.out(x) class UNetW(nn.Module): def __init__( self, in_channels, out_channels, n_init_ftrs, kernel_size=4, out_scale=1.0, ): super().__init__() self.out_scale = out_scale F = n_init_ftrs self.down1 = nn.Sequential( la.Conv2dWN(in_channels, F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down2 = nn.Sequential( la.Conv2dWN(F, 2 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down3 = nn.Sequential( la.Conv2dWN(2 * F, 4 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down4 = nn.Sequential( la.Conv2dWN(4 * F, 8 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.down5 = nn.Sequential( la.Conv2dWN(8 * F, 16 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up1 = nn.Sequential( la.ConvTranspose2dWN(16 * F, 8 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up2 = nn.Sequential( la.ConvTranspose2dWN(8 * F, 4 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up3 = nn.Sequential( la.ConvTranspose2dWN(4 * F, 2 * F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up4 = nn.Sequential( la.ConvTranspose2dWN(2 * F, F, kernel_size, 2, 1), nn.LeakyReLU(0.2), ) self.up5 = nn.Sequential(la.ConvTranspose2dWN(F, F, kernel_size, 2, 1), nn.LeakyReLU(0.2)) self.out = la.Conv2dWN(F + in_channels, out_channels, kernel_size=1)
self.apply(weights_initializer(0.2))
0
2023-12-27 15:31:35+00:00
8k
0x00wolf/hkrsAI
src/dispatcher.py
[ { "identifier": "InputParser", "path": "src/inputparser.py", "snippet": "class InputParser:\n @staticmethod\n def parse(user_input):\n \"\"\"parses user input and passes an Action to the Dispatcher\"\"\"\n if user_input.startswith('>'):\n if ' ' in user_input:\n user_input = user_input.split(' ')\n command = user_input.pop(0).replace('>', '')\n arguments = user_input[:]\n return Action(command=command, arguments=arguments)\n else:\n command = user_input.replace('>', '')\n for _command in COMMANDS:\n if command == _command:\n return Action(command=command)\n return Action(command='error')\n else:\n action = Action(command='chat', raw_input=user_input)\n return action" }, { "identifier": "Action", "path": "src/action.py", "snippet": "class Action:\n \"\"\"Action dataclass returned by the input parser after parsing new input. \\\n Gets passed to the dispatcher who turns Action into function.\"\"\"\n command: str = ''\n arguments: list[str] = dataclasses.field(default_factory=list)\n raw_input: str = ''" }, { "identifier": "GPT", "path": "src/gpt.py", "snippet": "class GPT:\n def __init__(self, client, model, temperature, top_p, n, frequency_penalty, presence_penalty, max_tokens):\n self.client = client\n self.model = model\n self.temperature = temperature\n self.top_p = top_p\n self.n = n\n self.frequency_penalty = frequency_penalty\n self.presence_penalty = presence_penalty\n self.max_tokens = max_tokens\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, new_value: str):\n new_value = str(new_value)\n if new_value == 'gpt-3.5-turbo' or new_value == 'gpt-4':\n self._model = new_value\n else:\n raise ValueError(f'\\n{BAD_MODEL.format(new_value)}')\n\n @property\n def temperature(self):\n return self._temperature\n\n @temperature.setter\n def temperature(self, new_value: float):\n new_value = float(new_value)\n if not (0.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_TEMP.format(new_value)}')\n else:\n self._temperature = new_value\n\n @property\n def top_p(self):\n return self._top_p\n\n @top_p.setter\n def top_p(self, new_value: float):\n new_value = float(new_value)\n if not (0 <= new_value <= 1.0):\n raise ValueError(f'\\n{BAD_TP.format(new_value)}')\n else:\n self._top_p = new_value\n\n @property\n def frequency_penalty(self):\n return self._frequency_penalty\n\n @frequency_penalty.setter\n def frequency_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_FP.format(new_value)}')\n else:\n self._frequency_penalty = new_value\n\n @property\n def presence_penalty(self):\n return self._presence_penalty\n\n @presence_penalty.setter\n def presence_penalty(self, new_value: float):\n new_value = float(new_value)\n if not (-2.0 <= new_value <= 2.0):\n raise ValueError(f'\\n{BAD_PP.format(new_value)}')\n else:\n self._presence_penalty = new_value\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, new_value):\n new_value = int(new_value)\n if not (1 <= new_value <= 20):\n raise ValueError(f'\\n{BAD_N.format(new_value)}')\n else:\n self._n = new_value\n\n @property\n def max_tokens(self):\n return self._max_tokens\n\n @max_tokens.setter\n def max_tokens(self, new_value: int):\n new_value = int(new_value)\n if not (1 <= new_value <= 4096):\n raise ValueError(f'\\n{BAD_MT.format(new_value)}')\n else:\n self._max_tokens = new_value" }, { "identifier": "Client", "path": "src/client.py", "snippet": "class Client:\n \"\"\"A class representing the OpenAI API 
Client\"\"\"\n def __init__(self, config):\n self.client = None\n self.api_key = ''\n self.config = config\n\n def initialize(self):\n \"\"\"Checks config.json for a stored API key, or prompts the user to input a new key\"\"\"\n config_data = self._json_load(self.config)\n api_key = config_data['api_key']\n if api_key:\n good_key = self.test_key(api_key)\n if good_key:\n self.api_key = api_key\n self.client = openai.OpenAI(api_key=self.api_key)\n else:\n self.set_key()\n else:\n self.set_key()\n\n @staticmethod\n def test_key(api_key):\n \"\"\"Send a test message to the GPT API to check if an API key is valid\"\"\"\n client = openai.OpenAI(api_key=api_key)\n try:\n try:\n response = client.chat.completions.create(\n model='gpt-3.5-turbo',\n max_tokens=5,\n messages=[{'role': 'user', 'content': 'This is a test .'}])\n except openai.AuthenticationError:\n print('[*] error, invalid API key')\n return False\n else:\n print('[*] API key verified')\n return True\n except openai.APIConnectionError:\n print('[*] network connection error\\n[*] exiting')\n sys.exit()\n\n def set_key(self):\n \"\"\"Set a new API key and test if it is valid\"\"\"\n while True:\n self.api_key = input('[*] insert OpenAI API key:\\n>')\n valid_key = self.test_key(self.api_key)\n if valid_key:\n config_data = self._json_load(self.config)\n config_data['api_key'] = self.api_key\n self._json_dump(config_data, self.config)\n self.client = openai.OpenAI(api_key=self.api_key)\n return\n\n @staticmethod\n def _json_load(json_file):\n \"\"\"Loads JSON object from a file\"\"\"\n with open(json_file, 'r') as f:\n data = json.load(f)\n return data\n\n @staticmethod\n def _json_dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)" }, { "identifier": "Conversation", "path": "src/conversation.py", "snippet": "class Conversation:\n messages: list[dict] = dataclasses.field(default_factory=list)\n query: str = ''\n reply: str = ''\n response: dict = dataclasses.field(default_factory=dict)\n tokens: int = 0\n\n def start(self, system_prompt: str):\n self.messages = [{\"role\": \"system\", \"content\": system_prompt}]\n print()\n return Conversation(messages=self.messages)\n\n def speak(self, content: str):\n self.messages.append({\"role\": \"user\", \"content\": content})\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def think(self, thought):\n if self.query == '':\n self.query = thought\n else:\n self.query = f'{self.query}\\n{thought}'\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def listen(self, gpt: GPT):\n \"\"\"Function to perform GPT chat completions via the API\"\"\"\n self.response = gpt.client.chat.completions.create(\n model=gpt.model,\n messages=self.messages,\n temperature=gpt.temperature,\n top_p=gpt.top_p,\n n=gpt.n,\n max_tokens=gpt.max_tokens,\n frequency_penalty=gpt.frequency_penalty,\n presence_penalty=gpt.presence_penalty,\n )\n self.reply = self.response.choices[0].message.content\n self.tokens = self.response.usage.total_tokens\n print(f\"\\n{self.reply}\\n\")\n self.messages.append({\"role\": \"assistant\", \"content\": self.reply})\n\n return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)\n\n def breath(self):\n return Conversation(messages=self.messages, query='', reply=self.reply, response=self.response)\n\n @staticmethod\n def greet():\n return 
Conversation(messages=[], query='', reply='', response=None)" }, { "identifier": "SystemPrompt", "path": "src/systemprompt.py", "snippet": "class SystemPrompt:\n \"\"\"A class that manages setting the system prompt used to define AI assistants. \\\n To add a new system prompt that will be selectable from the runtime menu, \\\n copy the prompt to an extensionless file in the appropriate category folder.\"\"\"\n def __init__(self, prompts_dir, path=''):\n self.dir = prompts_dir\n self.path = path\n self.content = ''\n self.title = 'custom'\n self._start()\n\n def _start(self):\n \"\"\"Allow the user to define a custom prompt, or select one of the pre-made options\"\"\"\n if not self.path:\n self.content = input(\"\\n[*] input a custom system prompt, \\\n \\n[*] hit enter to view preexisting options:\\n>\")\n if not self.content:\n self._set()\n else:\n self.content = self._fetch_contents(self.path)\n self.title = self.path.rpartition('/')[-1]\n\n def _set(self):\n \"\"\"Loop that runs until a prompt has been selected\"\"\"\n while True:\n category = self._select_category()\n title = self._select_prompt(category)\n if title == 'back':\n pass\n else:\n self.path = f'{self.dir}/{category}/{title}'\n prompt = self._fetch_contents(self.path)\n print(f'\\n{prompt}\\n')\n set_prompt = input(\"[*] select prompt\\n\\n[-] 'enter' to accept\\n[-] 'n' to go back\\n\"\n \"[-] 'x' to enter a custom font'\\n>\")\n if set_prompt == 'x':\n return SystemPrompt(prompts_dir=self.dir)\n elif set_prompt == 'n':\n pass\n else:\n self.title = self.path.rpartition('/')[-1]\n self.content = prompt\n print(f'[*] system prompt: {self.title}\\n[*] query AI:')\n return\n\n def _select_category(self):\n \"\"\"Select a system prompt category from the pre-made options\"\"\"\n print('\\n[-] categories\\n')\n categories = self._fetch_from(self.dir)\n categories.sort()\n choice = self._make_choice(categories)\n print(f'\\n[*] category: {choice}')\n return choice\n\n def _select_prompt(self, category):\n \"\"\"Select a pre-made system prompt from a particular category\"\"\"\n print('[-] prompts\\n')\n category = f'{self.dir}/{category}'\n system_prompts = self._fetch_from(category)\n system_prompts.sort()\n self.path = self._make_choice(system_prompts, go_back=True)\n return self.path\n\n def _make_choice(self, options_list, go_back=False):\n \"\"\"Provides the user with the ability to select a prompt from an enumerated options list\"\"\"\n # Select from a list of options by the objects enumerated position\n while True:\n try:\n self._enumerate_list(options_list, go_back)\n selection = input('\\n[*] select by position:\\n>')\n selection = int(selection)\n if 1 <= selection <= len(options_list):\n return options_list[selection - 1]\n elif go_back and selection == len(options_list) + 1:\n return 'back'\n except ValueError:\n print('[*] invalid selection')\n\n @staticmethod\n def _enumerate_list(options_list, go_back=False):\n \"\"\"\"Enumerates a list of options\"\"\"\n for x, _item in enumerate(options_list, 1):\n print(f'{x}. {_item}')\n if go_back:\n print(f'{x + 1}. 
back')\n\n @staticmethod\n def _fetch_contents(file_path):\n \"\"\"Fetches the contents of a file\"\"\"\n try:\n with open(file_path, 'r') as f:\n return f.read()\n except FileNotFoundError:\n pass\n\n @staticmethod\n def _fetch_from(root_dir):\n \"\"\"Returns a list containing the contents of a directory\"\"\"\n directories = os.listdir(root_dir)\n return directories" }, { "identifier": "Logger", "path": "src/logger.py", "snippet": "class Logger:\n def __init__(self, paths: PathFinder, log_level: int, log_format: str):\n \"\"\"Logs conversations and saves data at the user's request\"\"\"\n self.level: int = log_level\n self.format: str = log_format\n self.paths: Paths = paths\n self.number: int = 0\n self.file: str = ''\n self.savefile: str = ''\n self.save_number: int = 0\n self.new_log()\n\n @property\n def level(self):\n return self._level\n\n @level.setter\n def level(self, new_value: int):\n if 1 != new_value != 2:\n raise TypeError\n else:\n self._level = new_value\n\n @property\n def format(self):\n return self._format\n\n @format.setter\n def format(self, new_value: str):\n if new_value == 'txt' or new_value == 'json':\n self._format = new_value\n else:\n self._format = new_value\n\n def new_log(self):\n self.number = self._next_number()\n self.file = self._new_file()\n \n def _next_number(self):\n \"\"\"Fetch the next log number from config.json and updates it\"\"\"\n config_data = self._load(self.paths.config)\n self.number = log_num = config_data['log_number']\n config_data['log_number'] = self.number + 1\n self._dump(config_data, self.paths.config)\n return self.number\n \n def _new_file(self):\n \"\"\"Generates a new logfile relative the current log number\"\"\"\n while True: # to prevent inadvertently overwriting logs if the value is changed in config.json\n self.file = f'{self.paths.logs}/log{self.number}.{self.format}'\n try:\n with open(self.file, 'x'):\n print(f'[*] logfile generated ~ {self.file}')\n return self.file\n except FileExistsError:\n self.number += 1\n\n def log(self, conversation: Conversation):\n \"\"\"Logs the response or messages as a JSON or TXT file relative to args\"\"\"\n if self.level == 1 and self.format != 'txt':\n print('[*] level 1 only supports .txt output')\n self.format = 'txt'\n if self.level == 1:\n self._dump(str(conversation.response), self.file)\n return self\n elif self.level == 2 and self.format == 'json':\n self._dump(conversation.messages, self.file)\n return self\n elif self.level == 2 and self.format == 'txt':\n with open(self.file, 'w') as f:\n for i in range(len(conversation.messages)):\n f.write(f\"{conversation.messages[i]['role']}:--------------\\n\\n\" \\\n f\"{conversation.messages[i]['content']}\\n\\n\")\n return self\n\n # >save\n def save(self, arguments, conversation):\n \"\"\"Saves information at the user's request\"\"\"\n if len(arguments) == 0:\n self._update_savefile()\n self._save_text(self.savefile, conversation.reply)\n print(f'[*] saving reply to ~ {self.savefile}')\n return\n if len(arguments) != 2:\n self._update_savefile()\n else:\n self.savefile = arguments[1]\n if arguments[0] == 'code':\n p = re.compile(r\"```((.|\\n)*)```\")\n match = re.search(p, conversation.reply)\n if match:\n self._save_text(self.savefile, match.group())\n print(f'[*] saving code to ~ {self.savefile}')\n else:\n print('[*] error: regex failed.\\n[*] ensure that GPT presents code in blocks ```code```')\n if arguments[0] == 'reply':\n self._save_text(self.savefile, conversation.reply)\n print(f'[*] saving reply to ~ {self.savefile}')\n 
elif arguments[0] == 'response':\n self._save_text(self.savefile, str(conversation.response))\n print(f'[*] saving response to ~ {self.savefile}')\n\n def _update_savefile(self):\n self.savefile = f'{self.paths.logs}/log{self.number}-{self.save_number}.pktai'\n self.save_number += 1\n\n @staticmethod\n def _save_text(filename, _text):\n \"\"\"Simple funtion to save text to a file\"\"\"\n with open(filename, 'w') as f:\n f.write(_text)\n\n @staticmethod\n def _load(json_file):\n \"\"\"Loads JSON object from a file\"\"\"\n with open(json_file, 'r') as f:\n data = json.load(f)\n return data\n\n @staticmethod\n def _dump(json_dict, json_file):\n \"\"\"Dumps a JSON object to a file\"\"\"\n with open(json_file, 'w') as f:\n json.dump(json_dict, f, indent=6)" }, { "identifier": "HELP_STRING", "path": "src/cmdshelp.py", "snippet": "HELP_STRING = \"\"\"----------------------------------------------\ncommand: >stop\n\ninfo: Sets thinking=True, halting messages being sent to GPT. While thinking=True, \\\nnew inputs are appended to the query with a '\\n', allowing the user to create a stacked query \\\nfrom X inputs. While thinking=True, the user is able to copy+paste to the program's Python \\\nshell, as well as use the '>insert' command (see below).\n----------------------------------------------\ncommand: >flush\n\ninfo: While thinking=True, clears the value stored in the query. Aka, the fat fingers insurance \\\nclause.\n----------------------------------------------\ncommand: >insert\n\n>insert /absolute/path/filename.extension\n>insert ./relative/path/filename.extension\n\ninfo: When thinking=True, '>insert' fetches the contents of a file and appends it to the query. \\\nPrimarily a feature for developers to easily import code from their projects into the program, \\\nor to enable more advanced scripting capabilities.\n----------------------------------------------\ncommand: >start\n\ninfo: Set thinking=False. The next input will trigger sending the stored query to GPT, \\\nresuming the conversation.\n----------------------------------------------\ncommand: >exec\n\n>exec {system command} {args}\n>exec cd ./logs # cd to relative or absolute file path\n>exec cd home # returns to the hkrsAI parent directory\n>exec ls -l\n>exec cat ./filepath/filename.extension # fetches and prints the contents of a file\n\ninfo: You can execute system-wide commands from within the program's Python shell. Note that \\\n'cd', and 'cat' are hacked in. Many Linux programs will fail to execute. 
Primarily included to enable \\\neasy directory traversal for workflow integration.\n----------------------------------------------\ncommand: >save\n\n>save # saves the AIs last reply to a generic save file\n>save /path/filename.extension # saves the AIs last reply to relative or absolute path\n>save code # extracts code from the last reply and saves it to a generic save file\n>save code ./path/filename.extension # extracts and saves code to a relative or absolute path\n>save reply {None | /path/filename.extension}\n>save response {None | /path/filename.extension}\n>save messages {None | /path/filename.extension}\n\ninfo: Allows the user to extract and save code or text to relative, absolute, or generic file path.\n----------------------------------------------\ncommand: >set\n\n>set gpt {parameter} {value}\n>set logger {level | format} \n>set {gpt_parameter} {value} # for more information see ./hkrsai.py -h\n>set {level} {value} # levels: (1, 2)\n>set {format} {value} # format: ['json', 'txt']\n\ninfo: Changes the value with the associated parameter.\n----------------------------------------------\ncommand: >show\n\n>show # prints the value stored in conversation.query\n>show {conversation | gpt | logger } {key}\n>show {gpt parameter} # prints the value for a specific gpt parameter\n>show # prints the values stored in gpt and logger\n>show gpt # prints the values stored in gpt\n\ninfo: Prints stored values to the console.\n----------------------------------------------\ncommand: >reset\n\n>reset # resets the AI assistant\n>reset conversation # resets the AI assistant\n>reset log # starts a new log file\n\ninfo: Allows the user to reset the conversation or start a new log.\n----------------------------------------------\ncommand: >exit\n\ninfo: Quit the program.\n\"\"\"" } ]
from typing import Type from src.inputparser import InputParser from src.action import Action from src.gpt import GPT from src.client import Client from src.conversation import Conversation from src.systemprompt import SystemPrompt from src.logger import Logger from src.cmdshelp import HELP_STRING import openai import subprocess import ast import sys import os
5,401
class Dispatcher: """Dispatches functions and manages conversation state.""" def __init__(self): self.thinking: bool = False def dispatch(self, action: Action): """Turns an Action into a function""" if action.command == 'stop': self.thinking = True # >stop return self.silence elif action.command == 'start': self.thinking = False # >start return self.silence elif self.thinking and action.command == 'chat': return self.think elif action.command == 'chat': return self.speak elif action.command == 'exec': return self.execute elif action.command == 'insert': return self.insert elif action.command == 'show': return self.show elif action.command == 'flush': return self.flush elif action.command == 'save': return self.save elif action.command == 'set': return self.set elif action.command == 'reset': return self.reset elif action.command == 'help': return self.help elif action.command == 'exit': return self.goodbye else: return self.silence @staticmethod
class Dispatcher: """Dispatches functions and manages conversation state.""" def __init__(self): self.thinking: bool = False def dispatch(self, action: Action): """Turns an Action into a function""" if action.command == 'stop': self.thinking = True # >stop return self.silence elif action.command == 'start': self.thinking = False # >start return self.silence elif self.thinking and action.command == 'chat': return self.think elif action.command == 'chat': return self.speak elif action.command == 'exec': return self.execute elif action.command == 'insert': return self.insert elif action.command == 'show': return self.show elif action.command == 'flush': return self.flush elif action.command == 'save': return self.save elif action.command == 'set': return self.set elif action.command == 'reset': return self.reset elif action.command == 'help': return self.help elif action.command == 'exit': return self.goodbye else: return self.silence @staticmethod
def silence(gpt: GPT, conversation: Conversation, action: Action, logger: Logger):
6
2023-12-22 07:04:47+00:00
8k
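The Dispatcher snippet in this record resolves an Action's command string to a bound handler method through a chain of elif branches. For comparison, a minimal standalone sketch of the same command-dispatch idea is shown below; the TableDispatcher class and its handler names are hypothetical illustrations rather than code from the hkrsAI repository, and only the Action fields (command, arguments, raw_input) follow the record.

```python
import dataclasses
from typing import Callable, Dict, List


@dataclasses.dataclass
class Action:
    """Parsed user input, mirroring the fields shown in the record above."""
    command: str = ""
    arguments: List[str] = dataclasses.field(default_factory=list)
    raw_input: str = ""


class TableDispatcher:
    """Maps command names to handler methods via a dict lookup instead of elif chains."""

    def __init__(self) -> None:
        self._handlers: Dict[str, Callable[[Action], str]] = {
            "help": self.help,
            "exit": self.goodbye,
        }

    def dispatch(self, action: Action) -> Callable[[Action], str]:
        # Unknown commands fall back to a no-op handler.
        return self._handlers.get(action.command, self.silence)

    def help(self, action: Action) -> str:
        return "printing help text"

    def goodbye(self, action: Action) -> str:
        return "exiting"

    def silence(self, action: Action) -> str:
        return ""


# Usage: resolve a handler for a parsed action, then call it.
action = Action(command="help")
handler = TableDispatcher().dispatch(action)
print(handler(action))
```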
daswer123/rvc-python
rvc_python/infer.py
[ { "identifier": "VC", "path": "rvc_python/modules/vc/modules.py", "snippet": "class VC:\n def __init__(self, lib_dir, config):\n self.lib_dir = lib_dir\n self.n_spk = None\n self.tgt_sr = None\n self.net_g = None\n self.pipeline = None\n self.cpt = None\n self.version = None\n self.if_f0 = None\n self.version = None\n self.hubert_model = None\n\n self.config = config\n\n def get_vc(self,sid,version = \"v2\", *to_return_protect):\n # logger.info(\"Get sid: \" + sid)\n\n to_return_protect0 = {\n \"visible\": self.if_f0 != 0,\n \"value\": to_return_protect[0]\n if self.if_f0 != 0 and to_return_protect\n else 0.5,\n \"__type__\": \"update\",\n }\n to_return_protect1 = {\n \"visible\": self.if_f0 != 0,\n \"value\": to_return_protect[1]\n if self.if_f0 != 0 and to_return_protect\n else 0.33,\n \"__type__\": \"update\",\n }\n\n if sid == \"\" or sid == []:\n if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的\n logger.info(\"Clean model cache\")\n del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt\n self.hubert_model = (\n self.net_g\n ) = self.n_spk = self.hubert_model = self.tgt_sr = None\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n ###楼下不这么折腾清理不干净\n self.if_f0 = self.cpt.get(\"f0\", 1)\n self.version = self.cpt.get(\"version\", \"v1\")\n if self.version == \"v1\":\n if self.if_f0 == 1:\n self.net_g = SynthesizerTrnMs256NSFsid(\n *self.cpt[\"config\"], is_half=self.config.is_half\n )\n else:\n self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt[\"config\"])\n elif self.version == \"v2\":\n if self.if_f0 == 1:\n self.net_g = SynthesizerTrnMs768NSFsid(\n *self.cpt[\"config\"], is_half=self.config.is_half\n )\n else:\n self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt[\"config\"])\n del self.net_g, self.cpt\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return (\n {\"visible\": False, \"__type__\": \"update\"},\n {\n \"visible\": True,\n \"value\": to_return_protect0,\n \"__type__\": \"update\",\n },\n {\n \"visible\": True,\n \"value\": to_return_protect1,\n \"__type__\": \"update\",\n },\n \"\",\n \"\",\n )\n person = f'{sid}'\n logger.info(f\"Loading: {person}\")\n # print(sid,person)\n\n self.cpt = torch.load(sid, map_location=\"cpu\")\n self.tgt_sr = self.cpt[\"config\"][-1]\n self.cpt[\"config\"][-3] = self.cpt[\"weight\"][\"emb_g.weight\"].shape[0] # n_spk\n self.if_f0 = self.cpt.get(\"f0\", 1)\n self.version = version\n\n synthesizer_class = {\n (\"v1\", 1): SynthesizerTrnMs256NSFsid,\n (\"v1\", 0): SynthesizerTrnMs256NSFsid_nono,\n (\"v2\", 1): SynthesizerTrnMs768NSFsid,\n (\"v2\", 0): SynthesizerTrnMs768NSFsid_nono,\n }\n\n self.net_g = synthesizer_class.get(\n (self.version, self.if_f0), SynthesizerTrnMs256NSFsid\n )(*self.cpt[\"config\"], is_half=self.config.is_half)\n\n del self.net_g.enc_q\n\n self.net_g.load_state_dict(self.cpt[\"weight\"], strict=False)\n self.net_g.eval().to(self.config.device)\n if self.config.is_half:\n self.net_g = self.net_g.half()\n else:\n self.net_g = self.net_g.float()\n\n self.pipeline = Pipeline(self.tgt_sr, self.config,lib_dir=self.lib_dir)\n n_spk = self.cpt[\"config\"][-3]\n\n return (\n (\n {\"visible\": True, \"maximum\": n_spk, \"__type__\": \"update\"},\n to_return_protect0,\n to_return_protect1,\n )\n if to_return_protect\n else {\"visible\": True, \"maximum\": n_spk, \"__type__\": \"update\"}\n )\n\n def vc_single(\n self,\n sid,\n input_audio_path,\n f0_up_key,\n f0_file,\n f0_method,\n file_index,\n file_index2,\n index_rate,\n filter_radius,\n resample_sr,\n 
rms_mix_rate,\n protect,\n ):\n if input_audio_path is None:\n return \"You need to upload an audio\", None\n f0_up_key = int(f0_up_key)\n try:\n audio = load_audio(input_audio_path, 16000)\n audio_max = np.abs(audio).max() / 0.95\n if audio_max > 1:\n audio /= audio_max\n times = [0, 0, 0]\n\n if self.hubert_model is None:\n self.hubert_model = load_hubert(self.config,self.lib_dir)\n\n if file_index:\n file_index = (\n file_index.strip(\" \")\n .strip('\"')\n .strip(\"\\n\")\n .strip('\"')\n .strip(\" \")\n .replace(\"trained\", \"added\")\n )\n elif file_index2:\n file_index = file_index2\n else:\n file_index = \"\" # 防止小白写错,自动帮他替换掉\n\n audio_opt = self.pipeline.pipeline(\n self.hubert_model,\n self.net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n self.if_f0,\n filter_radius,\n self.tgt_sr,\n resample_sr,\n rms_mix_rate,\n self.version,\n protect,\n f0_file,\n )\n if self.tgt_sr != resample_sr >= 16000:\n tgt_sr = resample_sr\n else:\n tgt_sr = self.tgt_sr\n index_info = (\n \"Index:\\n%s.\" % file_index\n if os.path.exists(file_index)\n else \"Index not used.\"\n )\n return audio_opt\n except:\n info = traceback.format_exc()\n logger.warning(info)\n return info, (None, None)\n\n def vc_multi(\n self,\n sid,\n dir_path,\n opt_root,\n paths,\n f0_up_key,\n f0_method,\n file_index,\n file_index2,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n format1,\n ):\n try:\n dir_path = (\n dir_path.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n opt_root = opt_root.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n os.makedirs(opt_root, exist_ok=True)\n try:\n if dir_path != \"\":\n paths = [\n os.path.join(dir_path, name) for name in os.listdir(dir_path)\n ]\n else:\n paths = [path.name for path in paths]\n except:\n traceback.print_exc()\n paths = [path.name for path in paths]\n infos = []\n print(paths)\n for path in paths:\n info, opt = self.vc_single(\n sid,\n path,\n f0_up_key,\n None,\n f0_method,\n file_index,\n file_index2,\n # file_big_npy,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n )\n print(info)\n if \"Success\" in info:\n try:\n tgt_sr, audio_opt = opt\n if format1 in [\"wav\", \"flac\"]:\n sf.write(\n \"%s/%s.%s\"\n % (opt_root, os.path.basename(path), format1),\n audio_opt,\n tgt_sr,\n )\n else:\n path = \"%s/%s.%s\" % (\n opt_root,\n os.path.basename(path),\n format1,\n )\n with BytesIO() as wavf:\n sf.write(wavf, audio_opt, tgt_sr, format=\"wav\")\n wavf.seek(0, 0)\n with open(path, \"wb\") as outf:\n wav2(wavf, outf, format1)\n except:\n info += traceback.format_exc()\n infos.append(\"%s->%s\" % (os.path.basename(path), info))\n yield \"\\n\".join(infos)\n yield \"\\n\".join(infos)\n except:\n yield traceback.format_exc()" }, { "identifier": "Config", "path": "rvc_python/configs/config.py", "snippet": "class Config:\n def __init__(self,lib_dir,device,is_dml = False):\n self.lib_dir = lib_dir\n self.device = device\n self.is_half = True if device != \"cpu\" else False\n self.use_jit = False\n self.n_cpu = 0\n self.gpu_name = None\n self.json_config = self.load_config_json()\n self.gpu_mem = None\n self.dml = is_dml\n self.instead = \"\"\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n def load_config_json(self) -> dict:\n d = {}\n for config_file in version_config_list:\n with open(f\"{self.lib_dir}/configs/{config_file}\", \"r\") as f:\n d[config_file] = json.load(f)\n 
return d\n\n # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+.\n # check `getattr` and try it for compatibility\n @staticmethod\n def has_mps() -> bool:\n if not torch.backends.mps.is_available():\n return False\n try:\n torch.zeros(1).to(torch.device(\"mps\"))\n return True\n except Exception:\n return False\n\n @staticmethod\n def has_xpu() -> bool:\n if hasattr(torch, \"xpu\") and torch.xpu.is_available():\n return True\n else:\n return False\n\n def use_fp32_config(self):\n for config_file in version_config_list:\n self.json_config[config_file][\"train\"][\"fp16_run\"] = False\n with open(f\"{self.lib_dir}/configs/{config_file}\", \"r\") as f:\n strr = f.read().replace(\"true\", \"false\")\n with open(f\"{self.lib_dir}/configs/{config_file}\", \"w\") as f:\n f.write(strr)\n with open(f\"{self.lib_dir}/modules/train/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(f\"{self.lib_dir}/modules/train/preprocess.py\", \"w\") as f:\n f.write(strr)\n print(\"overwrite preprocess and configs.json\")\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n # We can parse cuda:0 or cuda if cuda then i_device = 0\n \n if ':' in self.device:\n i_device = int(self.device.split(\":\")[-1])\n else:\n i_device = 0 # If no number is specified, use 0 as the default value.\n\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n logger.info(\"Found GPU %s, force to fp32\", self.gpu_name)\n self.is_half = False\n self.use_fp32_config()\n else:\n logger.info(\"Found GPU %s\", self.gpu_name)\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n if self.gpu_mem <= 4:\n with open(f\"{self.lib_dir}/modules/train/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(f\"{self.lib_dir}/modules/train/preprocess.py\", \"w\") as f:\n f.write(strr)\n elif self.has_mps():\n logger.info(\"No supported Nvidia GPU found, using MPS\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n else:\n logger.info(\"No supported Nvidia GPU found, using CPU\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if self.n_cpu == 0:\n self.n_cpu = cpu_count()\n\n if self.is_half:\n # 6G显存配置\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n # 5G显存配置\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n if self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n if self.dml:\n logger.info(\"Use DirectML instead\")\n if (\n os.path.exists(\n \"venv\\Lib\\site-packages\\onnxruntime\\capi\\DirectML.dll\"\n )\n == False\n ):\n try:\n os.rename(\n \"venv\\Lib\\site-packages\\onnxruntime\",\n \"venv\\Lib\\site-packages\\onnxruntime-cuda\",\n )\n except:\n pass\n try:\n os.rename(\n \"venv\\Lib\\site-packages\\onnxruntime-dml\",\n \"venv\\Lib\\site-packages\\onnxruntime\",\n )\n except:\n pass\n # if self.device != \"cpu\":\n import torch_directml\n\n self.device = torch_directml.device(torch_directml.default_device())\n self.is_half = False\n else:\n if self.instead:\n logger.info(f\"Use 
{self.instead} instead\")\n if (\n os.path.exists(\n \"venv\\Lib\\site-packages\\onnxruntime\\capi\\onnxruntime_providers_cuda.dll\"\n )\n == False\n ):\n try:\n os.rename(\n \"venv\\Lib\\site-packages\\onnxruntime\",\n \"venv\\Lib\\site-packages\\onnxruntime-dml\",\n )\n except:\n pass\n try:\n os.rename(\n \"venv\\Lib\\site-packages\\onnxruntime-cuda\",\n \"venv\\Lib\\site-packages\\onnxruntime\",\n )\n except:\n pass\n print(\"is_half:%s, device:%s\" % (self.is_half, self.device))\n return x_pad, x_query, x_center, x_max" }, { "identifier": "VC", "path": "rvc_python/modules/vc/modules.py", "snippet": "class VC:\n def __init__(self, lib_dir, config):\n self.lib_dir = lib_dir\n self.n_spk = None\n self.tgt_sr = None\n self.net_g = None\n self.pipeline = None\n self.cpt = None\n self.version = None\n self.if_f0 = None\n self.version = None\n self.hubert_model = None\n\n self.config = config\n\n def get_vc(self,sid,version = \"v2\", *to_return_protect):\n # logger.info(\"Get sid: \" + sid)\n\n to_return_protect0 = {\n \"visible\": self.if_f0 != 0,\n \"value\": to_return_protect[0]\n if self.if_f0 != 0 and to_return_protect\n else 0.5,\n \"__type__\": \"update\",\n }\n to_return_protect1 = {\n \"visible\": self.if_f0 != 0,\n \"value\": to_return_protect[1]\n if self.if_f0 != 0 and to_return_protect\n else 0.33,\n \"__type__\": \"update\",\n }\n\n if sid == \"\" or sid == []:\n if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的\n logger.info(\"Clean model cache\")\n del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt\n self.hubert_model = (\n self.net_g\n ) = self.n_spk = self.hubert_model = self.tgt_sr = None\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n ###楼下不这么折腾清理不干净\n self.if_f0 = self.cpt.get(\"f0\", 1)\n self.version = self.cpt.get(\"version\", \"v1\")\n if self.version == \"v1\":\n if self.if_f0 == 1:\n self.net_g = SynthesizerTrnMs256NSFsid(\n *self.cpt[\"config\"], is_half=self.config.is_half\n )\n else:\n self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt[\"config\"])\n elif self.version == \"v2\":\n if self.if_f0 == 1:\n self.net_g = SynthesizerTrnMs768NSFsid(\n *self.cpt[\"config\"], is_half=self.config.is_half\n )\n else:\n self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt[\"config\"])\n del self.net_g, self.cpt\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return (\n {\"visible\": False, \"__type__\": \"update\"},\n {\n \"visible\": True,\n \"value\": to_return_protect0,\n \"__type__\": \"update\",\n },\n {\n \"visible\": True,\n \"value\": to_return_protect1,\n \"__type__\": \"update\",\n },\n \"\",\n \"\",\n )\n person = f'{sid}'\n logger.info(f\"Loading: {person}\")\n # print(sid,person)\n\n self.cpt = torch.load(sid, map_location=\"cpu\")\n self.tgt_sr = self.cpt[\"config\"][-1]\n self.cpt[\"config\"][-3] = self.cpt[\"weight\"][\"emb_g.weight\"].shape[0] # n_spk\n self.if_f0 = self.cpt.get(\"f0\", 1)\n self.version = version\n\n synthesizer_class = {\n (\"v1\", 1): SynthesizerTrnMs256NSFsid,\n (\"v1\", 0): SynthesizerTrnMs256NSFsid_nono,\n (\"v2\", 1): SynthesizerTrnMs768NSFsid,\n (\"v2\", 0): SynthesizerTrnMs768NSFsid_nono,\n }\n\n self.net_g = synthesizer_class.get(\n (self.version, self.if_f0), SynthesizerTrnMs256NSFsid\n )(*self.cpt[\"config\"], is_half=self.config.is_half)\n\n del self.net_g.enc_q\n\n self.net_g.load_state_dict(self.cpt[\"weight\"], strict=False)\n self.net_g.eval().to(self.config.device)\n if self.config.is_half:\n self.net_g = self.net_g.half()\n else:\n self.net_g = 
self.net_g.float()\n\n self.pipeline = Pipeline(self.tgt_sr, self.config,lib_dir=self.lib_dir)\n n_spk = self.cpt[\"config\"][-3]\n\n return (\n (\n {\"visible\": True, \"maximum\": n_spk, \"__type__\": \"update\"},\n to_return_protect0,\n to_return_protect1,\n )\n if to_return_protect\n else {\"visible\": True, \"maximum\": n_spk, \"__type__\": \"update\"}\n )\n\n def vc_single(\n self,\n sid,\n input_audio_path,\n f0_up_key,\n f0_file,\n f0_method,\n file_index,\n file_index2,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n ):\n if input_audio_path is None:\n return \"You need to upload an audio\", None\n f0_up_key = int(f0_up_key)\n try:\n audio = load_audio(input_audio_path, 16000)\n audio_max = np.abs(audio).max() / 0.95\n if audio_max > 1:\n audio /= audio_max\n times = [0, 0, 0]\n\n if self.hubert_model is None:\n self.hubert_model = load_hubert(self.config,self.lib_dir)\n\n if file_index:\n file_index = (\n file_index.strip(\" \")\n .strip('\"')\n .strip(\"\\n\")\n .strip('\"')\n .strip(\" \")\n .replace(\"trained\", \"added\")\n )\n elif file_index2:\n file_index = file_index2\n else:\n file_index = \"\" # 防止小白写错,自动帮他替换掉\n\n audio_opt = self.pipeline.pipeline(\n self.hubert_model,\n self.net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n self.if_f0,\n filter_radius,\n self.tgt_sr,\n resample_sr,\n rms_mix_rate,\n self.version,\n protect,\n f0_file,\n )\n if self.tgt_sr != resample_sr >= 16000:\n tgt_sr = resample_sr\n else:\n tgt_sr = self.tgt_sr\n index_info = (\n \"Index:\\n%s.\" % file_index\n if os.path.exists(file_index)\n else \"Index not used.\"\n )\n return audio_opt\n except:\n info = traceback.format_exc()\n logger.warning(info)\n return info, (None, None)\n\n def vc_multi(\n self,\n sid,\n dir_path,\n opt_root,\n paths,\n f0_up_key,\n f0_method,\n file_index,\n file_index2,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n format1,\n ):\n try:\n dir_path = (\n dir_path.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n opt_root = opt_root.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n os.makedirs(opt_root, exist_ok=True)\n try:\n if dir_path != \"\":\n paths = [\n os.path.join(dir_path, name) for name in os.listdir(dir_path)\n ]\n else:\n paths = [path.name for path in paths]\n except:\n traceback.print_exc()\n paths = [path.name for path in paths]\n infos = []\n print(paths)\n for path in paths:\n info, opt = self.vc_single(\n sid,\n path,\n f0_up_key,\n None,\n f0_method,\n file_index,\n file_index2,\n # file_big_npy,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n )\n print(info)\n if \"Success\" in info:\n try:\n tgt_sr, audio_opt = opt\n if format1 in [\"wav\", \"flac\"]:\n sf.write(\n \"%s/%s.%s\"\n % (opt_root, os.path.basename(path), format1),\n audio_opt,\n tgt_sr,\n )\n else:\n path = \"%s/%s.%s\" % (\n opt_root,\n os.path.basename(path),\n format1,\n )\n with BytesIO() as wavf:\n sf.write(wavf, audio_opt, tgt_sr, format=\"wav\")\n wavf.seek(0, 0)\n with open(path, \"wb\") as outf:\n wav2(wavf, outf, format1)\n except:\n info += traceback.format_exc()\n infos.append(\"%s->%s\" % (os.path.basename(path), info))\n yield \"\\n\".join(infos)\n yield \"\\n\".join(infos)\n except:\n yield traceback.format_exc()" }, { "identifier": "download_rvc_models", "path": "rvc_python/download_model.py", "snippet": "def download_rvc_models(this_dir):\n folder = 
os.path.join(this_dir,'base_model')\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n \n files = {\n \"hubert_base.pt\": \"https://huggingface.co/Daswer123/RVC_Base/resolve/main/hubert_base.pt\",\n \"rmvpe.pt\": \"https://huggingface.co/Daswer123/RVC_Base/resolve/main/rmvpe.pt\",\n \"rmvpe.onnx\": \"https://huggingface.co/Daswer123/RVC_Base/resolve/main/rmvpe.onnx\"\n }\n \n for filename, url in files.items():\n file_path = os.path.join(folder, filename)\n \n if not os.path.exists(file_path):\n print(f'File {filename} not found, start loading...')\n \n response = requests.get(url)\n \n if response.status_code == 200:\n with open(file_path, 'wb') as f:\n f.write(response.content)\n print(f'File {filename} successfully loaded.')\n else:\n print(f'f {filename}.')" } ]
from rvc_python.modules.vc.modules import VC from rvc_python.configs.config import Config from scipy.io import wavfile from glob import glob from rvc_python.modules.vc.modules import VC from rvc_python.download_model import download_rvc_models import os import soundfile as sf
6,923
def infer_file( input_path, model_path, index_path = "", device = "cpu:0", f0method = "harvest", opt_path = "out.wav", index_rate = 0.5, filter_radius = 3, resample_sr = 0, rms_mix_rate = 1, protect = 0.33, f0up_key = 0, version = "v2" ): lib_dir = os.path.dirname(os.path.abspath(__file__)) download_rvc_models(lib_dir) config = Config(lib_dir,device)
def infer_file( input_path, model_path, index_path = "", device = "cpu:0", f0method = "harvest", opt_path = "out.wav", index_rate = 0.5, filter_radius = 3, resample_sr = 0, rms_mix_rate = 1, protect = 0.33, f0up_key = 0, version = "v2" ): lib_dir = os.path.dirname(os.path.abspath(__file__)) download_rvc_models(lib_dir) config = Config(lib_dir,device)
vc = VC(lib_dir,config)
2
2023-12-26 19:05:42+00:00
8k
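The download_rvc_models helper in this record's context implements a "download base weights only if missing" step before inference. A minimal sketch of that pattern follows; the file names and URLs are taken from the snippet above, while the function name, timeout, and error handling are illustrative assumptions rather than the package's actual implementation.

```python
import os
import requests


def ensure_base_models(model_dir: str) -> None:
    """Download RVC base model files into model_dir if they are not already present."""
    files = {
        "hubert_base.pt": "https://huggingface.co/Daswer123/RVC_Base/resolve/main/hubert_base.pt",
        "rmvpe.pt": "https://huggingface.co/Daswer123/RVC_Base/resolve/main/rmvpe.pt",
        "rmvpe.onnx": "https://huggingface.co/Daswer123/RVC_Base/resolve/main/rmvpe.onnx",
    }
    os.makedirs(model_dir, exist_ok=True)
    for filename, url in files.items():
        path = os.path.join(model_dir, filename)
        if os.path.exists(path):
            continue  # already cached, skip the download
        response = requests.get(url, timeout=60)
        response.raise_for_status()  # fail loudly instead of writing an error page to disk
        with open(path, "wb") as f:
            f.write(response.content)
```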
run-llama/rags
core/agent_builder/loader.py
[ { "identifier": "BUILDER_LLM", "path": "core/builder_config.py", "snippet": "BUILDER_LLM = OpenAI(model=\"gpt-4-1106-preview\")" }, { "identifier": "ParamCache", "path": "core/param_cache.py", "snippet": "class ParamCache(BaseModel):\n \"\"\"Cache for RAG agent builder.\n\n Created a wrapper class around a dict in case we wanted to more explicitly\n type different items in the cache.\n\n \"\"\"\n\n # arbitrary types\n class Config:\n arbitrary_types_allowed = True\n\n # system prompt\n system_prompt: Optional[str] = Field(\n default=None, description=\"System prompt for RAG agent.\"\n )\n # data\n file_names: List[str] = Field(\n default_factory=list, description=\"File names as data source (if specified)\"\n )\n urls: List[str] = Field(\n default_factory=list, description=\"URLs as data source (if specified)\"\n )\n directory: Optional[str] = Field(\n default=None, description=\"Directory as data source (if specified)\"\n )\n\n docs: List = Field(default_factory=list, description=\"Documents for RAG agent.\")\n # tools\n tools: List = Field(\n default_factory=list, description=\"Additional tools for RAG agent (e.g. web)\"\n )\n # RAG params\n rag_params: RAGParams = Field(\n default_factory=RAGParams, description=\"RAG parameters for RAG agent.\"\n )\n\n # agent params\n builder_type: str = Field(\n default=\"default\", description=\"Builder type (default, multimodal).\"\n )\n vector_index: Optional[VectorStoreIndex] = Field(\n default=None, description=\"Vector index for RAG agent.\"\n )\n agent_id: str = Field(\n default_factory=lambda: f\"Agent_{str(uuid.uuid4())}\",\n description=\"Agent ID for RAG agent.\",\n )\n agent: Optional[BaseChatEngine] = Field(default=None, description=\"RAG agent.\")\n\n def save_to_disk(self, save_dir: str) -> None:\n \"\"\"Save cache to disk.\"\"\"\n # NOTE: more complex than just calling dict() because we want to\n # only store serializable fields and be space-efficient\n\n dict_to_serialize = {\n \"system_prompt\": self.system_prompt,\n \"file_names\": self.file_names,\n \"urls\": self.urls,\n \"directory\": self.directory,\n # TODO: figure out tools\n \"tools\": self.tools,\n \"rag_params\": self.rag_params.dict(),\n \"builder_type\": self.builder_type,\n \"agent_id\": self.agent_id,\n }\n # store the vector store within the agent\n if self.vector_index is None:\n raise ValueError(\"Must specify vector index in order to save.\")\n self.vector_index.storage_context.persist(Path(save_dir) / \"storage\")\n\n # if save_path directories don't exist, create it\n if not Path(save_dir).exists():\n Path(save_dir).mkdir(parents=True)\n with open(Path(save_dir) / \"cache.json\", \"w\") as f:\n json.dump(dict_to_serialize, f)\n\n @classmethod\n def load_from_disk(\n cls,\n save_dir: str,\n ) -> \"ParamCache\":\n \"\"\"Load cache from disk.\"\"\"\n with open(Path(save_dir) / \"cache.json\", \"r\") as f:\n cache_dict = json.load(f)\n\n storage_context = StorageContext.from_defaults(\n persist_dir=str(Path(save_dir) / \"storage\")\n )\n if cache_dict[\"builder_type\"] == \"multimodal\":\n from llama_index.indices.multi_modal.base import MultiModalVectorStoreIndex\n\n vector_index: VectorStoreIndex = cast(\n MultiModalVectorStoreIndex, load_index_from_storage(storage_context)\n )\n else:\n vector_index = cast(\n VectorStoreIndex, load_index_from_storage(storage_context)\n )\n\n # replace rag params with RAGParams object\n cache_dict[\"rag_params\"] = RAGParams(**cache_dict[\"rag_params\"])\n\n # add in the missing fields\n # load docs\n cache_dict[\"docs\"] = 
load_data(\n file_names=cache_dict[\"file_names\"],\n urls=cache_dict[\"urls\"],\n directory=cache_dict[\"directory\"],\n )\n # load agent from index\n additional_tools = get_tool_objects(cache_dict[\"tools\"])\n\n if cache_dict[\"builder_type\"] == \"multimodal\":\n vector_index = cast(MultiModalVectorStoreIndex, vector_index)\n agent, _ = construct_mm_agent(\n cache_dict[\"system_prompt\"],\n cache_dict[\"rag_params\"],\n cache_dict[\"docs\"],\n mm_vector_index=vector_index,\n )\n else:\n agent, _ = construct_agent(\n cache_dict[\"system_prompt\"],\n cache_dict[\"rag_params\"],\n cache_dict[\"docs\"],\n vector_index=vector_index,\n additional_tools=additional_tools,\n # TODO: figure out tools\n )\n cache_dict[\"vector_index\"] = vector_index\n cache_dict[\"agent\"] = agent\n\n return cls(**cache_dict)" }, { "identifier": "load_meta_agent", "path": "core/utils.py", "snippet": "def load_meta_agent(\n tools: List,\n llm: LLM,\n system_prompt: str,\n extra_kwargs: Optional[Dict] = None,\n **kwargs: Any,\n) -> BaseAgent:\n \"\"\"Load meta agent.\n\n TODO: consolidate with load_agent.\n\n The meta-agent *has* to perform tool-use.\n\n \"\"\"\n extra_kwargs = extra_kwargs or {}\n if isinstance(llm, OpenAI) and is_function_calling_model(llm.model):\n # get OpenAI Agent\n\n agent: BaseAgent = OpenAIAgent.from_tools(\n tools=tools,\n llm=llm,\n system_prompt=system_prompt,\n **kwargs,\n )\n else:\n agent = ReActAgent.from_tools(\n tools=tools,\n llm=llm,\n react_chat_formatter=ReActChatFormatter(\n system_header=system_prompt + \"\\n\" + REACT_CHAT_SYSTEM_HEADER,\n ),\n **kwargs,\n )\n\n return agent" }, { "identifier": "AgentCacheRegistry", "path": "core/agent_builder/registry.py", "snippet": "class AgentCacheRegistry:\n \"\"\"Registry for agent caches, in disk.\n\n Can register new agent caches, load agent caches, delete agent caches, etc.\n\n \"\"\"\n\n def __init__(self, dir: Union[str, Path]) -> None:\n \"\"\"Init params.\"\"\"\n self._dir = dir\n\n def _add_agent_id_to_directory(self, agent_id: str) -> None:\n \"\"\"Save agent id to directory.\"\"\"\n full_path = Path(self._dir) / \"agent_ids.json\"\n if not full_path.exists():\n with open(full_path, \"w\") as f:\n json.dump({\"agent_ids\": [agent_id]}, f)\n else:\n with open(full_path, \"r\") as f:\n agent_ids = json.load(f)[\"agent_ids\"]\n if agent_id in agent_ids:\n raise ValueError(f\"Agent id {agent_id} already exists.\")\n agent_ids_set = set(agent_ids)\n agent_ids_set.add(agent_id)\n with open(full_path, \"w\") as f:\n json.dump({\"agent_ids\": list(agent_ids_set)}, f)\n\n def add_new_agent_cache(self, agent_id: str, cache: ParamCache) -> None:\n \"\"\"Register agent.\"\"\"\n # save the cache to disk\n agent_cache_path = f\"{self._dir}/{agent_id}\"\n cache.save_to_disk(agent_cache_path)\n # save to agent ids\n self._add_agent_id_to_directory(agent_id)\n\n def get_agent_ids(self) -> List[str]:\n \"\"\"Get agent ids.\"\"\"\n full_path = Path(self._dir) / \"agent_ids.json\"\n if not full_path.exists():\n return []\n with open(full_path, \"r\") as f:\n agent_ids = json.load(f)[\"agent_ids\"]\n\n return agent_ids\n\n def get_agent_cache(self, agent_id: str) -> ParamCache:\n \"\"\"Get agent cache.\"\"\"\n full_path = Path(self._dir) / f\"{agent_id}\"\n if not full_path.exists():\n raise ValueError(f\"Cache for agent {agent_id} does not exist.\")\n cache = ParamCache.load_from_disk(str(full_path))\n return cache\n\n def delete_agent_cache(self, agent_id: str) -> None:\n \"\"\"Delete agent cache.\"\"\"\n # modify / resave agent_ids\n 
agent_ids = self.get_agent_ids()\n new_agent_ids = [id for id in agent_ids if id != agent_id]\n full_path = Path(self._dir) / \"agent_ids.json\"\n with open(full_path, \"w\") as f:\n json.dump({\"agent_ids\": new_agent_ids}, f)\n\n # remove agent cache\n full_path = Path(self._dir) / f\"{agent_id}\"\n if full_path.exists():\n # recursive delete\n shutil.rmtree(full_path)" }, { "identifier": "RAGAgentBuilder", "path": "core/agent_builder/base.py", "snippet": "class RAGAgentBuilder(BaseRAGAgentBuilder):\n \"\"\"RAG Agent builder.\n\n Contains a set of functions to construct a RAG agent, including:\n - setting system prompts\n - loading data\n - adding web search\n - setting parameters (e.g. top-k)\n\n Must pass in a cache. This cache will be modified as the agent is built.\n\n \"\"\"\n\n def __init__(\n self,\n cache: Optional[ParamCache] = None,\n agent_registry: Optional[AgentCacheRegistry] = None,\n ) -> None:\n \"\"\"Init params.\"\"\"\n self._cache = cache or ParamCache()\n self._agent_registry = agent_registry or AgentCacheRegistry(\n str(AGENT_CACHE_DIR)\n )\n\n @property\n def cache(self) -> ParamCache:\n \"\"\"Cache.\"\"\"\n return self._cache\n\n @property\n def agent_registry(self) -> AgentCacheRegistry:\n \"\"\"Agent registry.\"\"\"\n return self._agent_registry\n\n def create_system_prompt(self, task: str) -> str:\n \"\"\"Create system prompt for another agent given an input task.\"\"\"\n llm = BUILDER_LLM\n fmt_messages = GEN_SYS_PROMPT_TMPL.format_messages(task=task)\n response = llm.chat(fmt_messages)\n self._cache.system_prompt = response.message.content\n\n return f\"System prompt created: {response.message.content}\"\n\n def load_data(\n self,\n file_names: Optional[List[str]] = None,\n directory: Optional[str] = None,\n urls: Optional[List[str]] = None,\n ) -> str:\n \"\"\"Load data for a given task.\n\n Only ONE of file_names or directory or urls should be specified.\n\n Args:\n file_names (Optional[List[str]]): List of file names to load.\n Defaults to None.\n directory (Optional[str]): Directory to load files from.\n urls (Optional[List[str]]): List of urls to load.\n Defaults to None.\n\n \"\"\"\n file_names = file_names or []\n urls = urls or []\n directory = directory or \"\"\n docs = load_data(file_names=file_names, directory=directory, urls=urls)\n self._cache.docs = docs\n self._cache.file_names = file_names\n self._cache.urls = urls\n self._cache.directory = directory\n return \"Data loaded successfully.\"\n\n def add_web_tool(self) -> str:\n \"\"\"Add a web tool to enable agent to solve a task.\"\"\"\n # TODO: make this not hardcoded to a web tool\n # Set up Metaphor tool\n if \"web_search\" in self._cache.tools:\n return \"Web tool already added.\"\n else:\n self._cache.tools.append(\"web_search\")\n return \"Web tool added successfully.\"\n\n def get_rag_params(self) -> Dict:\n \"\"\"Get parameters used to configure the RAG pipeline.\n\n Should be called before `set_rag_params` so that the agent is aware of the\n schema.\n\n \"\"\"\n rag_params = self._cache.rag_params\n return rag_params.dict()\n\n def set_rag_params(self, **rag_params: Dict) -> str:\n \"\"\"Set RAG parameters.\n\n These parameters will then be used to actually initialize the agent.\n Should call `get_rag_params` first to get the schema of the input dictionary.\n\n Args:\n **rag_params (Dict): dictionary of RAG parameters.\n\n \"\"\"\n new_dict = self._cache.rag_params.dict()\n new_dict.update(rag_params)\n rag_params_obj = RAGParams(**new_dict)\n self._cache.rag_params = rag_params_obj\n 
return \"RAG parameters set successfully.\"\n\n def create_agent(self, agent_id: Optional[str] = None) -> str:\n \"\"\"Create an agent.\n\n There are no parameters for this function because all the\n functions should have already been called to set up the agent.\n\n \"\"\"\n if self._cache.system_prompt is None:\n raise ValueError(\"Must set system prompt before creating agent.\")\n\n # construct additional tools\n additional_tools = get_tool_objects(self.cache.tools)\n agent, extra_info = construct_agent(\n cast(str, self._cache.system_prompt),\n cast(RAGParams, self._cache.rag_params),\n self._cache.docs,\n additional_tools=additional_tools,\n )\n\n # if agent_id not specified, randomly generate one\n agent_id = agent_id or self._cache.agent_id or f\"Agent_{str(uuid.uuid4())}\"\n self._cache.vector_index = extra_info[\"vector_index\"]\n self._cache.agent_id = agent_id\n self._cache.agent = agent\n\n # save the cache to disk\n self._agent_registry.add_new_agent_cache(agent_id, self._cache)\n return \"Agent created successfully.\"\n\n def update_agent(\n self,\n agent_id: str,\n system_prompt: Optional[str] = None,\n include_summarization: Optional[bool] = None,\n top_k: Optional[int] = None,\n chunk_size: Optional[int] = None,\n embed_model: Optional[str] = None,\n llm: Optional[str] = None,\n additional_tools: Optional[List] = None,\n ) -> None:\n \"\"\"Update agent.\n\n Delete old agent by ID and create a new one.\n Optionally update the system prompt and RAG parameters.\n\n NOTE: Currently is manually called, not meant for agent use.\n\n \"\"\"\n self._agent_registry.delete_agent_cache(self.cache.agent_id)\n\n # set agent id\n self.cache.agent_id = agent_id\n\n # set system prompt\n if system_prompt is not None:\n self.cache.system_prompt = system_prompt\n # get agent_builder\n # We call set_rag_params and create_agent, which will\n # update the cache\n # TODO: decouple functions from tool functions exposed to the agent\n rag_params_dict: Dict[str, Any] = {}\n if include_summarization is not None:\n rag_params_dict[\"include_summarization\"] = include_summarization\n if top_k is not None:\n rag_params_dict[\"top_k\"] = top_k\n if chunk_size is not None:\n rag_params_dict[\"chunk_size\"] = chunk_size\n if embed_model is not None:\n rag_params_dict[\"embed_model\"] = embed_model\n if llm is not None:\n rag_params_dict[\"llm\"] = llm\n\n self.set_rag_params(**rag_params_dict)\n\n # update tools\n if additional_tools is not None:\n self.cache.tools = additional_tools\n\n # this will update the agent in the cache\n self.create_agent()" }, { "identifier": "BaseRAGAgentBuilder", "path": "core/agent_builder/base.py", "snippet": "class BaseRAGAgentBuilder(ABC):\n \"\"\"Base RAG Agent builder class.\"\"\"\n\n @property\n @abstractmethod\n def cache(self) -> ParamCache:\n \"\"\"Cache.\"\"\"\n\n @property\n @abstractmethod\n def agent_registry(self) -> AgentCacheRegistry:\n \"\"\"Agent registry.\"\"\"" }, { "identifier": "MultimodalRAGAgentBuilder", "path": "core/agent_builder/multimodal.py", "snippet": "class MultimodalRAGAgentBuilder(BaseRAGAgentBuilder):\n \"\"\"Multimodal RAG Agent builder.\n\n Contains a set of functions to construct a RAG agent, including:\n - setting system prompts\n - loading data\n - adding web search\n - setting parameters (e.g. top-k)\n\n Must pass in a cache. 
This cache will be modified as the agent is built.\n\n \"\"\"\n\n def __init__(\n self,\n cache: Optional[ParamCache] = None,\n agent_registry: Optional[AgentCacheRegistry] = None,\n ) -> None:\n \"\"\"Init params.\"\"\"\n self._cache = cache or ParamCache()\n self._agent_registry = agent_registry or AgentCacheRegistry(\n str(AGENT_CACHE_DIR)\n )\n\n @property\n def cache(self) -> ParamCache:\n \"\"\"Cache.\"\"\"\n return self._cache\n\n @property\n def agent_registry(self) -> AgentCacheRegistry:\n \"\"\"Agent registry.\"\"\"\n return self._agent_registry\n\n def create_system_prompt(self, task: str) -> str:\n \"\"\"Create system prompt for another agent given an input task.\"\"\"\n llm = BUILDER_LLM\n fmt_messages = GEN_SYS_PROMPT_TMPL.format_messages(task=task)\n response = llm.chat(fmt_messages)\n self._cache.system_prompt = response.message.content\n\n return f\"System prompt created: {response.message.content}\"\n\n def load_data(\n self,\n file_names: Optional[List[str]] = None,\n directory: Optional[str] = None,\n ) -> str:\n \"\"\"Load data for a given task.\n\n Only ONE of file_names or directory should be specified.\n **NOTE**: urls not supported in multi-modal setting.\n\n Args:\n file_names (Optional[List[str]]): List of file names to load.\n Defaults to None.\n directory (Optional[str]): Directory to load files from.\n\n \"\"\"\n file_names = file_names or []\n directory = directory or \"\"\n docs = load_data(file_names=file_names, directory=directory)\n self._cache.docs = docs\n self._cache.file_names = file_names\n self._cache.directory = directory\n return \"Data loaded successfully.\"\n\n def get_rag_params(self) -> Dict:\n \"\"\"Get parameters used to configure the RAG pipeline.\n\n Should be called before `set_rag_params` so that the agent is aware of the\n schema.\n\n \"\"\"\n rag_params = self._cache.rag_params\n return rag_params.dict()\n\n def set_rag_params(self, **rag_params: Dict) -> str:\n \"\"\"Set RAG parameters.\n\n These parameters will then be used to actually initialize the agent.\n Should call `get_rag_params` first to get the schema of the input dictionary.\n\n Args:\n **rag_params (Dict): dictionary of RAG parameters.\n\n \"\"\"\n new_dict = self._cache.rag_params.dict()\n new_dict.update(rag_params)\n rag_params_obj = RAGParams(**new_dict)\n self._cache.rag_params = rag_params_obj\n return \"RAG parameters set successfully.\"\n\n def create_agent(self, agent_id: Optional[str] = None) -> str:\n \"\"\"Create an agent.\n\n There are no parameters for this function because all the\n functions should have already been called to set up the agent.\n\n \"\"\"\n if self._cache.system_prompt is None:\n raise ValueError(\"Must set system prompt before creating agent.\")\n\n # construct additional tools\n agent, extra_info = construct_mm_agent(\n cast(str, self._cache.system_prompt),\n cast(RAGParams, self._cache.rag_params),\n self._cache.docs,\n )\n\n # if agent_id not specified, randomly generate one\n agent_id = agent_id or self._cache.agent_id or f\"Agent_{str(uuid.uuid4())}\"\n self._cache.builder_type = \"multimodal\"\n self._cache.vector_index = extra_info[\"vector_index\"]\n self._cache.agent_id = agent_id\n self._cache.agent = agent\n\n # save the cache to disk\n self._agent_registry.add_new_agent_cache(agent_id, self._cache)\n return \"Agent created successfully.\"\n\n def update_agent(\n self,\n agent_id: str,\n system_prompt: Optional[str] = None,\n include_summarization: Optional[bool] = None,\n top_k: Optional[int] = None,\n chunk_size: Optional[int] 
= None,\n embed_model: Optional[str] = None,\n llm: Optional[str] = None,\n additional_tools: Optional[List] = None,\n ) -> None:\n \"\"\"Update agent.\n\n Delete old agent by ID and create a new one.\n Optionally update the system prompt and RAG parameters.\n\n NOTE: Currently is manually called, not meant for agent use.\n\n \"\"\"\n self._agent_registry.delete_agent_cache(self.cache.agent_id)\n\n # set agent id\n self.cache.agent_id = agent_id\n\n # set system prompt\n if system_prompt is not None:\n self.cache.system_prompt = system_prompt\n # get agent_builder\n # We call set_rag_params and create_agent, which will\n # update the cache\n # TODO: decouple functions from tool functions exposed to the agent\n rag_params_dict: Dict[str, Any] = {}\n if include_summarization is not None:\n rag_params_dict[\"include_summarization\"] = include_summarization\n if top_k is not None:\n rag_params_dict[\"top_k\"] = top_k\n if chunk_size is not None:\n rag_params_dict[\"chunk_size\"] = chunk_size\n if embed_model is not None:\n rag_params_dict[\"embed_model\"] = embed_model\n if llm is not None:\n rag_params_dict[\"llm\"] = llm\n\n self.set_rag_params(**rag_params_dict)\n\n # update tools\n if additional_tools is not None:\n self.cache.tools = additional_tools\n\n # this will update the agent in the cache\n self.create_agent()" } ]
from typing import List, cast, Optional from llama_index.tools import FunctionTool from llama_index.agent.types import BaseAgent from core.builder_config import BUILDER_LLM from typing import Tuple, Callable from core.param_cache import ParamCache from core.utils import ( load_meta_agent, ) from core.agent_builder.registry import AgentCacheRegistry from core.agent_builder.base import RAGAgentBuilder, BaseRAGAgentBuilder from core.agent_builder.multimodal import MultimodalRAGAgentBuilder import streamlit as st
5,697
"""Loader agent.""" #################### #### META Agent #### #################### RAG_BUILDER_SYS_STR = """\ You are helping to construct an agent given a user-specified task. You should generally use the tools in this rough order to build the agent. 1) Create system prompt tool: to create the system prompt for the agent. 2) Load in user-specified data (based on file paths they specify). 3) Decide whether or not to add additional tools. 4) Set parameters for the RAG pipeline. 5) Build the agent This will be a back and forth conversation with the user. You should continue asking users if there's anything else they want to do until they say they're done. To help guide them on the process, you can give suggestions on parameters they can set based on the tools they have available (e.g. "Do you want to set the number of documents to retrieve?") """ ### DEFINE Agent #### # NOTE: here we define a function that is dependent on the LLM, # please make sure to update the LLM above if you change the function below
"""Loader agent.""" #################### #### META Agent #### #################### RAG_BUILDER_SYS_STR = """\ You are helping to construct an agent given a user-specified task. You should generally use the tools in this rough order to build the agent. 1) Create system prompt tool: to create the system prompt for the agent. 2) Load in user-specified data (based on file paths they specify). 3) Decide whether or not to add additional tools. 4) Set parameters for the RAG pipeline. 5) Build the agent This will be a back and forth conversation with the user. You should continue asking users if there's anything else they want to do until they say they're done. To help guide them on the process, you can give suggestions on parameters they can set based on the tools they have available (e.g. "Do you want to set the number of documents to retrieve?") """ ### DEFINE Agent #### # NOTE: here we define a function that is dependent on the LLM, # please make sure to update the LLM above if you change the function below
def _get_builder_agent_tools(agent_builder: RAGAgentBuilder) -> List[FunctionTool]:
4
2023-11-16 07:49:44+00:00
8k
open-mmlab/Amphion
processors/content_extractor.py
[ { "identifier": "TorchaudioDataset", "path": "utils/io_optim.py", "snippet": "class TorchaudioDataset(torch.utils.data.Dataset):\n def __init__(self, cfg, dataset, sr, accelerator=None, metadata=None):\n \"\"\"\n Args:\n cfg: config\n dataset: dataset name\n\n \"\"\"\n assert isinstance(dataset, str)\n\n self.sr = sr\n self.cfg = cfg\n\n if metadata is None:\n self.train_metadata_path = os.path.join(\n cfg.preprocess.processed_dir, dataset, cfg.preprocess.train_file\n )\n self.valid_metadata_path = os.path.join(\n cfg.preprocess.processed_dir, dataset, cfg.preprocess.valid_file\n )\n self.metadata = self.get_metadata()\n else:\n self.metadata = metadata\n\n if accelerator is not None:\n self.device = accelerator.device\n elif torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n def get_metadata(self):\n metadata = []\n with open(self.train_metadata_path, \"r\", encoding=\"utf-8\") as t:\n metadata.extend(json.load(t))\n with open(self.valid_metadata_path, \"r\", encoding=\"utf-8\") as v:\n metadata.extend(json.load(v))\n return metadata\n\n def __len__(self):\n return len(self.metadata)\n\n def __getitem__(self, index):\n utt_info = self.metadata[index]\n wav_path = utt_info[\"Path\"]\n\n wav, sr = torchaudio.load(wav_path)\n\n # resample\n if sr != self.sr:\n wav = torchaudio.functional.resample(wav, sr, self.sr)\n # downmixing\n if wav.shape[0] > 1:\n wav = torch.mean(wav, dim=0, keepdim=True)\n assert wav.shape[0] == 1\n wav = wav.squeeze(0)\n # record the length of wav without padding\n length = wav.shape[0]\n # wav: (T)\n return utt_info, wav, length" }, { "identifier": "LibrosaDataset", "path": "utils/io_optim.py", "snippet": "class LibrosaDataset(TorchaudioDataset):\n def __init__(self, cfg, dataset, sr, accelerator=None, metadata=None):\n super().__init__(cfg, dataset, sr, accelerator, metadata)\n\n def __getitem__(self, index):\n utt_info = self.metadata[index]\n wav_path = utt_info[\"Path\"]\n\n wav, _ = librosa.load(wav_path, sr=self.sr)\n # wav: (T)\n wav = torch.from_numpy(wav)\n\n # record the length of wav without padding\n length = wav.shape[0]\n return utt_info, wav, length" }, { "identifier": "FFmpegDataset", "path": "utils/io_optim.py", "snippet": "class FFmpegDataset(TorchaudioDataset):\n def __init__(self, cfg, dataset, sr, accelerator=None, metadata=None):\n super().__init__(cfg, dataset, sr, accelerator, metadata)\n\n def __getitem__(self, index):\n utt_info = self.metadata[index]\n wav_path = utt_info[\"Path\"]\n\n # wav: (T,)\n wav = whisper.load_audio(wav_path, sr=16000) # sr = 16000\n # convert to torch tensor\n wav = torch.from_numpy(wav)\n # record the length of wav without padding\n length = wav.shape[0]\n\n return utt_info, wav, length" }, { "identifier": "collate_batch", "path": "utils/io_optim.py", "snippet": "def collate_batch(batch_list):\n \"\"\"\n Args:\n batch_list: list of (metadata, wav, length)\n \"\"\"\n metadata = [item[0] for item in batch_list]\n # wavs: (B, T)\n wavs = pad_sequence([item[1] for item in batch_list], batch_first=True)\n lens = [item[2] for item in batch_list]\n\n return metadata, wavs, lens" }, { "identifier": "init_model", "path": "modules/wenet_extractor/utils/init_model.py", "snippet": "def init_model(configs):\n if configs[\"cmvn_file\"] is not None:\n mean, istd = load_cmvn(configs[\"cmvn_file\"], configs[\"is_json_cmvn\"])\n global_cmvn = GlobalCMVN(\n torch.from_numpy(mean).float(), torch.from_numpy(istd).float()\n )\n else:\n global_cmvn = None\n\n input_dim 
= configs[\"input_dim\"]\n vocab_size = configs[\"output_dim\"]\n\n encoder_type = configs.get(\"encoder\", \"conformer\")\n decoder_type = configs.get(\"decoder\", \"bitransformer\")\n\n if encoder_type == \"conformer\":\n encoder = ConformerEncoder(\n input_dim, global_cmvn=global_cmvn, **configs[\"encoder_conf\"]\n )\n elif encoder_type == \"squeezeformer\":\n encoder = SqueezeformerEncoder(\n input_dim, global_cmvn=global_cmvn, **configs[\"encoder_conf\"]\n )\n elif encoder_type == \"efficientConformer\":\n encoder = EfficientConformerEncoder(\n input_dim,\n global_cmvn=global_cmvn,\n **configs[\"encoder_conf\"],\n **configs[\"encoder_conf\"][\"efficient_conf\"]\n if \"efficient_conf\" in configs[\"encoder_conf\"]\n else {},\n )\n else:\n encoder = TransformerEncoder(\n input_dim, global_cmvn=global_cmvn, **configs[\"encoder_conf\"]\n )\n if decoder_type == \"transformer\":\n decoder = TransformerDecoder(\n vocab_size, encoder.output_size(), **configs[\"decoder_conf\"]\n )\n else:\n assert 0.0 < configs[\"model_conf\"][\"reverse_weight\"] < 1.0\n assert configs[\"decoder_conf\"][\"r_num_blocks\"] > 0\n decoder = BiTransformerDecoder(\n vocab_size, encoder.output_size(), **configs[\"decoder_conf\"]\n )\n ctc = CTC(vocab_size, encoder.output_size())\n\n # Init joint CTC/Attention or Transducer model\n if \"predictor\" in configs:\n predictor_type = configs.get(\"predictor\", \"rnn\")\n if predictor_type == \"rnn\":\n predictor = RNNPredictor(vocab_size, **configs[\"predictor_conf\"])\n elif predictor_type == \"embedding\":\n predictor = EmbeddingPredictor(vocab_size, **configs[\"predictor_conf\"])\n configs[\"predictor_conf\"][\"output_size\"] = configs[\"predictor_conf\"][\n \"embed_size\"\n ]\n elif predictor_type == \"conv\":\n predictor = ConvPredictor(vocab_size, **configs[\"predictor_conf\"])\n configs[\"predictor_conf\"][\"output_size\"] = configs[\"predictor_conf\"][\n \"embed_size\"\n ]\n else:\n raise NotImplementedError(\"only rnn, embedding and conv type support now\")\n configs[\"joint_conf\"][\"enc_output_size\"] = configs[\"encoder_conf\"][\n \"output_size\"\n ]\n configs[\"joint_conf\"][\"pred_output_size\"] = configs[\"predictor_conf\"][\n \"output_size\"\n ]\n joint = TransducerJoint(vocab_size, **configs[\"joint_conf\"])\n model = Transducer(\n vocab_size=vocab_size,\n blank=0,\n predictor=predictor,\n encoder=encoder,\n attention_decoder=decoder,\n joint=joint,\n ctc=ctc,\n **configs[\"model_conf\"],\n )\n elif \"paraformer\" in configs:\n predictor = Predictor(**configs[\"cif_predictor_conf\"])\n model = Paraformer(\n vocab_size=vocab_size,\n encoder=encoder,\n decoder=decoder,\n ctc=ctc,\n predictor=predictor,\n **configs[\"model_conf\"],\n )\n else:\n model = ASRModel(\n vocab_size=vocab_size,\n encoder=encoder,\n decoder=decoder,\n ctc=ctc,\n lfmmi_dir=configs.get(\"lfmmi_dir\", \"\"),\n **configs[\"model_conf\"],\n )\n return model" }, { "identifier": "load_checkpoint", "path": "modules/wenet_extractor/utils/checkpoint.py", "snippet": "def load_checkpoint(model: torch.nn.Module, path: str) -> dict:\n if torch.cuda.is_available():\n logging.info(\"Checkpoint: loading from checkpoint %s for GPU\" % path)\n checkpoint = torch.load(path)\n else:\n logging.info(\"Checkpoint: loading from checkpoint %s for CPU\" % path)\n checkpoint = torch.load(path, map_location=\"cpu\")\n model.load_state_dict(checkpoint, strict=False)\n info_path = re.sub(\".pt$\", \".yaml\", path)\n configs = {}\n if os.path.exists(info_path):\n with open(info_path, \"r\") as fin:\n configs = 
yaml.load(fin, Loader=yaml.FullLoader)\n return configs" } ]
import os import torch import numpy as np import yaml import copy import whisper from tqdm import tqdm from torchaudio.compliance import kaldi from torch.nn.utils.rnn import pad_sequence from torch.utils.data import DataLoader from fairseq import checkpoint_utils from transformers import AutoModel, Wav2Vec2FeatureExtractor from utils.io_optim import ( TorchaudioDataset, LibrosaDataset, FFmpegDataset, collate_batch, ) from modules.wenet_extractor.utils.init_model import init_model from modules.wenet_extractor.utils.checkpoint import load_checkpoint
4,396
frameshift = self.cfg.preprocess.mert_frameshift else: raise NotImplementedError # calculate the number of valid frames num_frames = int(np.ceil((duration - frameshift) / frameshift)) + 1 # (num_frames, dim) -> (valid_frames, dim) assert ( len(content_feature.shape) == 2 ), "content feature shape error, it should be (num_frames, dim)" content_feature = content_feature[:num_frames, :] np.save(save_path, content_feature.cpu().detach().numpy()) class WhisperExtractor(BaseExtractor): def __init__(self, config): super(WhisperExtractor, self).__init__(config) self.extractor_type = "whisper" self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def load_model(self): # load whisper checkpoint print("Loading Whisper Model...") if "whisper_model_path" in self.cfg.preprocess: if os.path.isfile(self.cfg.preprocess.whisper_model_path): # "pretrained/whisper/medium.pt" download_root = os.path.dirname(self.cfg.preprocess.whisper_model_path) elif os.path.isdir(self.cfg.preprocess.whisper_model_path): # "pretrained/whisper" download_root = self.cfg.preprocess.whisper_model_path else: # if the path does not exist, download the model to the path download_root = self.cfg.preprocess.whisper_model_path if download_root.endswith(".pt"): download_root = os.path.dirname(download_root) else: download_root = None model = whisper.load_model( self.cfg.preprocess.whisper_model, self.device, download_root ) if torch.cuda.is_available(): print("Using GPU...\n") model = model.cuda() else: print("Using CPU...\n") self.model = model.eval() def extract_content_features(self, wavs, lens): """extract content features from a batch of dataloader Args: wavs: tensor (batch_size, T) lens: list """ # wavs: (batch, max_len) wavs = whisper.pad_or_trim(wavs) # batch_mel: (batch, 80, 3000) batch_mel = whisper.log_mel_spectrogram(wavs, device=self.model.device) with torch.no_grad(): # (batch, 1500, 1024) features = self.model.embed_audio(batch_mel) return features class ContentvecExtractor(BaseExtractor): def __init__(self, cfg): super(ContentvecExtractor, self).__init__(cfg) self.extractor_type = "contentvec" def load_model(self): assert self.model == None # Load model ckpt_path = self.cfg.preprocess.contentvec_file print("Load Contentvec Model...") models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [ckpt_path], suffix="", ) model = models[0] model.eval() if torch.cuda.is_available(): # print("Using GPU...\n") model = model.cuda() self.model = model def extract_content_features(self, wavs, lens): """extract content features from a batch of dataloader Args: wavs: tensor (batch, T) lens: list """ device = next(self.model.parameters()).device wavs = wavs.to(device) # (batch, max_len) padding_mask = torch.eq(wavs, torch.zeros_like(wavs)).to(device) with torch.no_grad(): logits = self.model.extract_features( source=wavs, padding_mask=padding_mask, output_layer=12 ) # feats: (batch, T, 256) feats = self.model.final_proj(logits[0]) return feats class WenetExtractor(BaseExtractor): def __init__(self, config): super(WenetExtractor, self).__init__(config) self.extractor_type = "wenet" def load_model(self): wenet_cfg = self.cfg.preprocess.wenet_config wenet_model_path = self.cfg.preprocess.wenet_model_path # load Wenet config with open(wenet_cfg, "r") as w: wenet_configs = yaml.load(w, Loader=yaml.FullLoader) self.extract_conf = copy.deepcopy(wenet_configs["dataset_conf"]) print("Loading Wenet Model...")
# Copyright (c) 2023 Amphion. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Extractor for content features 1. whisper 2. contentvec 3. wenet 4. mert Pipeline: in preprocess.py: call extract_utt_content_features() to extract content features for each utterance extract_utt_content_features() envelopes the following steps: 1. load the model (whisper, contentvec, wenet) 2. extract the content features 3. save the content features into files in svc_dataset.py: call offline_align() to align the content features to the given target length """ """ Extractor Usage: 1. initialize an instance of extractor extractor = WhisperExtractor(cfg) 2. load the specified model extractor.load_model() 3. extract the content features extractor.extract_content(utt) for single utterance extractor.extract_content_batch(utts) for batch utterances 4. save the content features extractor.save_feature(utt, content_feature) for single utterance """ class BaseExtractor: def __init__(self, cfg): self.cfg = cfg self.extractor_type = None self.model = None def offline_align(self, content, target_len): """ args: content: (source_len, dim) target_len: target length return: mapped_feature: (target_len, dim) """ target_hop = self.cfg.preprocess.hop_size assert self.extractor_type in ["whisper", "contentvec", "wenet"] if self.extractor_type == "whisper": source_hop = ( self.cfg.preprocess.whisper_frameshift * self.cfg.preprocess.whisper_downsample_rate * self.cfg.preprocess.sample_rate ) elif self.extractor_type == "contentvec": source_hop = ( self.cfg.preprocess.contentvec_frameshift * self.cfg.preprocess.sample_rate ) elif self.extractor_type == "wenet": source_hop = ( self.cfg.preprocess.wenet_frameshift * self.cfg.preprocess.wenet_downsample_rate * self.cfg.preprocess.sample_rate ) source_hop = int(source_hop) factor = np.gcd(source_hop, target_hop) source_hop //= factor target_hop //= factor # (source_len, 256) _, width = content.shape # slice the content from padded feature source_len = min(target_len * target_hop // source_hop + 1, len(content)) # const ~= target_len * target_hop const = source_len * source_hop // target_hop * target_hop # (source_len * source_hop, dim) up_sampling_feats = np.repeat(content, source_hop, axis=0) # (const, dim) -> (const/target_hop, target_hop, dim) -> (const/target_hop, dim) down_sampling_feats = np.average( up_sampling_feats[:const].reshape(-1, target_hop, width), axis=1 ) err = abs(target_len - len(down_sampling_feats)) if err > 8: # err_log_dir is indeterminate err_log_dir = os.path.join( self.cfg.preprocess.processed_dir, "align_max_err.log" ) try: with open(err_log_dir, "r") as f: err_num = int(f.read()) except: with open(err_log_dir, "w") as f: f.write("0") err_num = 0 if err > err_num: with open(err_log_dir, "w") as f: f.write(str(err)) if len(down_sampling_feats) < target_len: # (1, dim) -> (err, dim) end = down_sampling_feats[-1][None, :].repeat(err, axis=0) down_sampling_feats = np.concatenate([down_sampling_feats, end], axis=0) # (target_len, dim) mapped_feature = down_sampling_feats[:target_len] return mapped_feature def save_feature(self, utt, content_feature): """Save a single utternace to path {cfg.preprocess.processed_dir} Args: utt (dict): one item in metadata, containing information for one utterance content_feature (tensor): content feature of one utterance """ uid = utt["Uid"] assert self.extractor_type != None out_dir = os.path.join( self.cfg.preprocess.processed_dir, utt["Dataset"], 
self.extractor_type ) os.makedirs(out_dir, exist_ok=True) save_path = os.path.join(out_dir, uid + ".npy") # only keep effective parts duration = utt["Duration"] if self.extractor_type == "whisper": frameshift = ( self.cfg.preprocess.whisper_frameshift * self.cfg.preprocess.whisper_downsample_rate ) # 20ms elif self.extractor_type == "contentvec": frameshift = self.cfg.preprocess.contentvec_frameshift # 20ms elif self.extractor_type == "wenet": frameshift = ( self.cfg.preprocess.wenet_frameshift * self.cfg.preprocess.wenet_downsample_rate ) # 40ms elif self.extractor_type == "mert": frameshift = self.cfg.preprocess.mert_frameshift else: raise NotImplementedError # calculate the number of valid frames num_frames = int(np.ceil((duration - frameshift) / frameshift)) + 1 # (num_frames, dim) -> (valid_frames, dim) assert ( len(content_feature.shape) == 2 ), "content feature shape error, it should be (num_frames, dim)" content_feature = content_feature[:num_frames, :] np.save(save_path, content_feature.cpu().detach().numpy()) class WhisperExtractor(BaseExtractor): def __init__(self, config): super(WhisperExtractor, self).__init__(config) self.extractor_type = "whisper" self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def load_model(self): # load whisper checkpoint print("Loading Whisper Model...") if "whisper_model_path" in self.cfg.preprocess: if os.path.isfile(self.cfg.preprocess.whisper_model_path): # "pretrained/whisper/medium.pt" download_root = os.path.dirname(self.cfg.preprocess.whisper_model_path) elif os.path.isdir(self.cfg.preprocess.whisper_model_path): # "pretrained/whisper" download_root = self.cfg.preprocess.whisper_model_path else: # if the path does not exist, download the model to the path download_root = self.cfg.preprocess.whisper_model_path if download_root.endswith(".pt"): download_root = os.path.dirname(download_root) else: download_root = None model = whisper.load_model( self.cfg.preprocess.whisper_model, self.device, download_root ) if torch.cuda.is_available(): print("Using GPU...\n") model = model.cuda() else: print("Using CPU...\n") self.model = model.eval() def extract_content_features(self, wavs, lens): """extract content features from a batch of dataloader Args: wavs: tensor (batch_size, T) lens: list """ # wavs: (batch, max_len) wavs = whisper.pad_or_trim(wavs) # batch_mel: (batch, 80, 3000) batch_mel = whisper.log_mel_spectrogram(wavs, device=self.model.device) with torch.no_grad(): # (batch, 1500, 1024) features = self.model.embed_audio(batch_mel) return features class ContentvecExtractor(BaseExtractor): def __init__(self, cfg): super(ContentvecExtractor, self).__init__(cfg) self.extractor_type = "contentvec" def load_model(self): assert self.model == None # Load model ckpt_path = self.cfg.preprocess.contentvec_file print("Load Contentvec Model...") models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [ckpt_path], suffix="", ) model = models[0] model.eval() if torch.cuda.is_available(): # print("Using GPU...\n") model = model.cuda() self.model = model def extract_content_features(self, wavs, lens): """extract content features from a batch of dataloader Args: wavs: tensor (batch, T) lens: list """ device = next(self.model.parameters()).device wavs = wavs.to(device) # (batch, max_len) padding_mask = torch.eq(wavs, torch.zeros_like(wavs)).to(device) with torch.no_grad(): logits = self.model.extract_features( source=wavs, padding_mask=padding_mask, output_layer=12 ) # feats: (batch, T, 256) feats = 
self.model.final_proj(logits[0]) return feats class WenetExtractor(BaseExtractor): def __init__(self, config): super(WenetExtractor, self).__init__(config) self.extractor_type = "wenet" def load_model(self): wenet_cfg = self.cfg.preprocess.wenet_config wenet_model_path = self.cfg.preprocess.wenet_model_path # load Wenet config with open(wenet_cfg, "r") as w: wenet_configs = yaml.load(w, Loader=yaml.FullLoader) self.extract_conf = copy.deepcopy(wenet_configs["dataset_conf"]) print("Loading Wenet Model...")
self.model = init_model(wenet_configs)
4
2023-11-15 09:19:27+00:00
8k
KwaiKEG/KwaiAgents
kwaiagents/agent_start.py
[ { "identifier": "Config", "path": "kwaiagents/config.py", "snippet": "class Config(object):\n def __init__(self) -> None:\n \"\"\"Initialize the Config class\"\"\"\n self.fast_llm_model = \"gpt-3.5-turbo\"\n self.smart_llm_model = \"gpt-4\"\n self.use_local_llm = False\n self.local_llm_host = \"localhost\"\n self.local_llm_port = 8888\n self.browse_chunk_max_length = 4096\n self.browse_summary_max_token = 300\n self.selenium_web_browser = \"chrome\"\n self.llm_max_retries = 5\n self.temperature = 1.0\n self.max_tokens_num = 4096\n self.chain_logger = ChainMessageLogger()\n\n def __str__(self):\n s = \"============ CONFIG ============\\n\"\n for key, val in self.__dict__.items():\n s += \"· \" + key.upper() + \":\\t\" + str(val) + '\\n'\n return s\n\n def to_json_file(self, fname):\n with open(fname, \"w\") as f:\n json.dump({k:v for k, v in self.__dict__.items() if k not in [\"chain_logger\"]},f, ensure_ascii=False, indent=2)\n\n def set_chain_logger(self, chain_logger):\n self.chain_logger = chain_logger" }, { "identifier": "CFG", "path": "kwaiagents/config.py", "snippet": "CFG = Config()" }, { "identifier": "AgentProfile", "path": "kwaiagents/agents/agent_profile.py", "snippet": "class AgentProfile(object):\n def __init__(self, input_dict: dict = None):\n self.input_dict = input_dict\n self.lang = input_dict.get(\"lang\", \"en\")\n self.from_json(input_dict)\n\n def from_json(self, input_dict):\n self.name = input_dict.get(\"agent_name\", \"\")\n if not self.name:\n self.name = _profile_default_name_fn(self.lang)\n self.bio = input_dict.get(\"agent_bio\", \"\")\n if not self.bio:\n self.bio = _profile_default_bio_fn(self.lang)\n self.max_iter_num = int(input_dict.get(\"max_iter_num\", 5))\n self.instructions = input_dict.get(\"agent_instructions\", \"\")\n if self.instructions:\n self.instructions = _profile_default_instruct_pre_fn(self.lang) + self.instructions\n tool_names = input_dict.get(\"tool_names\", '[\"auto\"]')\n if isinstance(tool_names, str):\n self.tools = json.loads(tool_names)\n else:\n self.tools = tool_names\n\n def to_json_file(self, fname):\n with open(fname, \"w\") as f:\n json.dump({k:v for k, v in self.__dict__.items()},f, ensure_ascii=False, indent=2)\n\n def __str__(self):\n s = \"============ Agent Profile ============\\n\"\n for key, val in self.__dict__.items():\n if key == \"input_dict\":\n continue\n s += f\"· {key.upper()}:\\t{val}\\n\"\n return s" }, { "identifier": "KAgentSysLite", "path": "kwaiagents/agents/kagent.py", "snippet": "class KAgentSysLite(object):\n def __init__(self, cfg, session_id=None, agent_profile=None, lang=\"en\"):\n self.cfg = cfg\n self.agent_profile = agent_profile\n self.lang = lang\n self.max_task_num = agent_profile.max_iter_num\n self.session_id = session_id if session_id else str(uuid.uuid1())\n self.tokenizer = self.initialize_tokenizer(self.cfg.fast_llm_model)\n\n self.initialize_logger()\n self.initialize_memory()\n self.tool_retrival()\n\n def initialize_logger(self):\n self.chain_logger = ChainMessageLogger(output_streams=[sys.stdout], lang=self.lang)\n self.cfg.set_chain_logger(self.chain_logger)\n\n def initialize_memory(self):\n pass\n \n def initialize_tokenizer(self, llm_name):\n if \"baichuan\" in llm_name:\n model_name = \"kwaikeg/kagentlms_baichuan2_13b_mat\"\n elif \"qwen\" in llm_name:\n model_name = \"kwaikeg/kagentlms_qwen_7b_mat\"\n else:\n model_name = \"gpt2\"\n tokenizer = AutoTokenizer.from_pretrained(\n model_name,\n use_fast=False,\n padding_side='left',\n trust_remote_code=True\n )\n return 
tokenizer\n\n def tool_retrival(self):\n if \"notool\" in self.agent_profile.tools:\n self.tools = list()\n else:\n all_tools = [tool_cls(cfg=self.cfg) for tool_cls in ALL_TOOLS]\n\n if \"auto\" in self.agent_profile.tools:\n used_tools = [tool_cls(cfg=self.cfg) for tool_cls in ALL_TOOLS]\n else:\n used_tools = list()\n for tool in all_tools:\n if tool.zh_name in self.agent_profile.tools or tool.name in self.agent_profile.tools:\n used_tools.append(tool)\n used_tools += [tool_cls(cfg=self.cfg) for tool_cls in ALL_NO_TOOLS]\n \n self.tools = used_tools\n self.name2tools = {t.name: t for t in self.tools}\n\n def memory_retrival(self, \n goal: str, \n conversation_history: List[List], \n complete_task_list: List[Dict]):\n\n memory = \"\"\n if conversation_history:\n memory += f\"* Conversation History:\\n\"\n for tmp in conversation_history[-3:]:\n memory += f\"User: {tmp['query']}\\nAssistant:{tmp['answer']}\\n\"\n\n if complete_task_list:\n complete_task_str = json.dumps(complete_task_list, ensure_ascii=False, indent=4)\n memory += f\"* Complete tasks: {complete_task_str}\\n\"\n return memory\n\n def task_plan(self, goal, memory):\n prompt = make_planning_prompt(self.agent_profile, goal, self.tools, memory, self.cfg.max_tokens_num, self.tokenizer, lang=self.lang)\n # print(f'\\n************** TASK PLAN AGENT PROMPT *************')\n # print(prompt)\n try:\n response, _ = create_chat_completion(\n query=prompt, llm_model_name=self.cfg.smart_llm_model)\n self.chain_logger.put_prompt_response(\n prompt=prompt, \n response=response, \n session_id=self.session_id, \n mtype=\"auto_task_create\",\n llm_name=self.cfg.smart_llm_model)\n response = correct_json(find_json_dict(response))\n task = json.loads(response)\n new_tasks = [task]\n except:\n print(traceback.format_exc())\n print(\"+\" + response)\n self.chain_logger.put(\"fail\", logging_think_fail_msg(self.lang))\n new_tasks = list()\n \n return new_tasks\n\n def tool_use(self, command) -> str:\n try:\n command_name = command.get(\"name\", \"\")\n if command_name == \"search\":\n command_name = \"web_search\"\n args_text = \",\".join([f'{key}={val}' for key, val in command[\"args\"].items()])\n execute_str = f'{command_name}({args_text})'.replace(\"wikipedia(\", \"kuaipedia(\")\n self.chain_logger.put(\"execute\", execute_str)\n if not command_name:\n raise RuntimeError(\"{} has no tool name\".format(command))\n if command_name not in self.name2tools:\n raise RuntimeError(\"has no tool named {}\".format(command_name))\n tool = self.name2tools[command_name]\n\n tool_output = tool(**command[\"args\"])\n self.chain_logger.put(\"observation\", tool_output.answer_md)\n\n for prompt, response in tool_output.prompt_responses:\n self.chain_logger.put_prompt_response(\n prompt=prompt,\n response=response,\n session_id=self.session_id,\n mtype=f\"auto_command_{command_name}\",\n llm_name=self.cfg.fast_llm_model\n )\n return tool_output.answer\n except KeyboardInterrupt:\n exit()\n except:\n print(traceback.format_exc())\n self.chain_logger.put(\"observation\", logging_execute_fail_msg(self.lang))\n return \"\"\n\n def conclusion(self, \n goal: str, \n memory,\n conversation_history: List[List],\n no_task_planned: bool = False\n ):\n\n if no_task_planned:\n prompt = make_no_task_conclusion_prompt(goal, conversation_history)\n else:\n prompt = make_task_conclusion_prompt(self.agent_profile, goal, memory, self.cfg.max_tokens_num, self.tokenizer, lang=self.lang)\n # print(f'\\n************** CONCLUSION AGENT PROMPT *************')\n # print(prompt)\n\n 
response, _ = create_chat_completion(\n query=prompt, \n chat_id=\"kwaiagents_answer_\" + self.session_id, \n llm_model_name=self.cfg.smart_llm_model)\n\n # print(response)\n\n self.chain_logger.put_prompt_response(\n prompt=prompt, \n response=response, \n session_id=self.session_id, \n mtype=\"auto_conclusion\",\n llm_name=self.cfg.smart_llm_model)\n return response\n\n def check_task_complete(self, task, iter_id):\n command_name = task[\"command\"][\"name\"]\n if not task or (\"task_name\" not in task) or (\"command\" not in task) \\\n or (\"args\" not in task[\"command\"]) or (\"name\" not in task[\"command\"]):\n self.chain_logger.put(\"finish\", str(task.get(\"task_name\", \"\")))\n return True\n elif command_name == FinishTool.name:\n self.chain_logger.put(\"finish\", str(task[\"command\"][\"args\"].get(\"reason\", \"\")))\n return True\n elif command_name == NoTool.name:\n if iter_id == 1:\n self.chain_logger.put(\"finish\", logging_do_not_need_use_tool_msg(self.lang))\n else:\n self.chain_logger.put(\"finish\", logging_do_not_need_use_tool_anymore_msg(self.lang))\n return True\n elif command_name not in self.name2tools:\n self.chain_logger.put(\"finish\", logging_do_not_need_use_tool_msg(self.lang))\n return True\n else:\n return False\n\n def chat(self, query, history=list(), initial_task_name=None, *args, **kwargs):\n goal = query\n\n if not self.tools:\n no_task_planned = True\n else:\n tasks_storage = SingleTaskListStorage()\n tasks_storage.clear()\n\n start = True\n loop = True\n iter_id = 0\n complete_task_list = list()\n no_task_planned = False\n while loop:\n iter_id += 1\n if start or not tasks_storage.is_empty():\n start = False\n if not tasks_storage.is_empty():\n task = tasks_storage.popleft()\n \n if (self.check_task_complete(task, iter_id,)):\n if iter_id <= 2:\n no_task_planned = True\n break\n\n self.chain_logger.put(\"thought\", task.get(\"task_name\", \"\"))\n\n result = self.tool_use(task[\"command\"])\n\n task[\"result\"] = result\n complete_task_list.append(task)\n\n if iter_id > self.agent_profile.max_iter_num:\n self.chain_logger.put(\"finish\", logging_stop_thinking_msg(self.lang))\n break\n self.chain_logger.put(\"thinking\")\n memory = self.memory_retrival(goal, history, complete_task_list)\n new_tasks = self.task_plan(goal, memory)\n\n for new_task in new_tasks:\n new_task.update({\"task_id\": tasks_storage.next_task_id()})\n tasks_storage.append(new_task)\n else:\n loop = False\n self.chain_logger.put(\"finish\", logging_finish_task_msg(self.lang))\n\n memory = self.memory_retrival(goal, history, complete_task_list)\n self.chain_logger.put(\"conclusion\", \"\")\n\n conclusion = self.conclusion(\n goal, \n memory=memory,\n conversation_history=history,\n no_task_planned=no_task_planned)\n self.chain_logger.put(\"chain_end\", \"\")\n\n new_history = history[:] + [{\"query\": query, \"answer\": conclusion}]\n\n return {\n \"response\": conclusion,\n \"history\": new_history,\n \"chain_msg\": self.chain_logger.chain_msgs,\n \"chain_msg_str\": self.chain_logger.chain_msgs_str,\n \"more_info\": {},\n }" } ]
import argparse import json import os import sys import time import traceback from datetime import datetime from kwaiagents.config import Config, CFG from kwaiagents.agents import KAgentSysLite, AgentProfile
3,934
class AgentService(object): def __init__(self, *args, **kwargs): self.cfg = Config() self.agent_profile = None self.p_date = datetime.today().strftime('%Y%m%d') @staticmethod def parse_config(input_dict): cfg = Config() llm_name = input_dict.get("llm_name", "").lower() cfg.fast_llm_model = llm_name cfg.smart_llm_model = llm_name cfg.max_tokens_num = input_dict.get("max_tokens_num", 4096) if llm_name == "gpt-4": cfg.fast_llm_model = "gpt-3.5-turbo" return cfg @staticmethod def load_history(input_dict): history = input_dict.get("history", list()) if not history: history = list() if isinstance(history, str): history = json.loads(history) return history def chat(self, input_dict): s = "============ INPUT_DICT ============\n" for key, val in input_dict.items(): s += f"· {key.upper()}:\t{val}\n" print(s) chat_id = str(input_dict["id"]) history = self.load_history(input_dict) self.cfg = self.parse_config(input_dict) self.agent_profile = AgentProfile(input_dict) print(self.cfg) print(self.agent_profile) try: agent = KAgentSysLite( cfg=self.cfg, session_id=chat_id, agent_profile=self.agent_profile, lang=input_dict.get("lang", "en")) print("\033[95m\033[1m" + "\n***** Question *****" + "\033[0m\033[0m") print(input_dict["query"]) agent_results = agent.chat( input_dict["query"], history=history) print("\033[95m\033[1m" + "\n***** Response *****" + "\033[0m\033[0m") print(agent_results["response"]) result = { "id": chat_id, "response": agent_results["response"], "history": json.dumps(agent_results["history"], ensure_ascii=False), "chain_msg": agent_results["chain_msg"], "chain_msg_str": agent_results["chain_msg_str"], "more_info": agent_results["more_info"] } except KeyboardInterrupt: exit() except: print(traceback.format_exc()) result = { "id": chat_id, "response": "error" } return result def main(): parser = argparse.ArgumentParser() parser.add_argument("--id", type=str, default="test", help="ID of this conversation") parser.add_argument("--query", type=str, required=True, help="User query") parser.add_argument("--history", type=str, default='[]', help="History of conversation") parser.add_argument("--llm_name", type=str, default="gpt-3.5-turbo", help="the name of llm") parser.add_argument("--use_local_llm", default=False, action='store_true', help="Whether to use local llm") parser.add_argument("--local_llm_host", type=str, default="localhost", help="The host of local llm service") parser.add_argument("--local_llm_port", type=int, default="8888", help="The port of local llm service") parser.add_argument("--tool_names", type=str, default='["auto"]', help="the name of llm") parser.add_argument("--max_iter_num", type=int, default=1, help="the number of iteration of agents") parser.add_argument("--agent_name", type=str, default="", help="The agent name") parser.add_argument("--agent_bio", type=str, default="", help="The agent bio, a short description") parser.add_argument("--agent_instructions", type=str, default="", help="The instructions of how agent thinking, acting, or talking") parser.add_argument("--external_knowledge", type=str, default="", help="The link of external knowledge") parser.add_argument("--lang", type=str, default="en", choices=["en", "zh"], help="The language of the overall system") parser.add_argument("--max_tokens_num", type=int, default=4096, help="Maximum length of model input") args = parser.parse_args()
class AgentService(object): def __init__(self, *args, **kwargs): self.cfg = Config() self.agent_profile = None self.p_date = datetime.today().strftime('%Y%m%d') @staticmethod def parse_config(input_dict): cfg = Config() llm_name = input_dict.get("llm_name", "").lower() cfg.fast_llm_model = llm_name cfg.smart_llm_model = llm_name cfg.max_tokens_num = input_dict.get("max_tokens_num", 4096) if llm_name == "gpt-4": cfg.fast_llm_model = "gpt-3.5-turbo" return cfg @staticmethod def load_history(input_dict): history = input_dict.get("history", list()) if not history: history = list() if isinstance(history, str): history = json.loads(history) return history def chat(self, input_dict): s = "============ INPUT_DICT ============\n" for key, val in input_dict.items(): s += f"· {key.upper()}:\t{val}\n" print(s) chat_id = str(input_dict["id"]) history = self.load_history(input_dict) self.cfg = self.parse_config(input_dict) self.agent_profile = AgentProfile(input_dict) print(self.cfg) print(self.agent_profile) try: agent = KAgentSysLite( cfg=self.cfg, session_id=chat_id, agent_profile=self.agent_profile, lang=input_dict.get("lang", "en")) print("\033[95m\033[1m" + "\n***** Question *****" + "\033[0m\033[0m") print(input_dict["query"]) agent_results = agent.chat( input_dict["query"], history=history) print("\033[95m\033[1m" + "\n***** Response *****" + "\033[0m\033[0m") print(agent_results["response"]) result = { "id": chat_id, "response": agent_results["response"], "history": json.dumps(agent_results["history"], ensure_ascii=False), "chain_msg": agent_results["chain_msg"], "chain_msg_str": agent_results["chain_msg_str"], "more_info": agent_results["more_info"] } except KeyboardInterrupt: exit() except: print(traceback.format_exc()) result = { "id": chat_id, "response": "error" } return result def main(): parser = argparse.ArgumentParser() parser.add_argument("--id", type=str, default="test", help="ID of this conversation") parser.add_argument("--query", type=str, required=True, help="User query") parser.add_argument("--history", type=str, default='[]', help="History of conversation") parser.add_argument("--llm_name", type=str, default="gpt-3.5-turbo", help="the name of llm") parser.add_argument("--use_local_llm", default=False, action='store_true', help="Whether to use local llm") parser.add_argument("--local_llm_host", type=str, default="localhost", help="The host of local llm service") parser.add_argument("--local_llm_port", type=int, default="8888", help="The port of local llm service") parser.add_argument("--tool_names", type=str, default='["auto"]', help="the name of llm") parser.add_argument("--max_iter_num", type=int, default=1, help="the number of iteration of agents") parser.add_argument("--agent_name", type=str, default="", help="The agent name") parser.add_argument("--agent_bio", type=str, default="", help="The agent bio, a short description") parser.add_argument("--agent_instructions", type=str, default="", help="The instructions of how agent thinking, acting, or talking") parser.add_argument("--external_knowledge", type=str, default="", help="The link of external knowledge") parser.add_argument("--lang", type=str, default="en", choices=["en", "zh"], help="The language of the overall system") parser.add_argument("--max_tokens_num", type=int, default=4096, help="Maximum length of model input") args = parser.parse_args()
CFG.local_llm_host = args.local_llm_host
1
2023-11-13 03:37:02+00:00
8k
EnVision-Research/LucidDreamer
scene/gaussian_model.py
[ { "identifier": "inverse_sigmoid", "path": "utils/general_utils.py", "snippet": "def inverse_sigmoid(x):\n return torch.log(x/(1-x))" }, { "identifier": "get_expon_lr_func", "path": "utils/general_utils.py", "snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper" }, { "identifier": "build_rotation", "path": "utils/general_utils.py", "snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n\n R[:, 0, 0] = 1 - 2 * (y*y + z*z)\n R[:, 0, 1] = 2 * (x*y - r*z)\n R[:, 0, 2] = 2 * (x*z + r*y)\n R[:, 1, 0] = 2 * (x*y + r*z)\n R[:, 1, 1] = 1 - 2 * (x*x + z*z)\n R[:, 1, 2] = 2 * (y*z - r*x)\n R[:, 2, 0] = 2 * (x*z - r*y)\n R[:, 2, 1] = 2 * (y*z + r*x)\n R[:, 2, 2] = 1 - 2 * (x*x + y*y)\n return R" }, { "identifier": "mkdir_p", "path": "utils/system_utils.py", "snippet": "def mkdir_p(folder_path):\n # Creates a directory. equivalent to using mkdir -p on the command line\n try:\n makedirs(folder_path)\n except OSError as exc: # Python >2.5\n if exc.errno == EEXIST and path.isdir(folder_path):\n pass\n else:\n raise" }, { "identifier": "RGB2SH", "path": "utils/sh_utils.py", "snippet": "def RGB2SH(rgb):\n return (rgb - 0.5) / C0" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "utils/graphics_utils.py", "snippet": "class BasicPointCloud(NamedTuple):\n points : np.array\n colors : np.array\n normals : np.array" }, { "identifier": "strip_symmetric", "path": "utils/general_utils.py", "snippet": "def strip_symmetric(sym):\n return strip_lowerdiag(sym)" }, { "identifier": "build_scaling_rotation", "path": "utils/general_utils.py", "snippet": "def build_scaling_rotation(s, r):\n L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device=\"cuda\")\n R = build_rotation(r)\n\n L[:,0,0] = s[:,0]\n L[:,1,1] = s[:,1]\n L[:,2,2] = s[:,2]\n\n L = R @ L\n return L" } ]
import torch import numpy as np import os from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation from torch import nn from utils.system_utils import mkdir_p from plyfile import PlyData, PlyElement from utils.sh_utils import RGB2SH,SH2RGB from simple_knn._C import distCUDA2 from utils.graphics_utils import BasicPointCloud from utils.general_utils import strip_symmetric, build_scaling_rotation
4,875
rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")] rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1])) rots = np.zeros((xyz.shape[0], len(rot_names))) for idx, attr_name in enumerate(rot_names): rots[:, idx] = np.asarray(plydata.elements[0][attr_name]) self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True)) self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True)) self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True)) self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True)) self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True)) self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") self.active_sh_degree = self.max_sh_degree def replace_tensor_to_optimizer(self, tensor, name): optimizable_tensors = {} for group in self.optimizer.param_groups: if group["name"] not in ['background']: if group["name"] == name: stored_state = self.optimizer.state.get(group['params'][0], None) stored_state["exp_avg"] = torch.zeros_like(tensor) stored_state["exp_avg_sq"] = torch.zeros_like(tensor) del self.optimizer.state[group['params'][0]] group["params"][0] = nn.Parameter(tensor.requires_grad_(True)) self.optimizer.state[group['params'][0]] = stored_state optimizable_tensors[group["name"]] = group["params"][0] return optimizable_tensors def _prune_optimizer(self, mask): optimizable_tensors = {} for group in self.optimizer.param_groups: stored_state = self.optimizer.state.get(group['params'][0], None) if group["name"] not in ['background']: if stored_state is not None: stored_state["exp_avg"] = stored_state["exp_avg"][mask] stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask] del self.optimizer.state[group['params'][0]] group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True))) self.optimizer.state[group['params'][0]] = stored_state optimizable_tensors[group["name"]] = group["params"][0] else: group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True)) optimizable_tensors[group["name"]] = group["params"][0] return optimizable_tensors def prune_points(self, mask): valid_points_mask = ~mask optimizable_tensors = self._prune_optimizer(valid_points_mask) self._xyz = optimizable_tensors["xyz"] self._features_dc = optimizable_tensors["f_dc"] self._features_rest = optimizable_tensors["f_rest"] self._opacity = optimizable_tensors["opacity"] self._scaling = optimizable_tensors["scaling"] self._rotation = optimizable_tensors["rotation"] self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask] self.denom = self.denom[valid_points_mask] self.max_radii2D = self.max_radii2D[valid_points_mask] def cat_tensors_to_optimizer(self, tensors_dict): optimizable_tensors = {} for group in self.optimizer.param_groups: if group["name"] not in ['background']: assert len(group["params"]) == 1 extension_tensor = tensors_dict[group["name"]] stored_state = self.optimizer.state.get(group['params'][0], None) if stored_state is not None: stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0) stored_state["exp_avg_sq"] = 
torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0) del self.optimizer.state[group['params'][0]] group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) self.optimizer.state[group['params'][0]] = stored_state optimizable_tensors[group["name"]] = group["params"][0] else: group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True)) optimizable_tensors[group["name"]] = group["params"][0] return optimizable_tensors def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation): d = {"xyz": new_xyz, "f_dc": new_features_dc, "f_rest": new_features_rest, "opacity": new_opacities, "scaling" : new_scaling, "rotation" : new_rotation} optimizable_tensors = self.cat_tensors_to_optimizer(d) self._xyz = optimizable_tensors["xyz"] self._features_dc = optimizable_tensors["f_dc"] self._features_rest = optimizable_tensors["f_rest"] self._opacity = optimizable_tensors["opacity"] self._scaling = optimizable_tensors["scaling"] self._rotation = optimizable_tensors["rotation"] self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda") self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda") def densify_and_split(self, grads, grad_threshold, scene_extent, N=2): n_init_points = self.get_xyz.shape[0] # Extract points that satisfy the gradient condition padded_grad = torch.zeros((n_init_points), device="cuda") padded_grad[:grads.shape[0]] = grads.squeeze() selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False) selected_pts_mask = torch.logical_and(selected_pts_mask, torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent) stds = self.get_scaling[selected_pts_mask].repeat(N,1) means =torch.zeros((stds.size(0), 3),device="cuda") samples = torch.normal(mean=means, std=stds)
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # # from .resnet import * class GaussianModel: def setup_functions(self): def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation): L = build_scaling_rotation(scaling_modifier * scaling, rotation) actual_covariance = L @ L.transpose(1, 2) symm = strip_symmetric(actual_covariance) return symm self.scaling_activation = torch.exp self.scaling_inverse_activation = torch.log self.covariance_activation = build_covariance_from_scaling_rotation self.opacity_activation = torch.sigmoid self.inverse_opacity_activation = inverse_sigmoid self.rotation_activation = torch.nn.functional.normalize def __init__(self, sh_degree : int): self.active_sh_degree = 0 self.max_sh_degree = sh_degree self._xyz = torch.empty(0) self._features_dc = torch.empty(0) self._features_rest = torch.empty(0) self._scaling = torch.empty(0) self._rotation = torch.empty(0) self._opacity = torch.empty(0) self._background = torch.empty(0) self.max_radii2D = torch.empty(0) self.xyz_gradient_accum = torch.empty(0) self.denom = torch.empty(0) self.optimizer = None self.percent_dense = 0 self.spatial_lr_scale = 0 self.setup_functions() def capture(self): return ( self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, self.xyz_gradient_accum, self.denom, self.optimizer.state_dict(), self.spatial_lr_scale, ) def restore(self, model_args, training_args): (self.active_sh_degree, self._xyz, self._features_dc, self._features_rest, self._scaling, self._rotation, self._opacity, self.max_radii2D, xyz_gradient_accum, denom, opt_dict, self.spatial_lr_scale) = model_args self.training_setup(training_args) self.xyz_gradient_accum = xyz_gradient_accum self.denom = denom self.optimizer.load_state_dict(opt_dict) @property def get_scaling(self): return self.scaling_activation(self._scaling) @property def get_rotation(self): return self.rotation_activation(self._rotation) @property def get_xyz(self): return self._xyz @property def get_background(self): return torch.sigmoid(self._background) @property def get_features(self): features_dc = self._features_dc features_rest = self._features_rest return torch.cat((features_dc, features_rest), dim=1) @property def get_opacity(self): return self.opacity_activation(self._opacity) def get_covariance(self, scaling_modifier = 1): return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation) def oneupSHdegree(self): if self.active_sh_degree < self.max_sh_degree: self.active_sh_degree += 1 def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): self.spatial_lr_scale = spatial_lr_scale fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda() fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors))).float().cuda() #RGB2SH( features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda() features[:, :3, 0 ] = fused_color features[:, 3:, 1:] = 0.0 print("Number of points at initialisation : ", fused_point_cloud.shape[0]) dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001) scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3) rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda") rots[:, 0] = 
1
        opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))

        self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
        self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
        self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
        self._scaling = nn.Parameter(scales.requires_grad_(True))
        self._rotation = nn.Parameter(rots.requires_grad_(True))
        self._opacity = nn.Parameter(opacities.requires_grad_(True))
        self._background = nn.Parameter(torch.zeros((3,1,1), device="cuda").requires_grad_(True))
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

    def training_setup(self, training_args):
        self.percent_dense = training_args.percent_dense
        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")

        l = [
            {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
            {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
            {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
            {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
            {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
            {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"},
            {'params': [self._background], 'lr': training_args.feature_lr, "name": "background"},
        ]

        self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
        self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
                                                    lr_final=training_args.position_lr_final*self.spatial_lr_scale,
                                                    lr_delay_mult=training_args.position_lr_delay_mult,
                                                    max_steps=training_args.iterations)
        self.rotation_scheduler_args = get_expon_lr_func(lr_init=training_args.rotation_lr,
                                                         lr_final=training_args.rotation_lr_final,
                                                         lr_delay_mult=training_args.position_lr_delay_mult,
                                                         max_steps=training_args.iterations)
        self.scaling_scheduler_args = get_expon_lr_func(lr_init=training_args.scaling_lr,
                                                        lr_final=training_args.scaling_lr_final,
                                                        lr_delay_mult=training_args.position_lr_delay_mult,
                                                        max_steps=training_args.iterations)
        self.feature_scheduler_args = get_expon_lr_func(lr_init=training_args.feature_lr,
                                                        lr_final=training_args.feature_lr_final,
                                                        lr_delay_mult=training_args.position_lr_delay_mult,
                                                        max_steps=training_args.iterations)

    def update_learning_rate(self, iteration):
        ''' Learning rate scheduling per step '''
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "xyz":
                lr = self.xyz_scheduler_args(iteration)
                param_group['lr'] = lr
                return lr

    def update_feature_learning_rate(self, iteration):
        ''' Learning rate scheduling per step '''
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "f_dc":
                lr = self.feature_scheduler_args(iteration)
                param_group['lr'] = lr
                return lr

    def update_rotation_learning_rate(self, iteration):
        ''' Learning rate scheduling per step '''
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "rotation":
                lr = self.rotation_scheduler_args(iteration)
                param_group['lr'] = lr
                return lr

    def update_scaling_learning_rate(self, iteration):
        ''' Learning rate scheduling per step '''
        for param_group in self.optimizer.param_groups:
            if param_group["name"] == "scaling":
                lr = self.scaling_scheduler_args(iteration)
                param_group['lr'] = lr
                return lr

    def construct_list_of_attributes(self):
        l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
        # All channels except the 3 DC
        for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
            l.append('f_dc_{}'.format(i))
        for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
            l.append('f_rest_{}'.format(i))
        l.append('opacity')
        for i in range(self._scaling.shape[1]):
            l.append('scale_{}'.format(i))
        for i in range(self._rotation.shape[1]):
            l.append('rot_{}'.format(i))
        return l

    def save_ply(self, path):
        mkdir_p(os.path.dirname(path))

        xyz = self._xyz.detach().cpu().numpy()
        normals = np.zeros_like(xyz)
        f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
        f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
        opacities = self._opacity.detach().cpu().numpy()
        scale = self._scaling.detach().cpu().numpy()
        rotation = self._rotation.detach().cpu().numpy()

        dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]

        elements = np.empty(xyz.shape[0], dtype=dtype_full)
        attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
        elements[:] = list(map(tuple, attributes))
        el = PlyElement.describe(elements, 'vertex')
        PlyData([el]).write(path)

        np.savetxt(os.path.join(os.path.split(path)[0], "point_cloud_rgb.txt"), np.concatenate((xyz, SH2RGB(f_dc)), axis=1))

    def reset_opacity(self):
        opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01))
        optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity")
        self._opacity = optimizable_tensors["opacity"]

    def load_ply(self, path):
        plydata = PlyData.read(path)

        xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
                        np.asarray(plydata.elements[0]["y"]),
                        np.asarray(plydata.elements[0]["z"])), axis=1)
        opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]

        features_dc = np.zeros((xyz.shape[0], 3, 1))
        features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
        features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
        features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])

        extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
        extra_f_names = sorted(extra_f_names, key=lambda x: int(x.split('_')[-1]))
        assert len(extra_f_names) == 3*(self.max_sh_degree + 1) ** 2 - 3
        features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
        for idx, attr_name in enumerate(extra_f_names):
            features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
        # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
        features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1))

        scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
        scale_names = sorted(scale_names, key=lambda x: int(x.split('_')[-1]))
        scales = np.zeros((xyz.shape[0], len(scale_names)))
        for idx, attr_name in enumerate(scale_names):
            scales[:, idx] = np.asarray(plydata.elements[0][attr_name])

        rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
        rot_names = sorted(rot_names, key=lambda x: int(x.split('_')[-1]))
        rots = np.zeros((xyz.shape[0], len(rot_names)))
        for idx, attr_name in enumerate(rot_names):
            rots[:, idx] = np.asarray(plydata.elements[0][attr_name])

        self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
        self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
        self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
        self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
        self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
        self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

        self.active_sh_degree = self.max_sh_degree

    def replace_tensor_to_optimizer(self, tensor, name):
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            if group["name"] not in ['background']:
                if group["name"] == name:
                    stored_state = self.optimizer.state.get(group['params'][0], None)
                    stored_state["exp_avg"] = torch.zeros_like(tensor)
                    stored_state["exp_avg_sq"] = torch.zeros_like(tensor)

                    del self.optimizer.state[group['params'][0]]
                    group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
                    self.optimizer.state[group['params'][0]] = stored_state

                    optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def _prune_optimizer(self, mask):
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            stored_state = self.optimizer.state.get(group['params'][0], None)
            if group["name"] not in ['background']:
                if stored_state is not None:
                    stored_state["exp_avg"] = stored_state["exp_avg"][mask]
                    stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]

                    del self.optimizer.state[group['params'][0]]
                    group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
                    self.optimizer.state[group['params'][0]] = stored_state

                    optimizable_tensors[group["name"]] = group["params"][0]
                else:
                    group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
                    optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def prune_points(self, mask):
        valid_points_mask = ~mask
        optimizable_tensors = self._prune_optimizer(valid_points_mask)

        self._xyz = optimizable_tensors["xyz"]
        self._features_dc = optimizable_tensors["f_dc"]
        self._features_rest = optimizable_tensors["f_rest"]
        self._opacity = optimizable_tensors["opacity"]
        self._scaling = optimizable_tensors["scaling"]
        self._rotation = optimizable_tensors["rotation"]

        self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
        self.denom = self.denom[valid_points_mask]
        self.max_radii2D = self.max_radii2D[valid_points_mask]

    def cat_tensors_to_optimizer(self, tensors_dict):
        optimizable_tensors = {}
        for group in self.optimizer.param_groups:
            if group["name"] not in ['background']:
                assert len(group["params"]) == 1
                extension_tensor = tensors_dict[group["name"]]
                stored_state = self.optimizer.state.get(group['params'][0], None)
                if stored_state is not None:
                    stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
                    stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)

                    del self.optimizer.state[group['params'][0]]
                    group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
                    self.optimizer.state[group['params'][0]] = stored_state

                    optimizable_tensors[group["name"]] = group["params"][0]
                else:
                    group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
                    optimizable_tensors[group["name"]] = group["params"][0]
        return optimizable_tensors

    def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
        d = {"xyz": new_xyz,
             "f_dc": new_features_dc,
             "f_rest": new_features_rest,
             "opacity": new_opacities,
             "scaling": new_scaling,
             "rotation": new_rotation}

        optimizable_tensors = self.cat_tensors_to_optimizer(d)
        self._xyz = optimizable_tensors["xyz"]
        self._features_dc = optimizable_tensors["f_dc"]
        self._features_rest = optimizable_tensors["f_rest"]
        self._opacity = optimizable_tensors["opacity"]
        self._scaling = optimizable_tensors["scaling"]
        self._rotation = optimizable_tensors["rotation"]

        self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")

    def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
        n_init_points = self.get_xyz.shape[0]
        # Extract points that satisfy the gradient condition
        padded_grad = torch.zeros((n_init_points), device="cuda")
        padded_grad[:grads.shape[0]] = grads.squeeze()
        selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
        selected_pts_mask = torch.logical_and(selected_pts_mask,
                                              torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)

        stds = self.get_scaling[selected_pts_mask].repeat(N, 1)
        means = torch.zeros((stds.size(0), 3), device="cuda")
        samples = torch.normal(mean=means, std=stds)
rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
2
2023-11-18 08:05:50+00:00
8k
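Note on the record above: training_setup wires four per-parameter-group schedules through get_expon_lr_func, whose definition is not part of this record. Below is a minimal sketch of what such a helper might look like, assuming log-linear (exponential) interpolation from lr_init to lr_final over max_steps (both positive) with an optional early damping factor lr_delay_mult; the repository's actual implementation may differ.

import math

def get_expon_lr_func(lr_init, lr_final, lr_delay_mult=1.0, max_steps=30_000):
    # Hypothetical helper: exponentially (log-linearly) interpolate from lr_init
    # to lr_final over max_steps, damped early on by lr_delay_mult (1.0 = no delay).
    def helper(step):
        t = min(max(step / max_steps, 0.0), 1.0)
        lr = math.exp((1.0 - t) * math.log(lr_init) + t * math.log(lr_final))
        delay = lr_delay_mult + (1.0 - lr_delay_mult) * min(step / (0.01 * max_steps), 1.0)
        return delay * lr
    return helper

# Per-iteration usage mirroring the update_*_learning_rate methods in the record:
#   gaussians.update_learning_rate(iteration)
#   gaussians.update_rotation_learning_rate(iteration)
#   gaussians.update_scaling_learning_rate(iteration)
#   gaussians.update_feature_learning_rate(iteration)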
VRSEN/agency-swarm
agency_swarm/threads/thread.py
[ { "identifier": "Agent", "path": "agency_swarm/agents/agent.py", "snippet": "class Agent():\n @property\n def assistant(self):\n if self._assistant is None:\n raise Exception(\"Assistant is not initialized. Please run init_oai() first.\")\n return self._assistant\n\n @assistant.setter\n def assistant(self, value):\n self._assistant = value\n\n @property\n def functions(self):\n return [tool for tool in self.tools if issubclass(tool, BaseTool)]\n\n def __init__(self, id: str = None, name: str = None, description: str = None, instructions: str = \"\",\n tools: List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]] = None,\n files_folder: Union[List[str], str] = None,\n file_ids: List[str] = None, metadata: Dict[str, str] = None, model: str = \"gpt-4-1106-preview\"):\n \"\"\"\n Initializes an Agent with specified attributes, tools, and OpenAI client.\n\n Parameters:\n id (str, optional): Unique identifier for the agent. Defaults to None.\n name (str, optional): Name of the agent. Defaults to the class name if not provided.\n description (str, optional): A brief description of the agent's purpose. Defaults to None.\n instructions (str, optional): Path to a file containing specific instructions for the agent. Defaults to an empty string.\n tools (List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]], optional): A list of tools (as classes) that the agent can use. Defaults to an empty list.\n files_folder (Union[List[str], str], optional): Path or list of paths to directories containing files associated with the agent. Defaults to None.\n file_ids (List[str], optional): List of file IDs for files associated with the agent. Defaults to an empty list.\n metadata (Dict[str, str], optional): Metadata associated with the agent. Defaults to an empty dictionary.\n model (str, optional): The model identifier for the OpenAI API. Defaults to \"gpt-4-1106-preview\".\n\n This constructor sets up the agent with its unique properties, initializes the OpenAI client, reads instructions if provided, and uploads any associated files.\n \"\"\"\n self.id = id\n self.name = name if name else self.__class__.__name__\n self.description = description\n self.instructions = instructions\n self.tools = tools[:] if tools is not None else []\n self.files_folder = files_folder\n self.file_ids = file_ids if file_ids else []\n self.metadata = metadata if metadata else {}\n self.model = model\n\n self._assistant: Any = None\n self._shared_instructions = None\n\n self.client = get_openai_client()\n\n if os.path.isfile(self.instructions):\n self._read_instructions(self.instructions)\n elif os.path.isfile(os.path.join(self.get_class_folder_path(), self.instructions)):\n self._read_instructions(os.path.join(self.get_class_folder_path(), self.instructions))\n\n self._upload_files()\n\n def init_oai(self):\n \"\"\"\n Initializes the OpenAI assistant for the agent.\n\n This method handles the initialization and potential updates of the agent's OpenAI assistant. It loads the assistant based on a saved ID, updates the assistant if necessary, or creates a new assistant if it doesn't exist. 
After initialization or update, it saves the assistant's settings.\n\n Output:\n self: Returns the agent instance for chaining methods or further processing.\n \"\"\"\n\n # check if settings.json exists\n path = self.get_settings_path()\n\n # load assistant from id\n if self.id:\n self.assistant = self.client.beta.assistants.retrieve(self.id)\n # update assistant if parameters are different\n if not self._check_parameters(self.assistant.model_dump()):\n self._update_assistant()\n return self\n\n # load assistant from settings\n if os.path.exists(path):\n with open(path, 'r') as f:\n settings = json.load(f)\n # iterate settings and find the assistant with the same name\n for assistant_settings in settings:\n if assistant_settings['name'] == self.name:\n self.assistant = self.client.beta.assistants.retrieve(assistant_settings['id'])\n self.id = assistant_settings['id']\n # update assistant if parameters are different\n if not self._check_parameters(self.assistant.model_dump()):\n print(\"Updating assistant... \" + self.name)\n self._update_assistant()\n self._update_settings()\n return self\n # create assistant if settings.json does not exist or assistant with the same name does not exist\n self.assistant = self.client.beta.assistants.create(\n name=self.name,\n description=self.description,\n instructions=self.instructions,\n tools=self.get_oai_tools(),\n file_ids=self.file_ids,\n metadata=self.metadata,\n model=self.model\n )\n\n self.id = self.assistant.id\n\n self._save_settings()\n\n return self\n\n def _update_assistant(self):\n \"\"\"\n Updates the existing assistant's parameters on the OpenAI server.\n\n This method updates the assistant's details such as name, description, instructions, tools, file IDs, metadata, and the model. It only updates parameters that have non-empty values. After updating the assistant, it also updates the local settings file to reflect these changes.\n\n No input parameters are directly passed to this method as it uses the agent's instance attributes.\n\n No output parameters are returned, but the method updates the assistant's details on the OpenAI server and locally updates the settings file.\n \"\"\"\n\n params = {\n \"name\": self.name,\n \"description\": self.description,\n \"instructions\": self.instructions,\n \"tools\": self.get_oai_tools(),\n \"file_ids\": self.file_ids,\n \"metadata\": self.metadata,\n \"model\": self.model\n }\n params = {k: v for k, v in params.items() if v}\n self.assistant = self.client.beta.assistants.update(\n self.id,\n **params,\n )\n self._update_settings()\n\n def _check_parameters(self, assistant_settings):\n \"\"\"\n Checks if the agent's parameters match with the given assistant settings.\n\n Parameters:\n assistant_settings (dict): A dictionary containing the settings of an assistant.\n\n Returns:\n bool: True if all the agent's parameters match the assistant settings, False otherwise.\n\n This method compares the current agent's parameters such as name, description, instructions, tools, file IDs, metadata, and model with the given assistant settings. It uses DeepDiff to compare complex structures like tools and metadata. 
If any parameter does not match, it returns False; otherwise, it returns True.\n \"\"\"\n\n if self.name != assistant_settings['name']:\n return False\n if self.description != assistant_settings['description']:\n return False\n if self.instructions != assistant_settings['instructions']:\n return False\n tools_diff = DeepDiff(self.get_oai_tools(), assistant_settings['tools'], ignore_order=True)\n if tools_diff != {}:\n return False\n if set(self.file_ids) != set(assistant_settings['file_ids']):\n return False\n metadata_diff = DeepDiff(self.metadata, assistant_settings['metadata'], ignore_order=True)\n if metadata_diff != {}:\n return False\n if self.model != assistant_settings['model']:\n return False\n return True\n\n def _save_settings(self):\n path = self.get_settings_path()\n # check if settings.json exists\n if not os.path.isfile(path):\n with open(path, 'w') as f:\n json.dump([self.assistant.model_dump()], f, indent=4)\n else:\n settings = []\n with open(path, 'r') as f:\n settings = json.load(f)\n settings.append(self.assistant.model_dump())\n with open(path, 'w') as f:\n json.dump(settings, f, indent=4)\n\n def _update_settings(self):\n path = self.get_settings_path()\n # check if settings.json exists\n if os.path.isfile(path):\n settings = []\n with open(path, 'r') as f:\n settings = json.load(f)\n for i, assistant_settings in enumerate(settings):\n if assistant_settings['id'] == self.id:\n settings[i] = self.assistant.model_dump()\n break\n with open(path, 'w') as f:\n json.dump(settings, f, indent=4)\n\n def _read_instructions(self, path):\n with open(path, 'r') as f:\n self.instructions = f.read()\n\n def _upload_files(self):\n if isinstance(self.files_folder, str):\n f_path = self.files_folder\n\n if not os.path.isdir(f_path):\n f_path = os.path.join(self.get_class_folder_path(), self.files_folder)\n\n if os.path.isdir(f_path):\n f_paths = os.listdir(f_path)\n\n f_paths = [f for f in f_paths if not f.startswith(\".\")]\n\n f_paths = [os.path.join(f_path, f) for f in f_paths]\n\n for f_path in f_paths:\n file_id = self._get_id_from_file(f_path)\n if file_id:\n print(\"File already uploaded. Skipping... \" + os.path.basename(f_path))\n self.file_ids.append(file_id)\n else:\n print(\"Uploading new file... \" + os.path.basename(f_path))\n with open(f_path, 'rb') as f:\n file_id = self.client.files.create(file=f, purpose=\"assistants\").id\n self.file_ids.append(file_id)\n f.close()\n self._add_id_to_file(f_path, file_id)\n\n if Retrieval not in self.tools:\n print(\"Detected files without Retrieval. 
Adding Retrieval tool...\")\n self.add_tool(Retrieval)\n else:\n raise Exception(\"Files folder path is not a directory.\")\n\n def _add_id_to_file(self, f_path, id):\n \"\"\"Add file id to file name\"\"\"\n if os.path.isfile(f_path):\n file_name, file_ext = os.path.splitext(f_path)\n f_path_new = file_name + \"_\" + id + file_ext\n os.rename(f_path, f_path_new)\n return f_path_new\n else:\n raise Exception(\"Items in files folder must be files.\")\n\n def _get_id_from_file(self, f_path):\n \"\"\"Get file id from file name\"\"\"\n if os.path.isfile(f_path):\n file_name, file_ext = os.path.splitext(f_path)\n file_name = os.path.basename(file_name)\n file_name = file_name.split(\"_\")\n if len(file_name) > 1:\n return file_name[-1] if \"file-\" in file_name[-1] else None\n else:\n return None\n else:\n raise Exception(\"Items in files folder must be files.\")\n\n def get_settings_path(self):\n return os.path.join(\"./\", 'settings.json')\n\n def get_class_folder_path(self):\n return os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))\n\n def set_params(self, **params):\n for k, v in params.items():\n setattr(self, k, v)\n\n def add_tool(self, tool):\n if not isinstance(tool, type):\n raise Exception(\"Tool must not be initialized.\")\n if issubclass(tool, Retrieval):\n # check that tools name is not already in tools\n for t in self.tools:\n if issubclass(t, Retrieval):\n return\n self.tools.append(tool)\n elif issubclass(tool, CodeInterpreter):\n for t in self.tools:\n if issubclass(t, Retrieval):\n return\n self.tools.append(tool)\n elif issubclass(tool, BaseTool):\n for t in self.tools:\n if t.__name__ == tool.__name__:\n self.tools.remove(t)\n self.tools.append(tool)\n else:\n raise Exception(\"Invalid tool type.\")\n\n def add_instructions(self, instructions: str):\n if self._shared_instructions is None:\n self._shared_instructions = instructions\n else:\n self.instructions = self.instructions.replace(self._shared_instructions, \"\")\n self.instructions = self.instructions.strip().strip(\"\\n\")\n self._shared_instructions = instructions\n\n self.instructions = self._shared_instructions + \"\\n\\n\" + self.instructions\n\n def get_oai_tools(self):\n tools = []\n for tool in self.tools:\n if not isinstance(tool, type):\n raise Exception(\"Tool must not be initialized.\")\n\n if issubclass(tool, Retrieval):\n tools.append(tool().model_dump())\n elif issubclass(tool, CodeInterpreter):\n tools.append(tool().model_dump())\n elif issubclass(tool, BaseTool):\n tools.append({\n \"type\": \"function\",\n \"function\": tool.openai_schema\n })\n else:\n raise Exception(\"Invalid tool type.\")\n return tools\n\n def delete_assistant(self):\n self.client.beta.assistants.delete(self.id)\n self._delete_settings()\n\n def _delete_settings(self):\n path = self.get_settings_path()\n # check if settings.json exists\n if os.path.isfile(path):\n settings = []\n with open(path, 'r') as f:\n settings = json.load(f)\n for i, assistant_settings in enumerate(settings):\n if assistant_settings['id'] == self.id:\n settings.pop(i)\n break\n with open(path, 'w') as f:\n json.dump(settings, f, indent=4)" }, { "identifier": "MessageOutput", "path": "agency_swarm/messages/message_output.py", "snippet": "class MessageOutput:\n def __init__(self, msg_type: Literal[\"function\", \"function_output\", \"text\", \"system\"], sender_name: str, receiver_name: str, content):\n self.msg_type = msg_type\n self.sender_name = str(sender_name)\n self.receiver_name = str(receiver_name)\n self.content = str(content)\n\n 
self.client = get_openai_client()\n\n def hash_names_to_color(self):\n if self.msg_type == \"function\" or self.msg_type == \"function_output\":\n return \"dim\"\n\n if self.msg_type == \"system\":\n return \"red\"\n\n combined_str = self.sender_name + self.receiver_name\n encoded_str = combined_str.encode()\n hash_obj = hashlib.md5(encoded_str)\n hash_int = int(hash_obj.hexdigest(), 16)\n colors = [\n 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white',\n ]\n color_index = hash_int % len(colors)\n return colors[color_index]\n\n def cprint(self):\n console.rule()\n\n emoji = self.get_sender_emoji()\n\n header = emoji + self.get_formatted_header()\n\n color = self.hash_names_to_color()\n\n console.print(header, style=color)\n\n console.print(str(self.content), style=color)\n\n def get_formatted_header(self):\n if self.msg_type == \"function\":\n text = f\"{self.sender_name} 🛠️ Executing Function\"\n return text\n\n if self.msg_type == \"function_output\":\n text = f\"{self.sender_name} ⚙️Function Output\"\n return text\n\n text = f\"{self.sender_name} 🗣️ @{self.receiver_name}\"\n\n return text\n\n def get_formatted_content(self):\n header = self.get_formatted_header()\n content = f\"\\n{self.content}\\n\"\n return header + content\n\n def get_sender_emoji(self):\n if self.msg_type == \"system\":\n return \"🤖\"\n\n sender_name = self.sender_name.lower()\n if self.msg_type == \"function_output\":\n sender_name = self.receiver_name.lower()\n\n if sender_name == \"user\":\n return \"👤\"\n\n if sender_name == \"ceo\":\n return \"🤵\"\n\n # output emoji based on hash of sender name\n encoded_str = sender_name.encode()\n hash_obj = hashlib.md5(encoded_str)\n hash_int = int(hash_obj.hexdigest(), 16)\n emojis = [\n '🐶', '🐱', '🐭', '🐹', '🐰', '🦊',\n '🐻', '🐼', '🐨', '🐯', '🦁', '🐮',\n '🐷', '🐸', '🐵', '🐔', '🐧', '🐦',\n '🐤']\n\n emoji_index = hash_int % len(emojis)\n\n return emojis[emoji_index]" }, { "identifier": "User", "path": "agency_swarm/user/user.py", "snippet": "class User:\n name: str = \"User\"\n\n def __init__(self, name: str = None):\n # later, we can add more attributes to the user like bio, etc\n pass" }, { "identifier": "get_openai_client", "path": "agency_swarm/util/oai.py", "snippet": "def get_openai_client():\n global client\n with client_lock:\n if client is None:\n # Check if the API key is set\n api_key = openai.api_key or os.getenv('OPENAI_API_KEY')\n if api_key is None:\n raise ValueError(\"OpenAI API key is not set. Please set it using set_openai_key.\")\n client = instructor.patch(openai.OpenAI(api_key=api_key,\n max_retries=5))\n return client" } ]
import inspect
import time
from typing import Literal

from agency_swarm.agents import Agent
from agency_swarm.messages import MessageOutput
from agency_swarm.user import User
from agency_swarm.util.oai import get_openai_client
4,241
class Thread:
    id: str = None
    thread = None
    run = None
class Thread:
    id: str = None
    thread = None
    run = None
def __init__(self, agent: Literal[Agent, User], recipient_agent: Agent):
2
2023-11-16 02:29:26+00:00
8k
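Note on the record above: the next_line field only opens Thread.__init__; its body is not included. A minimal sketch of how the constructor might continue, assuming the thread simply stores its two endpoints and grabs the shared OpenAI client from agency_swarm.util.oai (the attribute names are hypothetical, not taken from the repo):

from typing import Literal

from agency_swarm.agents import Agent
from agency_swarm.user import User
from agency_swarm.util.oai import get_openai_client


class Thread:
    id: str = None
    thread = None
    run = None

    def __init__(self, agent: Literal[Agent, User], recipient_agent: Agent):
        # Hypothetical body: keep both conversation endpoints and a shared client;
        # the actual thread.py may create the OpenAI thread lazily elsewhere.
        self.agent = agent
        self.recipient_agent = recipient_agent
        self.client = get_openai_client()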
pbelcak/UltraFastBERT
training/cramming/architectures/crammed_bert.py
[ { "identifier": "FFF", "path": "training/cramming/architectures/fff.py", "snippet": "class FFF(nn.Module):\n\tdef __init__(self, input_width, output_width, depth, parallel_size, activation=nn.GELU):\n\t\tsuper().__init__()\n\n\t\tself.input_width = input_width\n\t\tself.output_width = output_width\n\t\tself.depth = depth\n\t\tself.parallel_size = parallel_size\n\t\tself.n_nodes = 2 ** (self.depth + 1) - 1\n\n\t\tself.linear_in = nn.Linear(input_width, parallel_size * self.n_nodes, bias=True)\n\t\tself.linear_out = nn.Linear(parallel_size * self.n_nodes, output_width, bias=False)\n\n\t\tinit_k = math.sqrt(1.0 / self.input_width)\n\t\tself.linear_in.weight.data = torch.empty((self.parallel_size * self.n_nodes, self.input_width)).uniform_(-init_k, +init_k)\n\t\tself.linear_in.bias.data = torch.empty((self.parallel_size * self.n_nodes)).uniform_(-init_k, +init_k)\n\t\tinit_k2 = math.sqrt(1.0 / ((self.depth+1) * self.parallel_size))\n\t\tself.linear_out.weight.data = torch.empty((self.output_width, self.parallel_size * self.n_nodes)).uniform_(-init_k2, +init_k2)\n\n\t\tself.activation = activation()\n\n\tdef forward(self, oldx: torch.Tensor) -> torch.Tensor:\n\t\t# x has shape (..., input_width)\n\t\tx = oldx.reshape(-1, self.input_width)\n\t\t# x has shape (batch_size, input_width)\n\t\tbatch_size = x.shape[0]\n\n\t\tlogits = self.linear_in(x) # (batch_size, parallel_size * n_nodes)\n\t\tlogit_decisions = (logits > 0).long() # (batch_size, parallel_size * n_nodes)\n\t\tactivations = self.activation(logits) # (batch_size, parallel_size * n_nodes)\n\n\t\t# recursively descending by depth, enforce conditionality\n\t\tactivations = activations.view(batch_size, self.parallel_size, self.n_nodes) # (batch_size, parallel_size, n_nodes)\n\t\tdecisions = logit_decisions.view(batch_size, self.parallel_size, self.n_nodes) # (batch_size, parallel_size, n_nodes)\n\n\t\twith torch.no_grad():\n\t\t\tcurrent_nodes = torch.zeros((batch_size, self.parallel_size), dtype=torch.long, device=x.device)\n\t\t\tdecision_map = torch.zeros_like(decisions, dtype=torch.float) # (batch_size, parallel_size, n_nodes)\n\t\t\tdecision_map.scatter_(dim=2, index=current_nodes.unsqueeze(-1), value=1.0)\n\n\t\t\tfor d in range(self.depth):\n\t\t\t\tcurrent_platform = 2 ** d - 1\n\t\t\t\tnext_platform = 2 ** (d + 1) - 1\n\t\t\t\tmoves = torch.gather(decisions, 2, current_nodes.unsqueeze(2)).squeeze(2)\n\t\t\t\tnext_nodes = (current_nodes - current_platform) * 2 + moves + next_platform\n\t\t\t\tdecision_map.scatter_(2, next_nodes.unsqueeze(-1), 1.0)\n\t\t\t\tcurrent_nodes = next_nodes\n\n\t\tactivations = activations * decision_map # (batch_size, parallel_size, n_nodes)\n\t\tnew_logits = self.linear_out(activations.flatten(1, 2)) # (batch_size, output_width)\n\n\t\tret = new_logits.reshape_as(oldx)\n\t\treturn ret" }, { "identifier": "_get_norm_fn", "path": "training/cramming/architectures/components.py", "snippet": "def _get_norm_fn(norm_name):\n if norm_name == \"ScaleNorm\":\n norm_fn = ScaleNorm\n elif norm_name == \"RMSNorm\":\n norm_fn = RMSNorm\n elif norm_name == \"ApexLayerNorm\":\n from apex.normalization import FusedLayerNorm\n\n norm_fn = FusedLayerNorm\n else:\n norm_fn = getattr(torch.nn, norm_name)\n return norm_fn" }, { "identifier": "_get_nonlin_fn", "path": "training/cramming/architectures/components.py", "snippet": "def _get_nonlin_fn(nonlin_name, use_gating=True):\n if \"glu\" in nonlin_name.lower():\n nonlin_name = nonlin_name.split(\"glu\")[0]\n wrap_in_glu = use_gating\n else:\n wrap_in_glu = False\n 
nonlin_fn = getattr(torch.nn, nonlin_name) # dont mess this up :<\n try:\n nonlin_fn = partial(nonlin_fn, inplace=INPLACE)\n nonlin_fn()\n except TypeError:\n nonlin_fn = getattr(torch.nn, nonlin_name)\n\n if wrap_in_glu:\n return partial(GLU, nonlin_fn)\n else:\n return nonlin_fn" }, { "identifier": "EmbeddingComponent", "path": "training/cramming/architectures/components.py", "snippet": "class EmbeddingComponent(torch.nn.Module):\n def __init__(self, cfg_embedding, norm, norm_eps):\n super().__init__()\n self.word_embedding = torch.nn.Embedding(\n cfg_embedding.vocab_size, cfg_embedding.embedding_dim, padding_idx=cfg_embedding.pad_token_id\n )\n if cfg_embedding.pos_embedding == \"learned\":\n self.pos_embedding = LearnablePositional(cfg_embedding.embedding_dim, cfg_embedding.max_seq_length)\n elif cfg_embedding.pos_embedding == \"sinusoidal\":\n self.pos_embedding = SinusoidalPositional(cfg_embedding.embedding_dim, cfg_embedding.max_seq_length)\n elif cfg_embedding.pos_embedding == \"scaled-sinusoidal\":\n self.pos_embedding = ScaledSinosoidal(cfg_embedding.embedding_dim, cfg_embedding.max_seq_length)\n else:\n self.pos_embedding = None\n\n self.dropout = torch.nn.Dropout(p=cfg_embedding.dropout_prob, inplace=INPLACE)\n if cfg_embedding.normalization:\n self.stabilize_low_precision = cfg_embedding.get(\"stable_low_precision\", False)\n self.norm = _get_norm_fn(norm)(cfg_embedding.embedding_dim, eps=norm_eps)\n else:\n self.stabilize_low_precision = False\n self.norm = torch.nn.Identity()\n\n def forward(self, input_ids):\n embeds = self.word_embedding(input_ids)\n if self.pos_embedding is not None:\n embeds += self.pos_embedding(input_ids)\n\n if self.stabilize_low_precision:\n # Stabilize as in bnb StableEmbedding\n return self.dropout(self.norm(embeds.to(torch.get_default_dtype()))).to(embeds.dtype)\n else:\n return self.dropout(self.norm(embeds))" }, { "identifier": "PoolingComponent", "path": "training/cramming/architectures/components.py", "snippet": "class PoolingComponent(torch.nn.Module):\n def __init__(self, cfg_head, main_model_hidden_size):\n super().__init__()\n self.dense = torch.nn.Linear(main_model_hidden_size, cfg_head.head_dim) if cfg_head.include_ff_layer else torch.nn.Identity()\n self.activation = _get_nonlin_fn(cfg_head.nonlin, use_gating=False)()\n self.dropout = torch.nn.Dropout(cfg_head.classifier_dropout)\n self.pool_scheme: str = cfg_head.pooler\n\n def forward(self, hidden_states):\n \"\"\"A variety of pooling options. Some ignore the cls token. 
Input needs to be B S H.\"\"\"\n if self.pool_scheme == \"zero_index\":\n first_token_tensor = hidden_states[:, 0]\n elif self.pool_scheme == \"avg\":\n first_token_tensor = hidden_states.mean(dim=1)\n elif self.pool_scheme == \"max\":\n first_token_tensor = hidden_states.max(dim=1)[0]\n elif self.pool_scheme == \"lse\":\n first_token_tensor = hidden_states.logsumexp(dim=1)\n else:\n raise ValueError(f\"Invalid pooling scheme {self.pool_scheme} given.\")\n\n pooled_output = self.activation(self.dense(first_token_tensor))\n return self.dropout(pooled_output)" }, { "identifier": "PredictionHeadComponent", "path": "training/cramming/architectures/components.py", "snippet": "class PredictionHeadComponent(torch.nn.Module):\n def __init__(self, cfg_arch):\n super().__init__()\n\n if cfg_arch.embedding.embedding_dim == cfg_arch.hidden_size:\n output_size = cfg_arch.hidden_size\n else:\n output_size = cfg_arch.embedding.embedding_dim\n\n self.dense = torch.nn.Linear(cfg_arch.hidden_size, output_size, bias=cfg_arch.use_bias)\n self.nonlin = _get_nonlin_fn(cfg_arch.nonlin, use_gating=False)()\n self.norm = _get_norm_fn(cfg_arch.norm)(output_size, eps=cfg_arch.norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.norm(self.nonlin(self.dense(hidden_states)))\n return hidden_states" }, { "identifier": "GLU", "path": "training/cramming/architectures/components.py", "snippet": "class GLU(torch.nn.Module):\n \"\"\"*-GLU activation functions.\n\n Implementation mostly following megatron\n \"\"\"\n\n def __init__(self, sub_activation):\n super().__init__()\n self.sub_activation = sub_activation()\n\n def forward(self, inputs):\n x, gate = inputs.chunk(2, dim=-1)\n return self.sub_activation(gate) * x" }, { "identifier": "get_extended_attention_mask", "path": "training/cramming/architectures/components.py", "snippet": "def get_extended_attention_mask(attention_mask: torch.Tensor, input_shape: Tuple[int], causal_attention: bool = False) -> torch.Tensor:\n \"\"\"\n Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n Arguments:\n attention_mask (`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (`Tuple[int]`):\n The shape of the input to the model.\n Returns:\n `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.\n\n Method stolen from huggingface :)\n \"\"\"\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - if the model is a decoder, apply a causal mask in addition to the padding mask\n # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if causal_attention:\n batch_size, seq_length = input_shape\n seq_ids = torch.arange(seq_length, device=attention_mask.device)\n causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]\n # in case past_key_values are used we need to add a prefix ones mask to the causal mask\n # causal and attention masks must have same type with pytorch version < 1.3\n causal_mask = causal_mask.to(attention_mask.dtype)\n\n if causal_mask.shape[1] < attention_mask.shape[1]:\n prefix_seq_len = attention_mask.shape[1] - 
causal_mask.shape[1]\n causal_mask = torch.cat(\n [\n torch.ones((batch_size, seq_length, prefix_seq_len), device=attention_mask.device, dtype=causal_mask.dtype),\n causal_mask,\n ],\n axis=-1,\n )\n extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]\n else:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(f\"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})\")\n\n # extended_attention_mask = extended_attention_mask.to(dtype=self.setup[\"dtype\"]) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask" }, { "identifier": "_init_module", "path": "training/cramming/architectures/components.py", "snippet": "@torch.no_grad()\ndef _init_module(module, init_method=\"normal\", init_std=0.02, hidden_size=768, num_layers=12):\n\n if init_method == \"normal\":\n std = init_std\n elif init_method == \"small\":\n # Transformers without Tears: Improving\n # the Normalization of Self-Attention - Nguyen, T. & Salazar, J. (2010)\n std = torch.as_tensor(2 / (5 * hidden_size)).sqrt()\n elif init_method == \"megatron\":\n std = torch.as_tensor(1 / (3 * hidden_size)).sqrt()\n elif init_method == \"wang\":\n std = 2 / num_layers / torch.as_tensor(hidden_size).sqrt()\n elif init_method == \"deepnorm\":\n std = torch.as_tensor(8 * num_layers).pow(-0.25) # todo: apply this only to some layers\n elif init_method == \"agd-orthogonal\":\n std = init_std # no std modification necessary, setting to default\n else:\n raise ValueError(f\"Invalid init method {init_method} given.\")\n\n if isinstance(module, torch.nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, torch.nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, torch.nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n if init_method == \"agd-orthogonal\":\n for name, p in module.named_parameters():\n if p.dim() == 1:\n print(f\"WARNING: Biases are not supported. 
This breaks scaling of parameter {name} in theory.\")\n if p.dim() == 2:\n torch.nn.init.orthogonal_(p)\n p *= singular_value(p.shape)\n if p.dim() == 4:\n for kx in range(p.shape[2]):\n for ky in range(p.shape[3]):\n torch.nn.init.orthogonal_(p[:, :, kx, ky])\n p *= singular_value(p.shape)" }, { "identifier": "get_attention_mechanism", "path": "training/cramming/architectures/attention.py", "snippet": "def get_attention_mechanism(\n idx,\n hidden_size,\n cfg_attention,\n):\n if cfg_attention.type == \"self-attention\":\n mechanism = SeqFirstSelfAttention(hidden_size, cfg_attention) # neox\n elif cfg_attention.type == \"pytorch\":\n # Sanity check 1: [Warning: This includes the output projection twice...]\n mechanism = SelfAttentionPyTorch(hidden_size, cfg_attention) # torch default\n elif cfg_attention.type == \"pytorch-seqfirst\":\n # Sanity check 1: [Warning: This includes the output projection twice...]\n mechanism = SeqFirstSelfAttentionPyTorch(hidden_size, cfg_attention) # torch default\n elif cfg_attention.type == \"huggingface\":\n mechanism = BertAttentionWrapper(hidden_size, cfg_attention) # always includes bias!\n elif cfg_attention.type == \"flash-attention-impl\": # the fast implementation called flash\n mechanism = FlashMultiHeadAttention(hidden_size, cfg_attention)\n elif cfg_attention.type == \"fourier\":\n mechanism = FourierMixing(hidden_size, cfg_attention)\n elif cfg_attention.type == \"fourier-experimental\":\n mechanism = FourierMixingParametrized(hidden_size, cfg_attention)\n elif cfg_attention.type == \"flash\": # flash from transformer quality in linear time\n mechanism = FLASH(hidden_size, cfg_attention)\n elif cfg_attention.type == \"tuformer\":\n mechanism = TuFormAttention(hidden_size, cfg_attention)\n elif cfg_attention.type == \"funnel\": # dont use this with a normal seq->seq model\n mechanism = FunnelAttention(hidden_size, cfg_attention)\n elif cfg_attention.type == \"seqfirst_tuformer\":\n mechanism = SeqFirstTuFormAttention(hidden_size, cfg_attention)\n elif cfg_attention.type == \"seqfirst2_tuformer\":\n mechanism = SeqFirstTuFormAttention(hidden_size, cfg_attention)\n elif cfg_attention.type == \"none\":\n mechanism = Identity(hidden_size)\n elif cfg_attention.type == \"fourier-hybrid\":\n if idx in cfg_attention.hybrid_layers:\n mechanism = SeqFirstSelfAttention(hidden_size, cfg_attention)\n else:\n mechanism = FourierMixing(hidden_size, cfg_attention)\n else:\n raise ValueError(f\"Invalid attention type {cfg_attention.type} given.\")\n return mechanism" } ]
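The FFF snippet in the context above masks activations so that only one root-to-leaf path of a heap-indexed binary tree survives per parallel tree. As a quick illustration (not code from the repo), its index arithmetic can be traced in isolation: level d starts at node 2**d - 1, and a decision move in {0, 1} picks the left or right child.

def child_index(node: int, d: int, move: int) -> int:
    current_platform = 2 ** d - 1        # first node index on level d
    next_platform = 2 ** (d + 1) - 1     # first node index on level d + 1
    return (node - current_platform) * 2 + move + next_platform

path = [0]                               # start at the root (node 0)
for d, move in enumerate([1, 0, 1]):     # example decisions, one per level, from (logits > 0)
    path.append(child_index(path[-1], d, move))

print(path)  # [0, 2, 5, 12]: the only nodes whose activations survive the decision_map mask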
import torch
from transformers import PretrainedConfig, PreTrainedModel
from transformers import AutoConfig, AutoModel, AutoModelForMaskedLM, AutoModelForSequenceClassification, AutoModelForTokenClassification
from typing import Optional
from omegaconf import OmegaConf

from .fff import FFF
from .components import (
    _get_norm_fn,
    _get_nonlin_fn,
    EmbeddingComponent,
    PoolingComponent,
    PredictionHeadComponent,
    GLU,
    get_extended_attention_mask,
    _init_module,
)
from .attention import get_attention_mechanism
5,579
        self.dense_out = torch.nn.Linear(intermed_output_size, hidden_size, bias=use_bias)

    def forward(self, hidden_states):
        return self.dense_out(self.nonlin(self.dense_in(hidden_states)))


class TransformerLayer(torch.nn.Module):
    """A transformer-encoder structure based on the components from above."""

    def __init__(self, idx, cfg_arch):
        super().__init__()
        self.dropout = torch.nn.Dropout(cfg_arch.hidden_dropout_prob, inplace=False)
        self.norm1 = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps)
        self.norm2 = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps)
        self.attn = AttentionComponent(
            idx,
            cfg_arch.hidden_size,
            cfg_arch.attention,
            cfg_arch.use_bias,
        )
        self.LAYOUT = self.attn.LAYOUT

        UNDEFINED_VALUE = 3430323896892821
        if OmegaConf.select(cfg_arch, "intermed_type", default=UNDEFINED_VALUE) == UNDEFINED_VALUE \
                or cfg_arch.intermed_type == 'ff':
            self.ffn = FFNComponent(
                cfg_arch.hidden_size,
                cfg_arch.intermed_size,
                _get_nonlin_fn(cfg_arch.nonlin),
                cfg_arch.use_bias,
            )
        elif cfg_arch.intermed_type == 'fff':
            self.ffn = FFF(
                cfg_arch.hidden_size,
                cfg_arch.hidden_size,
                cfg_arch.intermed_depth,
                cfg_arch.intermed_size,
                _get_nonlin_fn(cfg_arch.nonlin),
            )
        else:
            raise ValueError(f"Invalid intermed_type {cfg_arch.intermed_type}")

    def forward(self, states, attention_mask: Optional[torch.Tensor] = None):
        states = states + self.dropout(self.attn(self.norm1(states), attention_mask))
        states = states + self.dropout(self.ffn(self.norm2(states)))
        return states


class ScriptableLM(PreTrainedModel):
    """Simplified transformer wrapper."""

    config_class = crammedBertConfig

    def __init__(self, config):
        super().__init__(config)
        self.cfg = OmegaConf.create(config.arch)

        self.embedding = EmbeddingComponent(self.cfg.embedding, self.cfg.norm, self.cfg.norm_eps)
        self.layers = torch.nn.ModuleList([TransformerLayer(idx, self.cfg) for idx in range(self.cfg.num_transformer_layers)])
        self.seq_first = self.layers[0].LAYOUT == "[S B H]" if len(self.layers) > 0 else False
        self.use_causal_attention = self.cfg.attention.causal_attention

        if self.cfg.final_norm:
            self.final_norm = _get_norm_fn(self.cfg.norm)(self.cfg.hidden_size, eps=self.cfg.norm_eps)
        else:
            self.final_norm = torch.nn.Identity()

    def set_hardness(self, hardness: float):
        for layer in self.layers:
            if hasattr(layer, "set_hardness"):
                layer.set_hardness(hardness)

    def forward(self, input_ids, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        if attention_mask is not None:
            attention_mask = get_extended_attention_mask(attention_mask, input_ids.shape, self.use_causal_attention)
        hidden_states = self.embedding(input_ids)

        if self.seq_first:
            hidden_states = hidden_states.transpose(0, 1).contiguous()

        for i, layer_module in enumerate(self.layers):
            hidden_states = layer_module(hidden_states, attention_mask)

        if self.seq_first:
            hidden_states = hidden_states.transpose(0, 1).contiguous()

        return self.final_norm(hidden_states)


class ScriptableLMForPreTraining(PreTrainedModel):
    """Pretraining version with optional prediction head and variant for sparse prediction."""

    config_class = crammedBertConfig

    def __init__(self, config):
        super().__init__(config)
        self.cfg = OmegaConf.create(config.arch)

        self.encoder = ScriptableLM(config)

        if not self.cfg.skip_head_transform:
            self.prediction_head = PredictionHeadComponent(self.cfg)
        else:
            self.prediction_head = torch.nn.Identity()  # from linear in old version

        self.decoder = torch.nn.Linear(self.cfg.embedding.embedding_dim, self.cfg.embedding.vocab_size, bias=self.cfg.decoder_bias)
        self.decoder.weight = self.encoder.embedding.word_embedding.weight

        self.loss_fn = torch.nn.CrossEntropyLoss()
        self.sparse_prediction = self.cfg.sparse_prediction

        self._init_weights()

    def set_hardness(self, hardness: float):
        self.encoder.set_hardness(hardness)

    def _init_weights(self, module=None):
        modules = self.modules() if module is None else [module]
        for module in modules:
"""This rewrite is a simplified version of the proposed changes that actually compiles statically in torch 2.0. This model is the final, optimized crammed model. Not all ablations discussed in the paper are implemented as switches in this version, for all those, check scriptable_bert.py on the old branch. """ class crammedBertConfig(PretrainedConfig): model_type = "crammedBERT" def __init__(self, cfg_arch_container: dict = {}, **kwargs): self.arch = cfg_arch_container super().__init__(**kwargs) def construct_crammed_bert(cfg_arch, vocab_size, downstream_classes=None): """See the config file for details on what is possible.""" config = crammedBertConfig(OmegaConf.to_container(cfg_arch, resolve=True)) config.arch["embedding"]["vocab_size"] = vocab_size config.arch["num_labels"] = downstream_classes if downstream_classes is None: if config.arch["objective_layout"] == "MLM": model = ScriptableLMForPreTraining(config) elif config.arch["objective_layout"] == "SCRIPT": model = ScriptableLMForSCRIPTTraining(config) else: raise ValueError(f"Invalid layout {config.arch['objective_layout']} of training objective given.") else: model = ScriptableLMForSequenceClassification(config) return model class AttentionComponent(torch.nn.Module): def __init__(self, idx, hidden_size, cfg_attention, use_bias=True): super().__init__() self.self_attention = get_attention_mechanism(idx, hidden_size, cfg_attention) if cfg_attention.skip_output_projection: self.dense = torch.nn.Identity() else: self.dense = torch.nn.Linear(self.self_attention.output_dim, hidden_size, bias=use_bias) self.LAYOUT = self.self_attention.LAYOUT def forward(self, hidden_states, attention_mask: Optional[torch.Tensor] = None): return self.dense(self.self_attention(hidden_states, attention_mask)) class FFNComponent(torch.nn.Module): """Note: The FF layer is not auto-scaled when using a GLU type activation. It actually turned out better not to scale it, so here the block is effectively smaller than may be expected. The neox suggestion for approx. 
equal parameter count is int(4 * 2 / 3 * hidden_size) * 2 [this is ~5.33] """ def __init__(self, hidden_size, intermed_size, nonlin_fn=torch.nn.GELU, use_bias=True): super().__init__() self.dense_in = torch.nn.Linear(hidden_size, intermed_size, bias=use_bias) self.nonlin = nonlin_fn() if isinstance(self.nonlin, GLU): intermed_output_size = intermed_size // 2 else: intermed_output_size = intermed_size self.dense_out = torch.nn.Linear(intermed_output_size, hidden_size, bias=use_bias) def forward(self, hidden_states): return self.dense_out(self.nonlin(self.dense_in(hidden_states))) class TransformerLayer(torch.nn.Module): """A transformer-encoder structure based on the components from above.""" def __init__(self, idx, cfg_arch): super().__init__() self.dropout = torch.nn.Dropout(cfg_arch.hidden_dropout_prob, inplace=False) self.norm1 = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps) self.norm2 = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps) self.attn = AttentionComponent( idx, cfg_arch.hidden_size, cfg_arch.attention, cfg_arch.use_bias, ) self.LAYOUT = self.attn.LAYOUT UNDEFINED_VALUE = 3430323896892821 if OmegaConf.select(cfg_arch, "intermed_type", default=UNDEFINED_VALUE) == UNDEFINED_VALUE \ or cfg_arch.intermed_type == 'ff': self.ffn = FFNComponent( cfg_arch.hidden_size, cfg_arch.intermed_size, _get_nonlin_fn(cfg_arch.nonlin), cfg_arch.use_bias, ) elif cfg_arch.intermed_type == 'fff': self.ffn = FFF( cfg_arch.hidden_size, cfg_arch.hidden_size, cfg_arch.intermed_depth, cfg_arch.intermed_size, _get_nonlin_fn(cfg_arch.nonlin), ) else: raise ValueError(f"Invalid intermed_type {cfg_arch.intermed_type}") def forward(self, states, attention_mask: Optional[torch.Tensor] = None): states = states + self.dropout(self.attn(self.norm1(states), attention_mask)) states = states + self.dropout(self.ffn(self.norm2(states))) return states class ScriptableLM(PreTrainedModel): """Simplified transformer wrapper.""" config_class = crammedBertConfig def __init__(self, config): super().__init__(config) self.cfg = OmegaConf.create(config.arch) self.embedding = EmbeddingComponent(self.cfg.embedding, self.cfg.norm, self.cfg.norm_eps) self.layers = torch.nn.ModuleList([TransformerLayer(idx, self.cfg) for idx in range(self.cfg.num_transformer_layers)]) self.seq_first = self.layers[0].LAYOUT == "[S B H]" if len(self.layers) > 0 else False self.use_causal_attention = self.cfg.attention.causal_attention if self.cfg.final_norm: self.final_norm = _get_norm_fn(self.cfg.norm)(self.cfg.hidden_size, eps=self.cfg.norm_eps) else: self.final_norm = torch.nn.Identity() def set_hardness(self, hardness: float): for layer in self.layers: if hasattr(layer, "set_hardness"): layer.set_hardness(hardness) def forward(self, input_ids, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None): if attention_mask is not None: attention_mask = get_extended_attention_mask(attention_mask, input_ids.shape, self.use_causal_attention) hidden_states = self.embedding(input_ids) if self.seq_first: hidden_states = hidden_states.transpose(0, 1).contiguous() for i, layer_module in enumerate(self.layers): hidden_states = layer_module(hidden_states, attention_mask) if self.seq_first: hidden_states = hidden_states.transpose(0, 1).contiguous() return self.final_norm(hidden_states) class ScriptableLMForPreTraining(PreTrainedModel): """Pretraining version with optional prediction head and variant for sparse prediction.""" config_class = crammedBertConfig def __init__(self, 
config): super().__init__(config) self.cfg = OmegaConf.create(config.arch) self.encoder = ScriptableLM(config) if not self.cfg.skip_head_transform: self.prediction_head = PredictionHeadComponent(self.cfg) else: self.prediction_head = torch.nn.Identity() # from linear in old version self.decoder = torch.nn.Linear(self.cfg.embedding.embedding_dim, self.cfg.embedding.vocab_size, bias=self.cfg.decoder_bias) self.decoder.weight = self.encoder.embedding.word_embedding.weight self.loss_fn = torch.nn.CrossEntropyLoss() self.sparse_prediction = self.cfg.sparse_prediction self._init_weights() def set_hardness(self, hardness: float): self.encoder.set_hardness(hardness) def _init_weights(self, module=None): modules = self.modules() if module is None else [module] for module in modules:
_init_module(
8
2023-11-12 17:52:59+00:00
8k
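Note on the record above: the import statement pulls in AutoConfig and the AutoModel* factories, which the shown code never touches. Presumably the remainder of crammed_bert.py registers the custom classes with them; the sketch below shows the standard Hugging Face registration pattern, and it assumes ScriptableLMForSequenceClassification (referenced by construct_crammed_bert but not included in this record) is defined later in the file.

from transformers import AutoConfig, AutoModel, AutoModelForMaskedLM, AutoModelForSequenceClassification

# Register the custom config/model pair so that the Auto* factories can resolve
# checkpoints whose config declares model_type == "crammedBERT".
AutoConfig.register("crammedBERT", crammedBertConfig)
AutoModel.register(crammedBertConfig, ScriptableLM)
AutoModelForMaskedLM.register(crammedBertConfig, ScriptableLMForPreTraining)
AutoModelForSequenceClassification.register(crammedBertConfig, ScriptableLMForSequenceClassification)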
resemble-ai/resemble-enhance
resemble_enhance/enhancer/enhancer.py
[ { "identifier": "Normalizer", "path": "resemble_enhance/common.py", "snippet": "class Normalizer(nn.Module):\n def __init__(self, momentum=0.01, eps=1e-9):\n super().__init__()\n self.momentum = momentum\n self.eps = eps\n self.running_mean_unsafe: Tensor\n self.running_var_unsafe: Tensor\n self.register_buffer(\"running_mean_unsafe\", torch.full([], torch.nan))\n self.register_buffer(\"running_var_unsafe\", torch.full([], torch.nan))\n\n @property\n def started(self):\n return not torch.isnan(self.running_mean_unsafe)\n\n @property\n def running_mean(self):\n if not self.started:\n return torch.zeros_like(self.running_mean_unsafe)\n return self.running_mean_unsafe\n\n @property\n def running_std(self):\n if not self.started:\n return torch.ones_like(self.running_var_unsafe)\n return (self.running_var_unsafe + self.eps).sqrt()\n\n @torch.no_grad()\n def _ema(self, a: Tensor, x: Tensor):\n return (1 - self.momentum) * a + self.momentum * x\n\n def update_(self, x):\n if not self.started:\n self.running_mean_unsafe = x.mean()\n self.running_var_unsafe = x.var()\n else:\n self.running_mean_unsafe = self._ema(self.running_mean_unsafe, x.mean())\n self.running_var_unsafe = self._ema(self.running_var_unsafe, (x - self.running_mean).pow(2).mean())\n\n def forward(self, x: Tensor, update=True):\n if self.training and update:\n self.update_(x)\n self.stats = dict(mean=self.running_mean.item(), std=self.running_std.item())\n x = (x - self.running_mean) / self.running_std\n return x\n\n def inverse(self, x: Tensor):\n return x * self.running_std + self.running_mean" }, { "identifier": "load_denoiser", "path": "resemble_enhance/denoiser/inference.py", "snippet": "@cache\ndef load_denoiser(run_dir, device):\n if run_dir is None:\n return Denoiser(HParams())\n hp = HParams.load(run_dir)\n denoiser = Denoiser(hp)\n path = run_dir / \"ds\" / \"G\" / \"default\" / \"mp_rank_00_model_states.pt\"\n state_dict = torch.load(path, map_location=\"cpu\")[\"module\"]\n denoiser.load_state_dict(state_dict)\n denoiser.eval()\n denoiser.to(device)\n return denoiser" }, { "identifier": "MelSpectrogram", "path": "resemble_enhance/melspec.py", "snippet": "class MelSpectrogram(nn.Module):\n def __init__(self, hp: HParams):\n \"\"\"\n Torch implementation of Resemble's mel extraction.\n Note that the values are NOT identical to librosa's implementation\n due to floating point precisions.\n \"\"\"\n super().__init__()\n self.hp = hp\n self.melspec = TorchMelSpectrogram(\n hp.wav_rate,\n n_fft=hp.n_fft,\n win_length=hp.win_size,\n hop_length=hp.hop_size,\n f_min=0,\n f_max=hp.wav_rate // 2,\n n_mels=hp.num_mels,\n power=1,\n normalized=False,\n # NOTE: Folowing librosa's default.\n pad_mode=\"constant\",\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )\n self.register_buffer(\"stft_magnitude_min\", torch.FloatTensor([hp.stft_magnitude_min]))\n self.min_level_db = 20 * np.log10(hp.stft_magnitude_min)\n self.preemphasis = hp.preemphasis\n self.hop_size = hp.hop_size\n\n def forward(self, wav, pad=True):\n \"\"\"\n Args:\n wav: [B, T]\n \"\"\"\n device = wav.device\n if wav.is_mps:\n wav = wav.cpu()\n self.to(wav.device)\n if self.preemphasis > 0:\n wav = torch.nn.functional.pad(wav, [1, 0], value=0)\n wav = wav[..., 1:] - self.preemphasis * wav[..., :-1]\n mel = self.melspec(wav)\n mel = self._amp_to_db(mel)\n mel_normed = self._normalize(mel)\n assert not pad or mel_normed.shape[-1] == 1 + wav.shape[-1] // self.hop_size # Sanity check\n mel_normed = mel_normed.to(device)\n return mel_normed # (M, T)\n\n def _normalize(self, 
s, headroom_db=15):\n return (s - self.min_level_db) / (-self.min_level_db + headroom_db)\n\n def _amp_to_db(self, x):\n return x.clamp_min(self.hp.stft_magnitude_min).log10() * 20" }, { "identifier": "global_leader_only", "path": "resemble_enhance/utils/distributed.py", "snippet": "def get_free_port():\ndef fix_unset_envs():\ndef init_distributed():\ndef local_rank():\ndef global_rank():\ndef is_local_leader():\ndef is_global_leader():\ndef leader_only(leader_only_type, fn: Callable | None = None, boardcast_return=False) -> Callable:\n def wrapper(fn):\n def wrapped(*args, **kwargs):" }, { "identifier": "TrainLoop", "path": "resemble_enhance/utils/train_loop.py", "snippet": "class TrainLoop:\n _ = KW_ONLY\n\n run_dir: Path\n train_dl: DataLoader\n\n load_G: EngineLoader\n feed_G: GenFeeder\n load_D: EngineLoader | None = None\n feed_D: DisFeeder | None = None\n\n update_every: int = 5_000\n eval_every: int = 5_000\n backup_steps: tuple[int, ...] = (5_000, 100_000, 500_000)\n\n device: str = \"cuda\"\n eval_fn: EvalFn | None = None\n gan_training_start_step: int | None = None\n\n @property\n def global_step(self):\n return self.engine_G.global_step # How many steps have been completed?\n\n @property\n def eval_dir(self) -> Path | None:\n if self.eval_every != 0:\n eval_dir = self.run_dir.joinpath(\"eval\")\n eval_dir.mkdir(exist_ok=True)\n else:\n eval_dir = None\n return eval_dir\n\n @property\n def viz_dir(self) -> Path:\n return Path(self.run_dir / \"viz\")\n\n def make_current_step_viz_path(self, name: str, suffix: str) -> Path:\n path = (self.viz_dir / name / f\"{self.global_step}\").with_suffix(suffix)\n path.parent.mkdir(exist_ok=True, parents=True)\n return path\n\n def __post_init__(self):\n engine_G = self.load_G(self.run_dir)\n if self.load_D is None:\n engine_D = None\n else:\n engine_D = self.load_D(self.run_dir)\n self.engine_G = engine_G\n self.engine_D = engine_D\n\n @property\n def model_G(self):\n return self.engine_G.module\n\n @property\n def model_D(self):\n if self.engine_D is None:\n return None\n return self.engine_D.module\n\n def save_checkpoint(self, tag=\"default\"):\n engine_G = self.engine_G\n engine_D = self.engine_D\n engine_G.save_checkpoint(tag=tag)\n if engine_D is not None:\n engine_D.save_checkpoint(tag=tag)\n\n def run(self, max_steps: int = -1):\n self.set_running_loop_(self)\n\n train_dl = self.train_dl\n update_every = self.update_every\n eval_every = self.eval_every\n device = self.device\n eval_fn = self.eval_fn\n\n engine_G = self.engine_G\n engine_D = self.engine_D\n eval_dir = self.eval_dir\n\n init_step = self.global_step\n\n logger.info(f\"\\nTraining from step {init_step} to step {max_steps}\")\n warmup_steps = {init_step + x for x in [50, 100, 500]}\n\n engine_G.train()\n\n if engine_D is not None:\n engine_D.train()\n\n gan_start_step = self.gan_training_start_step\n\n while True:\n loss_G = loss_D = 0\n\n for batch in train_dl:\n torch.cuda.synchronize()\n start_time = time.time()\n\n # What's the step after this batch?\n step = self.global_step + 1\n\n # Send data to the GPU\n batch = tree_map(lambda x: x.to(device) if isinstance(x, Tensor) else x, batch)\n\n stats = {\"step\": step}\n\n # Include step == 1 for sanity check\n gan_started = gan_start_step is not None and (step >= gan_start_step or step == 1)\n gan_started &= engine_D is not None\n\n # Generator step\n fake, losses = self.feed_G(engine=engine_G, batch=batch)\n\n # Train generator\n if gan_started:\n assert engine_D is not None\n assert self.feed_D is not None\n\n # Freeze 
the discriminator to let gradient go through fake\n engine_D.freeze_()\n losses |= self.feed_D(engine=engine_D, batch=None, fake=fake)\n\n loss_G = sum(losses.values())\n stats |= {f\"G/{k}\": v.item() for k, v in losses.items()}\n stats |= {f\"G/{k}\": v for k, v in engine_G.gather_attribute(\"stats\").items()}\n del losses\n\n assert isinstance(loss_G, Tensor)\n stats[\"G/loss\"] = loss_G.item()\n stats[\"G/lr\"] = engine_G.get_lr()[0]\n stats[\"G/grad_norm\"] = engine_G.get_grad_norm() or 0\n\n if loss_G.isnan().item():\n logger.error(\"Generator loss is NaN, skipping step\")\n continue\n\n engine_G.backward(loss_G)\n engine_G.step()\n\n # Discriminator step\n if gan_started:\n assert engine_D is not None\n assert self.feed_D is not None\n\n engine_D.unfreeze_()\n losses = self.feed_D(engine=engine_D, batch=batch, fake=fake.detach())\n del fake\n\n assert isinstance(losses, dict)\n loss_D = sum(losses.values())\n assert isinstance(loss_D, Tensor)\n\n stats |= {f\"D/{k}\": v.item() for k, v in losses.items()}\n stats |= {f\"D/{k}\": v for k, v in engine_D.gather_attribute(\"stats\").items()}\n del losses\n\n if loss_D.isnan().item():\n logger.error(\"Discriminator loss is NaN, skipping step\")\n continue\n\n engine_D.backward(loss_D)\n engine_D.step()\n\n stats[\"D/loss\"] = loss_D.item()\n stats[\"D/lr\"] = engine_D.get_lr()[0]\n stats[\"D/grad_norm\"] = engine_D.get_grad_norm() or 0\n\n torch.cuda.synchronize()\n stats[\"elapsed_time\"] = time.time() - start_time\n stats = tree_map(lambda x: float(f\"{x:.4g}\") if isinstance(x, float) else x, stats)\n logger.info(json.dumps(stats, indent=0))\n\n command = non_blocking_input()\n\n evaling = step % eval_every == 0 or step in warmup_steps or command.strip() == \"eval\"\n if eval_fn is not None and is_global_leader() and eval_dir is not None and evaling:\n engine_G.eval()\n eval_fn(engine_G, eval_dir=eval_dir)\n engine_G.train()\n\n if command.strip() == \"quit\":\n logger.info(\"Training paused\")\n self.save_checkpoint(\"default\")\n return\n\n if command.strip() == \"backup\" or step in self.backup_steps:\n logger.info(\"Backing up\")\n self.save_checkpoint(tag=f\"backup_{step:07d}\")\n\n if step % update_every == 0 or command.strip() == \"save\":\n self.save_checkpoint(tag=\"default\")\n\n if step == max_steps:\n logger.info(\"Training finished\")\n self.save_checkpoint(tag=\"default\")\n return\n\n @classmethod\n def set_running_loop_(cls, loop):\n assert isinstance(loop, cls), f\"Expected {cls}, got {type(loop)}\"\n cls._running_loop: cls = loop\n\n @classmethod\n def get_running_loop(cls) -> \"TrainLoop | None\":\n if hasattr(cls, \"_running_loop\"):\n assert isinstance(cls._running_loop, cls)\n return cls._running_loop\n return None\n\n @classmethod\n def get_running_loop_global_step(cls) -> int | None:\n if loop := cls.get_running_loop():\n return loop.global_step\n return None\n\n @classmethod\n def get_running_loop_viz_path(cls, name: str, suffix: str) -> Path | None:\n if loop := cls.get_running_loop():\n return loop.make_current_step_viz_path(name, suffix)\n return None" }, { "identifier": "HParams", "path": "resemble_enhance/enhancer/hparams.py", "snippet": "class HParams(HParamsBase):\n cfm_solver_method: str = \"midpoint\"\n cfm_solver_nfe: int = 64\n cfm_time_mapping_divisor: int = 4\n univnet_nc: int = 96\n\n lcfm_latent_dim: int = 64\n lcfm_training_mode: str = \"ae\"\n lcfm_z_scale: float = 5\n\n vocoder_extra_dim: int = 32\n\n gan_training_start_step: int | None = 5_000\n enhancer_stage1_run_dir: Path | None = None\n\n 
denoiser_run_dir: Path | None = None" }, { "identifier": "IRMAE", "path": "resemble_enhance/enhancer/lcfm/irmae.py", "snippet": "class IRMAE(nn.Module):\n def __init__(\n self,\n input_dim,\n output_dim,\n latent_dim,\n hidden_dim=1024,\n num_irms=4,\n ):\n \"\"\"\n Args:\n input_dim: input dimension\n output_dim: output dimension\n latent_dim: latent dimension\n hidden_dim: hidden layer dimension\n num_irm_matrics: number of implicit rank minimization matrices\n norm: normalization layer\n \"\"\"\n self.input_dim = input_dim\n super().__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv1d(input_dim, hidden_dim, 3, padding=\"same\"),\n *[ResBlock(hidden_dim) for _ in range(4)],\n # Try to obtain compact representation (https://proceedings.neurips.cc/paper/2020/file/a9078e8653368c9c291ae2f8b74012e7-Paper.pdf)\n *[nn.Conv1d(hidden_dim if i == 0 else latent_dim, latent_dim, 1, bias=False) for i in range(num_irms)],\n nn.Tanh(),\n )\n\n self.decoder = nn.Sequential(\n nn.Conv1d(latent_dim, hidden_dim, 3, padding=\"same\"),\n *[ResBlock(hidden_dim) for _ in range(4)],\n nn.Conv1d(hidden_dim, output_dim, 1),\n )\n\n self.head = nn.Sequential(\n nn.Conv1d(output_dim, hidden_dim, 3, padding=\"same\"),\n nn.GELU(),\n nn.Conv1d(hidden_dim, input_dim, 1),\n )\n\n self.estimator = Normalizer()\n\n def encode(self, x):\n \"\"\"\n Args:\n x: (b c t) tensor\n \"\"\"\n z = self.encoder(x) # (b c t)\n _ = self.estimator(z) # Estimate the glboal mean and std of z\n self.stats = {}\n self.stats[\"z_mean\"] = z.mean().item()\n self.stats[\"z_std\"] = z.std().item()\n self.stats[\"z_abs_68\"] = z.abs().quantile(0.6827).item()\n self.stats[\"z_abs_95\"] = z.abs().quantile(0.9545).item()\n self.stats[\"z_abs_99\"] = z.abs().quantile(0.9973).item()\n return z\n\n def decode(self, z):\n \"\"\"\n Args:\n z: (b c t) tensor\n \"\"\"\n return self.decoder(z)\n\n def forward(self, x, skip_decoding=False):\n \"\"\"\n Args:\n x: (b c t) tensor\n skip_decoding: if True, skip the decoding step\n \"\"\"\n z = self.encode(x) # q(z|x)\n\n if skip_decoding:\n # This speeds up the training in cfm only mode\n decoded = None\n else:\n decoded = self.decode(z) # p(x|z)\n predicted = self.head(decoded)\n self.losses = dict(mse=F.mse_loss(predicted, x))\n\n return IRMAEOutput(latent=z, decoded=decoded)" }, { "identifier": "CFM", "path": "resemble_enhance/enhancer/lcfm/lcfm.py", "snippet": "CFM = \"cfm\"" }, { "identifier": "LCFM", "path": "resemble_enhance/enhancer/lcfm/lcfm.py", "snippet": "class LCFM(nn.Module):\n class Mode(Enum):\n AE = \"ae\"\n CFM = \"cfm\"\n\n def __init__(self, ae: IRMAE, cfm: CFM, z_scale: float = 1.0):\n super().__init__()\n self.ae = ae\n self.cfm = cfm\n self.z_scale = z_scale\n self._mode = None\n self._eval_tau = 0.5\n\n @property\n def mode(self):\n return self._mode\n\n def set_mode_(self, mode):\n mode = self.Mode(mode)\n self._mode = mode\n\n if mode == mode.AE:\n freeze_(self.cfm)\n logger.info(\"Freeze cfm\")\n elif mode == mode.CFM:\n freeze_(self.ae)\n logger.info(\"Freeze ae (encoder and decoder)\")\n else:\n raise ValueError(f\"Unknown training mode: {mode}\")\n\n def get_running_train_loop(self):\n try:\n # Lazy import\n from ...utils.train_loop import TrainLoop\n\n return TrainLoop.get_running_loop()\n except ImportError:\n return None\n\n @property\n def global_step(self):\n loop = self.get_running_train_loop()\n if loop is None:\n return None\n return loop.global_step\n\n @torch.no_grad()\n def _visualize(self, x, y, y_):\n loop = self.get_running_train_loop()\n if loop is None:\n 
return\n\n plt.subplot(221)\n plt.imshow(y[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"GT\")\n\n plt.subplot(222)\n y_ = y_[:, : y.shape[1]]\n plt.imshow(y_[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"Posterior\")\n\n plt.subplot(223)\n z_ = self.cfm(x)\n y__ = self.ae.decode(z_)\n y__ = y__[:, : y.shape[1]]\n plt.imshow(y__[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"C-Prior\")\n del y__\n\n plt.subplot(224)\n z_ = torch.randn_like(z_)\n y__ = self.ae.decode(z_)\n y__ = y__[:, : y.shape[1]]\n plt.imshow(y__[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"Prior\")\n del z_, y__\n\n path = loop.make_current_step_viz_path(\"recon\", \".png\")\n path.parent.mkdir(exist_ok=True, parents=True)\n plt.tight_layout()\n plt.savefig(path, dpi=500)\n plt.close()\n\n def _scale(self, z: Tensor):\n return z * self.z_scale\n\n def _unscale(self, z: Tensor):\n return z / self.z_scale\n\n def eval_tau_(self, tau):\n self._eval_tau = tau\n\n def forward(self, x, y: Tensor | None = None, ψ0: Tensor | None = None):\n \"\"\"\n Args:\n x: (b d t), condition mel\n y: (b d t), target mel\n ψ0: (b d t), starting mel\n \"\"\"\n if self.mode == self.Mode.CFM:\n self.ae.eval() # Always set to eval when training cfm\n\n if ψ0 is not None:\n ψ0 = self._scale(self.ae.encode(ψ0))\n if self.training:\n tau = torch.rand_like(ψ0[:, :1, :1])\n else:\n tau = self._eval_tau\n ψ0 = tau * torch.randn_like(ψ0) + (1 - tau) * ψ0\n\n if y is None:\n if self.mode == self.Mode.AE:\n with torch.no_grad():\n training = self.ae.training\n self.ae.eval()\n z = self.ae.encode(x)\n self.ae.train(training)\n else:\n z = self._unscale(self.cfm(x, ψ0=ψ0))\n\n h = self.ae.decode(z)\n else:\n ae_output: IRMAEOutput = self.ae(y, skip_decoding=self.mode == self.Mode.CFM)\n\n if self.mode == self.Mode.CFM:\n _ = self.cfm(x, self._scale(ae_output.latent.detach()), ψ0=ψ0)\n\n h = ae_output.decoded\n\n if h is not None and self.global_step is not None and self.global_step % 100 == 0:\n self._visualize(x[:1], y[:1], h[:1])\n\n return h" }, { "identifier": "UnivNet", "path": "resemble_enhance/enhancer/univnet/univnet.py", "snippet": "class UnivNet(nn.Module):\n @property\n def d_noise(self):\n return 128\n\n @property\n def strides(self):\n return [7, 5, 4, 3]\n\n @property\n def dilations(self):\n return [1, 3, 9, 27]\n\n @property\n def nc(self):\n return self.hp.univnet_nc\n\n @property\n def scale_factor(self) -> int:\n return self.hp.hop_size\n\n def __init__(self, hp: HParams, d_input):\n super().__init__()\n self.d_input = d_input\n\n self.hp = hp\n\n self.blocks = nn.ModuleList(\n [\n LVCBlock(\n self.nc,\n d_input,\n stride=stride,\n dilations=self.dilations,\n cond_hop_length=hop_length,\n kpnet_conv_size=3,\n )\n for stride, hop_length in zip(self.strides, np.cumprod(self.strides))\n ]\n )\n\n self.conv_pre = weight_norm(nn.Conv1d(self.d_noise, self.nc, 7, padding=3, padding_mode=\"reflect\"))\n\n self.conv_post = nn.Sequential(\n nn.LeakyReLU(0.2),\n weight_norm(nn.Conv1d(self.nc, 1, 7, padding=3, padding_mode=\"reflect\")),\n nn.Tanh(),\n )\n\n self.mrstft = MRSTFTLoss(hp)\n\n @property\n def eps(self):\n return 1e-5\n\n def forward(self, x: Tensor, y: Tensor | None = None, npad=10):\n \"\"\"\n Args:\n x: (b c t), acoustic features\n y: (b t), waveform\n Returns:\n z: (b t), waveform\n \"\"\"\n assert x.ndim == 3, \"x must 
be 3D tensor\"\n assert y is None or y.ndim == 2, \"y must be 2D tensor\"\n assert x.shape[1] == self.d_input, f\"x.shape[1] must be {self.d_input}, but got {x.shape}\"\n assert npad >= 0, \"npad must be positive or zero\"\n\n x = F.pad(x, (0, npad), \"constant\", 0)\n z = torch.randn(x.shape[0], self.d_noise, x.shape[2]).to(x)\n z = self.conv_pre(z) # (b c t)\n\n for block in self.blocks:\n z = block(z, x) # (b c t)\n\n z = self.conv_post(z) # (b 1 t)\n z = z[..., : -self.scale_factor * npad]\n z = z.squeeze(1) # (b t)\n\n if y is not None:\n self.losses = self.mrstft(z, y)\n\n return z" } ]
import logging import matplotlib.pyplot as plt import pandas as pd import torch from torch import Tensor, nn from torch.distributions import Beta from ..common import Normalizer from ..denoiser.inference import load_denoiser from ..melspec import MelSpectrogram from ..utils.distributed import global_leader_only from ..utils.train_loop import TrainLoop from .hparams import HParams from .lcfm import CFM, IRMAE, LCFM from .univnet import UnivNet
6,410
logger = logging.getLogger(__name__) def _maybe(fn): def _fn(*args): if args[0] is None: return None return fn(*args) return _fn def _normalize_wav(x: Tensor): return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7) class Enhancer(nn.Module): def __init__(self, hp: HParams): super().__init__() self.hp = hp n_mels = self.hp.num_mels vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim latent_dim = self.hp.lcfm_latent_dim self.lcfm = LCFM(
logger = logging.getLogger(__name__) def _maybe(fn): def _fn(*args): if args[0] is None: return None return fn(*args) return _fn def _normalize_wav(x: Tensor): return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7) class Enhancer(nn.Module): def __init__(self, hp: HParams): super().__init__() self.hp = hp n_mels = self.hp.num_mels vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim latent_dim = self.hp.lcfm_latent_dim self.lcfm = LCFM(
IRMAE(
6
2023-11-15 08:15:51+00:00
8k
PKU-YuanGroup/Chat-UniVi
ChatUniVi/model/language_model/llama.py
[ { "identifier": "MetaModel", "path": "ChatUniVi/model/arch.py", "snippet": "class MetaModel:\n def __init__(self, config):\n super(MetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n if hasattr(config, \"config\"):\n self.use_cluster = config.config[\"use_cluster\"]\n if self.use_cluster:\n self.ctm0 = CTM(sample_ratio=config.config[\"spatial_cluster_rate0\"], embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=5)\n self.block0 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n\n self.ctm1 = CTM(sample_ratio=config.config[\"spatial_cluster_rate1\"], embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=3)\n self.block1 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n\n self.ctm2 = CTM(sample_ratio=config.config[\"spatial_cluster_rate2\"], embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=3)\n self.block2 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n\n self.ctm3 = CTM(sample_ratio=config.config[\"temporal_cluster_rate\"], embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=5)\n self.block3 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n else:\n self.use_cluster = False\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n self.config.use_mm_proj = True\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n\n if not hasattr(self, 'mm_projector') or not self.mm_projector.weight.size(0):\n self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))\n\n def initialize_cluster_modules(self, model_args):\n self.use_cluster = model_args.use_cluster\n\n if self.use_cluster and not hasattr(self, 'ctm0'):\n self.ctm0 = CTM(sample_ratio=model_args.spatial_cluster_rate0, embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=5)\n self.block0 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n\n self.ctm1 = CTM(sample_ratio=model_args.spatial_cluster_rate1, embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=3)\n self.block1 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n\n self.ctm2 = CTM(sample_ratio=model_args.spatial_cluster_rate2, embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=3)\n self.block2 = 
TCBlock(dim=self.config.mm_hidden_size, num_heads=8)\n\n self.ctm3 = CTM(sample_ratio=model_args.temporal_cluster_rate, embed_dim=self.config.mm_hidden_size, dim_out=self.config.mm_hidden_size, k=5)\n self.block3 = TCBlock(dim=self.config.mm_hidden_size, num_heads=8)" }, { "identifier": "ChatUniViMetaForCausalLM", "path": "ChatUniVi/model/arch.py", "snippet": "class ChatUniViMetaForCausalLM(ABC):\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images, select_feature=\"patch\")\n return image_features\n\n def positional_encoding(self, x, num_features=1024, max_len=64):\n p = torch.zeros((1, max_len, num_features))\n _x = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1) / torch.pow(10000,\n torch.arange(0, num_features, 2, dtype=torch.float32) / num_features)\n\n p[:, :, 0::2] = torch.sin(_x)\n p[:, :, 1::2] = torch.cos(_x)\n x = x + p[:, :x.shape[1], :].to(x.device).to(x.dtype)\n return x\n\n def project(self, image_features, input_type=\"image\"):\n if self.get_model().use_cluster:\n if input_type == \"image\":\n cluster_image_features = []\n token_dict = {'x': image_features,\n 'token_num': image_features.size(1),\n 'idx_token': torch.arange(image_features.size(1))[None, :].repeat(\n image_features.size(0), 1),\n 'agg_weight': image_features.new_ones(image_features.size(0), image_features.size(1),\n 1),\n 'mask': None}\n\n token_dict = self.get_model().block0(self.get_model().ctm0(token_dict))\n cluster_image_features.append(token_dict[\"x\"])\n\n token_dict = self.get_model().block1(self.get_model().ctm1(token_dict))\n cluster_image_features.append(token_dict[\"x\"])\n\n token_dict = self.get_model().block2(self.get_model().ctm2(token_dict))\n cluster_image_features.append(token_dict[\"x\"])\n\n image_features = torch.cat(cluster_image_features, dim=1)\n image_features = image_features.to(self.get_model().mm_projector.weight.dtype)\n else:\n cls_features = torch.mean(image_features, dim=1, keepdim=False).unsqueeze(0).clone()\n token_dict = {'x': cls_features,\n 'token_num': cls_features.size(1),\n 'idx_token': torch.arange(cls_features.size(1))[None, :].repeat(\n cls_features.size(0), 1),\n 'agg_weight': cls_features.new_ones(cls_features.size(0), cls_features.size(1),\n 1),\n 'mask': None}\n\n down_dict, token_dict = self.get_model().ctm3(token_dict)\n events = OrderedDict()\n\n max_len = 0\n for id, i in enumerate(down_dict[\"idx_token\"][0].tolist()):\n if i not in events:\n events[i] = [id]\n else:\n events[i].append(id)\n max_len = len(events[i]) if max_len < len(events[i]) else max_len\n\n cluster_image_features = []\n token_dict = {'x': image_features,\n 'token_num': image_features.size(1),\n 'idx_token': torch.arange(image_features.size(1))[None, :].repeat(\n image_features.size(0), 1),\n 'agg_weight': image_features.new_ones(image_features.size(0), image_features.size(1),\n 1),\n 'mask': None}\n\n token_dict0 = self.get_model().block0(self.get_model().ctm0(token_dict))\n token_dict1 = self.get_model().block1(self.get_model().ctm1(token_dict0))\n token_dict2 = self.get_model().block2(self.get_model().ctm2(token_dict1))\n\n for id, key in enumerate(events):\n cur_image_features0 = torch.cat([token_dict0[\"x\"][i] for i in events[key]], dim=0).unsqueeze(0)\n token_dict = {'x': cur_image_features0,\n 'token_num': cur_image_features0.size(1),\n 'idx_token': torch.arange(cur_image_features0.size(1))[None, 
:].repeat(\n cur_image_features0.size(0), 1),\n 'agg_weight': cur_image_features0.new_ones(cur_image_features0.size(0),\n cur_image_features0.size(1),\n 1),\n 'mask': None}\n\n cur_token_dict0 = self.get_model().block0(self.get_model().ctm0(token_dict))\n cluster_image_features.append(cur_token_dict0[\"x\"])\n\n cur_image_features1 = torch.cat([token_dict1[\"x\"][i] for i in events[key]], dim=0).unsqueeze(0)\n token_dict = {'x': cur_image_features1,\n 'token_num': cur_image_features1.size(1),\n 'idx_token': torch.arange(cur_image_features1.size(1))[None, :].repeat(\n cur_image_features1.size(0), 1),\n 'agg_weight': cur_image_features1.new_ones(cur_image_features1.size(0),\n cur_image_features1.size(1),\n 1),\n 'mask': None}\n\n cur_token_dict1 = self.get_model().block1(self.get_model().ctm1(token_dict))\n cluster_image_features.append(cur_token_dict1[\"x\"])\n\n cur_image_features2 = torch.cat([token_dict2[\"x\"][i] for i in events[key]], dim=0).unsqueeze(0)\n token_dict = {'x': cur_image_features2,\n 'token_num': cur_image_features2.size(1),\n 'idx_token': torch.arange(cur_image_features2.size(1))[None, :].repeat(\n cur_image_features2.size(0), 1),\n 'agg_weight': cur_image_features2.new_ones(cur_image_features2.size(0),\n cur_image_features2.size(1),\n 1),\n 'mask': None}\n\n cur_token_dict2 = self.get_model().block2(self.get_model().ctm2(token_dict))\n cluster_image_features.append(cur_token_dict2[\"x\"])\n\n image_features = torch.cat(cluster_image_features, dim=1)\n image_features = image_features.to(self.get_model().mm_projector.weight.dtype)\n\n else:\n if input_type == \"video\":\n image_features, cls_features = torch.mean(image_features, dim=0, keepdim=False).unsqueeze(\n 0), torch.mean(image_features, dim=1, keepdim=False).unsqueeze(0)\n image_features = torch.cat([image_features, cls_features], dim=1)\n\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (\n 0. 
* self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n\n if len(image_token_indices) > 1:\n temp = []\n cur, pre = image_token_indices[0], image_token_indices[0]\n for i in image_token_indices:\n cur = i\n if cur - pre == 1:\n temp[-1] = temp[-1] + [cur]\n else:\n temp.append([cur])\n pre = cur\n\n for i in temp:\n image_token_start = image_token_indices[0]\n image_token_end = image_token_indices[-1]\n cur_image_features = []\n\n for _ in i:\n cur_image_features.append(image_features[cur_image_idx])\n cur_image_idx += 1\n\n if len(i) > 2:\n cur_image_features = torch.stack(cur_image_features, dim=0)\n cur_image_features = self.project(cur_image_features, input_type=\"video\")\n t, l, n = cur_image_features.size()\n cur_image_features = cur_image_features.contiguous().view(t * l, n)\n else:\n cur_image_features = torch.stack(cur_image_features, dim=0)\n cur_image_features = self.project(cur_image_features, input_type=\"image\")\n t, l, n = cur_image_features.size()\n cur_image_features = cur_image_features.contiguous().view(t * l, n)\n\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start - 1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start - 1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_end + 1:image_token_end + 2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_end:image_token_end + 1])\n cur_labels = cur_labels[image_token_end + 2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_end + 1:]\n\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end',\n False):\n cur_input_ids = cur_input_ids[image_token_end + 2:]\n else:\n cur_input_ids = cur_input_ids[image_token_end + 1:]\n\n elif image_token_indices.numel() > 0:\n cur_image_features = []\n image_token_start = image_token_indices[0]\n image_token_end = image_token_indices[-1]\n\n for _ in image_token_indices:\n cur_image_features.append(image_features[cur_image_idx])\n cur_image_idx += 1\n\n cur_image_features = torch.stack(cur_image_features, dim=0)\n cur_image_features = self.project(cur_image_features, input_type=\"image\")\n t, l, n = cur_image_features.size()\n cur_image_features = cur_image_features.contiguous().view(t * l, n)\n\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n 
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_end+1:image_token_end+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_end:image_token_end+1])\n cur_labels = cur_labels[image_token_end+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_end+1:]\n\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_end+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_end+1:]\n\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n 
attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n tokenizer.add_tokens([DEFAULT_VIDEO_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, DEFAULT_VID_START_TOKEN, DEFAULT_VID_END_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False" } ]
from typing import List, Optional, Tuple, Union from torch.nn import CrossEntropyLoss from transformers import AutoConfig, AutoModelForCausalLM, \ LlamaConfig, LlamaModel, LlamaForCausalLM from transformers.modeling_outputs import CausalLMOutputWithPast from ChatUniVi.model.arch import MetaModel, ChatUniViMetaForCausalLM import torch import torch.nn as nn
5,914
class ChatUniViConfig(LlamaConfig): model_type = "ChatUniVi" class ChatUniViLlamaModel(MetaModel, LlamaModel): config_class = ChatUniViConfig def __init__(self, config: LlamaConfig): super(ChatUniViLlamaModel, self).__init__(config)
class ChatUniViConfig(LlamaConfig): model_type = "ChatUniVi" class ChatUniViLlamaModel(MetaModel, LlamaModel): config_class = ChatUniViConfig def __init__(self, config: LlamaConfig): super(ChatUniViLlamaModel, self).__init__(config)
class ChatUniViLlamaForCausalLM(LlamaForCausalLM, ChatUniViMetaForCausalLM):
1
2023-11-13 11:52:56+00:00
8k
banodoco/Steerable-Motion
imports/AdvancedControlNet/weight_nodes.py
[ { "identifier": "TimestepKeyframeImport", "path": "imports/AdvancedControlNet/control.py", "snippet": "class TimestepKeyframeImport:\n def __init__(self,\n start_percent: float = 0.0,\n strength: float = 1.0,\n interpolation: str = StrengthInterpolationImport.NONE,\n control_weights: ControlWeightsImport = None,\n latent_keyframes: LatentKeyframeGroupImport = None,\n null_latent_kf_strength: float = 0.0,\n inherit_missing: bool = True,\n guarantee_usage: bool = True,\n mask_hint_orig: Tensor = None) -> None:\n self.start_percent = start_percent\n self.start_t = 999999999.9\n self.strength = strength\n self.interpolation = interpolation\n self.control_weights = control_weights\n self.latent_keyframes = latent_keyframes\n self.null_latent_kf_strength = null_latent_kf_strength\n self.inherit_missing = inherit_missing\n self.guarantee_usage = guarantee_usage\n self.mask_hint_orig = mask_hint_orig\n\n def has_control_weights(self):\n return self.control_weights is not None\n \n def has_latent_keyframes(self):\n return self.latent_keyframes is not None\n \n def has_mask_hint(self):\n return self.mask_hint_orig is not None\n \n \n @classmethod\n def default(cls) -> 'TimestepKeyframeImport':\n return cls(0.0)" }, { "identifier": "TimestepKeyframeGroupImport", "path": "imports/AdvancedControlNet/control.py", "snippet": "class TimestepKeyframeGroupImport:\n def __init__(self) -> None:\n self.keyframes: list[TimestepKeyframeImport] = []\n self.keyframes.append(TimestepKeyframeImport.default())\n\n def add(self, keyframe: TimestepKeyframeImport) -> None:\n added = False\n # replace existing keyframe if same start_percent\n for i in range(len(self.keyframes)):\n if self.keyframes[i].start_percent == keyframe.start_percent:\n self.keyframes[i] = keyframe\n added = True\n break\n if not added:\n self.keyframes.append(keyframe)\n self.keyframes.sort(key=lambda k: k.start_percent)\n\n def get_index(self, index: int) -> Union[TimestepKeyframeImport, None]:\n try:\n return self.keyframes[index]\n except IndexError:\n return None\n \n def has_index(self, index: int) -> int:\n return index >=0 and index < len(self.keyframes)\n\n def __getitem__(self, index) -> TimestepKeyframeImport:\n return self.keyframes[index]\n \n def __len__(self) -> int:\n return len(self.keyframes)\n\n def is_empty(self) -> bool:\n return len(self.keyframes) == 0\n \n def clone(self) -> 'TimestepKeyframeGroupImport':\n cloned = TimestepKeyframeGroupImport()\n for tk in self.keyframes:\n cloned.add(tk)\n return cloned\n \n @classmethod\n def default(cls, keyframe: TimestepKeyframeImport) -> 'TimestepKeyframeGroupImport':\n group = cls()\n group.keyframes[0] = keyframe\n return group" }, { "identifier": "ControlWeightsImport", "path": "imports/AdvancedControlNet/control.py", "snippet": "class ControlWeightsImport:\n def __init__(self, weight_type: str, base_multiplier: float=1.0, flip_weights: bool=False, weights: list[float]=None, weight_mask: Tensor=None):\n self.weight_type = weight_type\n self.base_multiplier = base_multiplier\n self.flip_weights = flip_weights\n self.weights = weights\n if self.weights is not None and self.flip_weights:\n self.weights.reverse()\n self.weight_mask = weight_mask\n\n def get(self, idx: int) -> Union[float, Tensor]:\n # if weights is not none, return index\n if self.weights is not None:\n return self.weights[idx]\n return 1.0\n\n @classmethod\n def default(cls):\n return cls(ControlWeightTypeImport.DEFAULT)\n\n @classmethod\n def universal(cls, base_multiplier: float, flip_weights: bool=False):\n 
return cls(ControlWeightTypeImport.UNIVERSAL, base_multiplier=base_multiplier, flip_weights=flip_weights)\n \n @classmethod\n def universal_mask(cls, weight_mask: Tensor):\n return cls(ControlWeightTypeImport.UNIVERSAL, weight_mask=weight_mask)\n\n @classmethod\n def t2iadapter(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*12\n return cls(ControlWeightTypeImport.T2IADAPTER, weights=weights,flip_weights=flip_weights)\n\n @classmethod\n def controlnet(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*13\n return cls(ControlWeightTypeImport.CONTROLNET, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllora(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n weights = [1.0]*10\n return cls(ControlWeightTypeImport.CONTROLLORA, weights=weights, flip_weights=flip_weights)\n \n @classmethod\n def controllllite(cls, weights: list[float]=None, flip_weights: bool=False):\n if weights is None:\n # TODO: make this have a real value\n weights = [1.0]*200\n return cls(ControlWeightTypeImport.CONTROLLLLITE, weights=weights, flip_weights=flip_weights)" }, { "identifier": "get_properly_arranged_t2i_weights", "path": "imports/AdvancedControlNet/control.py", "snippet": "def get_properly_arranged_t2i_weights(initial_weights: list[float]):\n new_weights = []\n new_weights.extend([initial_weights[0]]*3)\n new_weights.extend([initial_weights[1]]*3)\n new_weights.extend([initial_weights[2]]*3)\n new_weights.extend([initial_weights[3]]*3)\n return new_weights" }, { "identifier": "linear_conversion", "path": "imports/AdvancedControlNet/control.py", "snippet": "def linear_conversion(x, x_min=0.0, x_max=1.0, new_min=0.0, new_max=1.0):\n return (((x - x_min)/(x_max - x_min)) * (new_max - new_min)) + new_min" }, { "identifier": "logger", "path": "imports/AdvancedControlNet/logger.py", "snippet": "class ColoredFormatter(logging.Formatter):\n COLORS = {\n \"DEBUG\": \"\\033[0;36m\", # CYAN\n \"INFO\": \"\\033[0;32m\", # GREEN\n \"WARNING\": \"\\033[0;33m\", # YELLOW\n \"ERROR\": \"\\033[0;31m\", # RED\n \"CRITICAL\": \"\\033[0;37;41m\", # WHITE ON RED\n \"RESET\": \"\\033[0m\", # RESET COLOR\n }\n def format(self, record):" } ]
from torch import Tensor from .control import TimestepKeyframeImport, TimestepKeyframeGroupImport, ControlWeightsImport, get_properly_arranged_t2i_weights, linear_conversion from .logger import logger import torch
4,377
else: mask = linear_conversion(mask, x_min, x_max, min_base_multiplier, max_base_multiplier) weights = ControlWeightsImport.universal_mask(weight_mask=mask) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class ScaledSoftUniversalWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "base_multiplier": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 1.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" def load_weights(self, base_multiplier, flip_weights): weights = ControlWeightsImport.universal(base_multiplier=base_multiplier, flip_weights=flip_weights) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class SoftControlNetWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "weight_00": ("FLOAT", {"default": 0.09941396206337118, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_01": ("FLOAT", {"default": 0.12050177219802567, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_02": ("FLOAT", {"default": 0.14606275417942507, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_03": ("FLOAT", {"default": 0.17704576264172736, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_04": ("FLOAT", {"default": 0.214600924414215, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_05": ("FLOAT", {"default": 0.26012233262329093, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_06": ("FLOAT", {"default": 0.3152997971191405, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_07": ("FLOAT", {"default": 0.3821815722656249, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_08": ("FLOAT", {"default": 0.4632503906249999, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_09": ("FLOAT", {"default": 0.561515625, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_10": ("FLOAT", {"default": 0.6806249999999999, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_11": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_12": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet" def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights): weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12] weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class CustomControlNetWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "weight_00": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_01": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_02": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_04": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_05": ("FLOAT", 
{"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_06": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_07": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_08": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_09": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_10": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_11": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_12": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), } } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet" def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights): weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12] weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class SoftT2IAdapterWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "weight_00": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_01": ("FLOAT", {"default": 0.62, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_02": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter" def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights): weights = [weight_00, weight_01, weight_02, weight_03]
WEIGHTS_RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT") class DefaultWeightsImport: @classmethod def INPUT_TYPES(s): return { } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" def load_weights(self): weights = ControlWeightsImport.default() return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class ScaledSoftMaskedUniversalWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "mask": ("MASK", ), "min_base_multiplier": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ), "max_base_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}, ), #"lock_min": ("BOOLEAN", {"default": False}, ), #"lock_max": ("BOOLEAN", {"default": False}, ), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" def load_weights(self, mask: Tensor, min_base_multiplier: float, max_base_multiplier: float, lock_min=False, lock_max=False): # normalize mask mask = mask.clone() x_min = 0.0 if lock_min else mask.min() x_max = 1.0 if lock_max else mask.max() if x_min == x_max: mask = torch.ones_like(mask) * max_base_multiplier else: mask = linear_conversion(mask, x_min, x_max, min_base_multiplier, max_base_multiplier) weights = ControlWeightsImport.universal_mask(weight_mask=mask) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class ScaledSoftUniversalWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "base_multiplier": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 1.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights" def load_weights(self, base_multiplier, flip_weights): weights = ControlWeightsImport.universal(base_multiplier=base_multiplier, flip_weights=flip_weights) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class SoftControlNetWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "weight_00": ("FLOAT", {"default": 0.09941396206337118, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_01": ("FLOAT", {"default": 0.12050177219802567, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_02": ("FLOAT", {"default": 0.14606275417942507, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_03": ("FLOAT", {"default": 0.17704576264172736, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_04": ("FLOAT", {"default": 0.214600924414215, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_05": ("FLOAT", {"default": 0.26012233262329093, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_06": ("FLOAT", {"default": 0.3152997971191405, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_07": ("FLOAT", {"default": 0.3821815722656249, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_08": ("FLOAT", {"default": 0.4632503906249999, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_09": ("FLOAT", {"default": 0.561515625, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_10": ("FLOAT", {"default": 0.6806249999999999, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_11": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_12": ("FLOAT", 
{"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet" def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights): weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12] weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class CustomControlNetWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "weight_00": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_01": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_02": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_04": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_05": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_06": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_07": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_08": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_09": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_10": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_11": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_12": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), } } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/ControlNet" def load_weights(self, weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12, flip_weights): weights = [weight_00, weight_01, weight_02, weight_03, weight_04, weight_05, weight_06, weight_07, weight_08, weight_09, weight_10, weight_11, weight_12] weights = ControlWeightsImport.controlnet(weights, flip_weights=flip_weights) return (weights, TimestepKeyframeGroupImport.default(TimestepKeyframeImport(control_weights=weights))) class SoftT2IAdapterWeightsImport: @classmethod def INPUT_TYPES(s): return { "required": { "weight_00": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_01": ("FLOAT", {"default": 0.62, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_02": ("FLOAT", {"default": 0.825, "min": 0.0, "max": 10.0, "step": 0.001}, ), "weight_03": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ), "flip_weights": ("BOOLEAN", {"default": False}), }, } RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) RETURN_NAMES = WEIGHTS_RETURN_NAMES FUNCTION = "load_weights" CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/weights/T2IAdapter" def load_weights(self, weight_00, weight_01, weight_02, weight_03, flip_weights): weights = [weight_00, weight_01, weight_02, weight_03]
weights = get_properly_arranged_t2i_weights(weights)
3
2023-11-11 01:26:26+00:00
8k
x0rzavi/github-readme-terminal
gifos/utils/fetch_github_stats.py
[ { "identifier": "calc_github_rank", "path": "gifos/utils/calc_github_rank.py", "snippet": "def calc_github_rank(\n all_commits: bool,\n commits: int,\n prs: int,\n issues: int,\n reviews: int,\n stars: int,\n followers: int,\n) -> GithubUserRank:\n \"\"\"Calculate the GitHub rank of a user based on their activity.\n\n The rank is calculated using a weighted sum of various activity metrics, including\n commits, pull requests, issues, reviews, stars, and followers. Each metric is\n normalized using a cumulative distribution function (either exponential or log-\n normal) before being weighted and summed.\n\n :param all_commits: Whether to consider all commits or only those in the last year.\n :type all_commits: bool\n :param commits: The number of commits the user has made.\n :type commits: int\n :param prs: The number of pull requests the user has made.\n :type prs: int\n :param issues: The number of issues the user has opened.\n :type issues: int\n :param reviews: The number of reviews the user has made.\n :type reviews: int\n :param stars: The number of stars the user's repositories have received.\n :type stars: int\n :param followers: The number of followers the user has.\n :type followers: int\n :return: The user's GitHub rank and percentile.\n :rtype: GithubUserRank\n \"\"\"\n COMMITS_MEDIAN = 1000 if all_commits else 250\n COMMITS_WEIGHT = 2\n PRS_MEDIAN = 50\n PRS_WEIGHT = 3\n ISSUES_MEDIAN = 25\n ISSUES_WEIGHT = 1\n REVIEWS_MEDIAN = 2\n REVIEWS_WEIGHT = 1\n STARS_MEDIAN = 50\n STARS_WEIGHT = 4\n FOLLOWERS_MEDIAN = 10\n FOLLOWERS_WEIGHT = 1\n TOTAL_WEIGHT = (\n COMMITS_WEIGHT\n + PRS_WEIGHT\n + ISSUES_WEIGHT\n + REVIEWS_WEIGHT\n + STARS_WEIGHT\n + FOLLOWERS_WEIGHT\n )\n\n THRESHOLDS = [1, 12.5, 25, 37.5, 50, 62.5, 75, 87.5, 100]\n LEVELS = [\"S\", \"A+\", \"A\", \"A-\", \"B+\", \"B\", \"B-\", \"C+\", \"C\"]\n rank = (\n 1\n - (\n COMMITS_WEIGHT * exponential_cdf(commits / COMMITS_MEDIAN)\n + PRS_WEIGHT * exponential_cdf(prs / PRS_MEDIAN)\n + ISSUES_WEIGHT * exponential_cdf(issues / ISSUES_MEDIAN)\n + REVIEWS_WEIGHT * exponential_cdf(reviews / REVIEWS_MEDIAN)\n + STARS_WEIGHT * log_normal_cdf(stars / STARS_MEDIAN)\n + FOLLOWERS_WEIGHT * log_normal_cdf(followers / FOLLOWERS_MEDIAN)\n )\n / TOTAL_WEIGHT\n )\n\n level = LEVELS[\n next((i for i, t in enumerate(THRESHOLDS) if rank * 100 <= t), len(LEVELS) - 1)\n ]\n percentile = round(rank * 100, 2)\n return GithubUserRank(level, percentile)" }, { "identifier": "GithubUserStats", "path": "gifos/utils/schemas/github_user_stats.py", "snippet": "class GithubUserStats:\n \"\"\"A class to represent a GitHub user's statistics.\n\n This class represents a GitHub user's statistics.\n\n Attributes:\n account_name: A string that represents the user's account name.\n total_followers: An integer that represents the total number of followers the user has.\n total_stargazers: An integer that represents the total number of stargazers the user has.\n total_issues: An integer that represents the total number of issues the user has opened.\n total_commits_all_time: An integer that represents the total number of commits the user has made all time.\n total_commits_last_year: An integer that represents the total number of commits the user has made in the last year.\n total_pull_requests_made: An integer that represents the total number of pull requests the user has made.\n total_pull_requests_merged: An integer that represents the total number of the user's pull requests that have been merged.\n pull_requests_merge_percentage: A float that represents the 
percentage of the user's pull requests that have been merged.\n total_pull_requests_reviewed: An integer that represents the total number of pull requests the user has reviewed.\n total_repo_contributions: An integer that represents the total number of repositories the user has contributed to.\n languages_sorted: A list of tuples that represents the user's most used languages, sorted by usage. Each tuple contains a language name and a usage percentage.\n user_rank: A `GithubUserRank` object that represents the user's GitHub rank.\n \"\"\"\n\n __slots__ = [\n \"account_name\",\n \"total_followers\",\n \"total_stargazers\",\n \"total_issues\",\n \"total_commits_all_time\",\n \"total_commits_last_year\",\n \"total_pull_requests_made\",\n \"total_pull_requests_merged\",\n \"pull_requests_merge_percentage\",\n \"total_pull_requests_reviewed\",\n \"total_repo_contributions\",\n \"languages_sorted\",\n \"user_rank\",\n ]\n account_name: str\n total_followers: int\n total_stargazers: int\n total_issues: int\n total_commits_all_time: int\n total_commits_last_year: int\n total_pull_requests_made: int\n total_pull_requests_merged: int\n pull_requests_merge_percentage: float\n total_pull_requests_reviewed: int\n total_repo_contributions: int\n languages_sorted: list\n user_rank: GithubUserRank" } ]
import os import requests import sys from dotenv import load_dotenv from gifos.utils.calc_github_rank import calc_github_rank from gifos.utils.schemas.github_user_stats import GithubUserStats
3,685
""" REST_API_URL = f"https://api.github.com/search/commits?q=author:{user_name}" headers = { "Content-Type": "application/json", "User-Agent": "x0rzavi", "Accept": "application/vnd.github+json", "Authorization": f"token {GITHUB_TOKEN}", } response = requests.get(REST_API_URL, headers=headers) if response.status_code == 200: json_obj = response.json() total_commits_all_time = json_obj["total_count"] print(f"INFO: Total commits fetched for {user_name}") return total_commits_all_time else: print(f"ERROR: {response.status_code}") return None def fetch_github_stats( user_name: str, ignore_repos: list = None, include_all_commits: bool = False ) -> GithubUserStats: """Fetch GitHub statistics for a user. This function fetches various statistics for a GitHub user. The function uses the `GITHUB_TOKEN` environment variable for authentication. :param user_name: The username of the user to fetch statistics for. :type user_name: str :param ignore_repos: A list of repository names to ignore when fetching statistics. If not provided, all repositories are included. :type ignore_repos: list, optional :param include_all_commits: A boolean indicating whether to include all commits when calculating the user's GitHub rank. If False, only commits from the last year are included. :type include_all_commits: bool, optional :return: A `GithubUserStats` object containing the fetched statistics if the request is successful, otherwise None. :rtype: GithubUserStats or None """ if not GITHUB_TOKEN: print("ERROR: Please provide GITHUB_TOKEN") sys.exit(1) repo_end_cursor = None total_stargazers = 0 languages_dict = {} def update_languages(languages, languages_dict): for language in languages: language_name = language["node"]["name"] language_size = language["size"] languages_dict[language_name] = ( languages_dict.get(language_name, 0) + language_size ) def process_repo(repos, ignore_repos, languages_dict): total_stargazers = 0 for repo in repos: if repo["name"] not in (ignore_repos or []): total_stargazers += repo["stargazerCount"] if not repo["isFork"]: update_languages(repo["languages"]["edges"], languages_dict) return total_stargazers while True: # paginate repository stats repo_stats = fetch_repo_stats(user_name, repo_end_cursor) if repo_stats: total_stargazers = process_repo( repo_stats["nodes"], ignore_repos, languages_dict ) if repo_stats["pageInfo"]["hasNextPage"]: repo_end_cursor = repo_stats["pageInfo"]["endCursor"] else: break else: break total_commits_all_time = fetch_total_commits(user_name) # fetch only once total_languages_size = sum(languages_dict.values()) languages_percentage = { language: round((size / total_languages_size) * 100, 2) for language, size in languages_dict.items() } languages_sorted = sorted( languages_percentage.items(), key=lambda n: n[1], reverse=True ) user_stats = fetch_user_stats(user_name) if user_stats: if user_stats["pullRequests"]["totalCount"] > 0: pull_requests_merge_percentage = round( (user_stats["mergedPullRequests"]["totalCount"] / user_stats["pullRequests"]["totalCount"]) * 100, 2 ) else: pull_requests_merge_percentage = 0 user_details = GithubUserStats( account_name=user_stats["name"], total_followers=user_stats["followers"]["totalCount"], total_stargazers=total_stargazers, total_issues=user_stats["issues"]["totalCount"], total_commits_all_time=total_commits_all_time, total_commits_last_year=( user_stats["contributionsCollection"]["restrictedContributionsCount"] + user_stats["contributionsCollection"]["totalCommitContributions"] ), 
total_pull_requests_made=user_stats["pullRequests"]["totalCount"], total_pull_requests_merged=user_stats["mergedPullRequests"]["totalCount"], pull_requests_merge_percentage=pull_requests_merge_percentage, total_pull_requests_reviewed=user_stats["contributionsCollection"][ "totalPullRequestReviewContributions" ], total_repo_contributions=user_stats["repositoriesContributedTo"][ "totalCount" ], languages_sorted=languages_sorted[:6], # top 6 languages
# TODO # [] Language colors # [] Profile image ascii art # [] Optimize code # [] Optimize API calls # [] Catch errors # [] Retry on error """This module contains a function for fetching a GitHub user's statistics.""" load_dotenv() GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") GRAPHQL_ENDPOINT = "https://api.github.com/graphql" def fetch_repo_stats(user_name: str, repo_end_cursor: str = None) -> dict: """Fetch statistics for a user's repositories. This function sends a GraphQL query to the GitHub API to fetch statistics for a user's repositories. The function uses the `GITHUB_TOKEN` environment variable for authentication and the `GRAPHQL_ENDPOINT` constant for the API endpoint. :param user_name: The username of the user to fetch statistics for. :type user_name: str :param repo_end_cursor: The end cursor for pagination. If not provided, the function fetches statistics from the beginning. :type repo_end_cursor: str, optional :return: A dictionary containing the fetched statistics if the request is successful, otherwise None. :rtype: dict or None """ query = """ query repoInfo( $user_name: String! $repo_end_cursor: String ) { user(login: $user_name) { repositories ( first: 100, after: $repo_end_cursor orderBy: { field: STARGAZERS, direction: DESC } ownerAffiliations: OWNER ) { totalCount nodes { name isFork stargazerCount languages( first: 10 orderBy: { field: SIZE, direction: DESC } ) { edges { node { name # color } size } } } pageInfo { endCursor hasNextPage } } } # rateLimit { # cost # limit # remaining # used # resetAt # } } """ headers = {"Authorization": f"bearer {GITHUB_TOKEN}"} variables = {"user_name": user_name, "repo_end_cursor": repo_end_cursor} response = requests.post( GRAPHQL_ENDPOINT, json={"query": query, "variables": variables}, headers=headers ) if response.status_code == 200: json_obj = response.json() if "errors" in json_obj: print(f"ERROR: {json_obj['errors']}") return None else: print(f"INFO: Repository details fetched for {user_name}") return json_obj["data"]["user"]["repositories"] else: print(f"ERROR: {response.status_code}") return None def fetch_user_stats(user_name: str) -> dict: """Fetch statistics for a GitHub user. This function sends a GraphQL query to the GitHub API to fetch statistics for a GitHub user. The function uses the `GITHUB_TOKEN` environment variable for authentication and the `GRAPHQL_ENDPOINT` constant for the API endpoint. :param user_name: The username of the user to fetch statistics for. :type user_name: str :return: A dictionary containing the fetched statistics if the request is successful, otherwise None. :rtype: dict or None """ query = """ query userInfo($user_name: String!) 
{ user(login: $user_name) { name followers (first: 1) { totalCount } repositoriesContributedTo ( first: 1 contributionTypes: [COMMIT, ISSUE, PULL_REQUEST, REPOSITORY] ) { totalCount } contributionsCollection { # contributionCalendar { # totalContributions # } totalCommitContributions restrictedContributionsCount totalPullRequestReviewContributions } issues(first: 1) { totalCount } pullRequests(first: 1) { totalCount } mergedPullRequests: pullRequests(states: MERGED, first: 1) { totalCount } } # rateLimit { # cost # limit # remaining # used # resetAt # } } """ headers = {"Authorization": f"bearer {GITHUB_TOKEN}"} variables = {"user_name": user_name} response = requests.post( GRAPHQL_ENDPOINT, json={"query": query, "variables": variables}, headers=headers ) if response.status_code == 200: json_obj = response.json() if "errors" in json_obj: print(f"ERROR: {json_obj['errors']}") return None else: print(f"INFO: User details fetched for {user_name}") return json_obj["data"]["user"] else: print(f"ERROR: {response.status_code}") return None # Reference: https://github.com/anuraghazra/github-readme-stats/blob/23472f40e81170ba452c38a99abc674db0000ce6/src/fetchers/stats-fetcher.js#L170 def fetch_total_commits(user_name: str) -> int: """Fetch the total number of commits (lifetime) made by a GitHub user. This function sends a GET request to the GitHub REST API to fetch the total number of commits made by a GitHub user. The function uses the `GITHUB_TOKEN` environment variable for authentication. :param user_name: The username of the user to fetch the total number of commits for. :type user_name: str :return: The total number of commits made by the user if the request is successful, otherwise None. :rtype: int or None """ REST_API_URL = f"https://api.github.com/search/commits?q=author:{user_name}" headers = { "Content-Type": "application/json", "User-Agent": "x0rzavi", "Accept": "application/vnd.github+json", "Authorization": f"token {GITHUB_TOKEN}", } response = requests.get(REST_API_URL, headers=headers) if response.status_code == 200: json_obj = response.json() total_commits_all_time = json_obj["total_count"] print(f"INFO: Total commits fetched for {user_name}") return total_commits_all_time else: print(f"ERROR: {response.status_code}") return None def fetch_github_stats( user_name: str, ignore_repos: list = None, include_all_commits: bool = False ) -> GithubUserStats: """Fetch GitHub statistics for a user. This function fetches various statistics for a GitHub user. The function uses the `GITHUB_TOKEN` environment variable for authentication. :param user_name: The username of the user to fetch statistics for. :type user_name: str :param ignore_repos: A list of repository names to ignore when fetching statistics. If not provided, all repositories are included. :type ignore_repos: list, optional :param include_all_commits: A boolean indicating whether to include all commits when calculating the user's GitHub rank. If False, only commits from the last year are included. :type include_all_commits: bool, optional :return: A `GithubUserStats` object containing the fetched statistics if the request is successful, otherwise None. 
:rtype: GithubUserStats or None """ if not GITHUB_TOKEN: print("ERROR: Please provide GITHUB_TOKEN") sys.exit(1) repo_end_cursor = None total_stargazers = 0 languages_dict = {} def update_languages(languages, languages_dict): for language in languages: language_name = language["node"]["name"] language_size = language["size"] languages_dict[language_name] = ( languages_dict.get(language_name, 0) + language_size ) def process_repo(repos, ignore_repos, languages_dict): total_stargazers = 0 for repo in repos: if repo["name"] not in (ignore_repos or []): total_stargazers += repo["stargazerCount"] if not repo["isFork"]: update_languages(repo["languages"]["edges"], languages_dict) return total_stargazers while True: # paginate repository stats repo_stats = fetch_repo_stats(user_name, repo_end_cursor) if repo_stats: total_stargazers = process_repo( repo_stats["nodes"], ignore_repos, languages_dict ) if repo_stats["pageInfo"]["hasNextPage"]: repo_end_cursor = repo_stats["pageInfo"]["endCursor"] else: break else: break total_commits_all_time = fetch_total_commits(user_name) # fetch only once total_languages_size = sum(languages_dict.values()) languages_percentage = { language: round((size / total_languages_size) * 100, 2) for language, size in languages_dict.items() } languages_sorted = sorted( languages_percentage.items(), key=lambda n: n[1], reverse=True ) user_stats = fetch_user_stats(user_name) if user_stats: if user_stats["pullRequests"]["totalCount"] > 0: pull_requests_merge_percentage = round( (user_stats["mergedPullRequests"]["totalCount"] / user_stats["pullRequests"]["totalCount"]) * 100, 2 ) else: pull_requests_merge_percentage = 0 user_details = GithubUserStats( account_name=user_stats["name"], total_followers=user_stats["followers"]["totalCount"], total_stargazers=total_stargazers, total_issues=user_stats["issues"]["totalCount"], total_commits_all_time=total_commits_all_time, total_commits_last_year=( user_stats["contributionsCollection"]["restrictedContributionsCount"] + user_stats["contributionsCollection"]["totalCommitContributions"] ), total_pull_requests_made=user_stats["pullRequests"]["totalCount"], total_pull_requests_merged=user_stats["mergedPullRequests"]["totalCount"], pull_requests_merge_percentage=pull_requests_merge_percentage, total_pull_requests_reviewed=user_stats["contributionsCollection"][ "totalPullRequestReviewContributions" ], total_repo_contributions=user_stats["repositoriesContributedTo"][ "totalCount" ], languages_sorted=languages_sorted[:6], # top 6 languages
user_rank=calc_github_rank(
0
2023-11-17 06:21:18+00:00
8k
Zaloog/kanban-python
src/kanban_python/controls.py
[ { "identifier": "cfg", "path": "src/kanban_python/config.py", "snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def active_board(self, new_board):\n def kanban_boards(self) -> list:\n def kanban_boards_dict(self) -> dict:\n def kanban_boards_dict(self, board_name: str) -> dict:\n def active_board_path(self) -> str:\n def show_footer(self):\n def show_footer(self, visible):\n def col_min_width(self) -> int:\n def col_min_width(self, new_width: int) -> None:\n def kanban_columns_dict(self) -> dict:\n def kanban_columns_dict(self, updated_dict) -> dict:\n def vis_cols(self) -> list:\n def done_limit(self) -> int:\n def done_limit(self, new_limit: int) -> None:\n def scanned_files(self) -> list:\n def scanned_files(self, new_files_to_scan: str) -> None:\n def scanned_patterns(self) -> list:\n def scanned_patterns(self, new_patterns_to_scan: str) -> None:\ndef create_init_config(conf_path=CONFIG_PATH, data_path=DATA_PATH):\ndef delete_current_folder_board_from_config(\n cfg=cfg, curr_path: str = str(Path.cwd())\n) -> None:\ndef check_if_board_name_exists_in_config(boardname: str, cfg=cfg) -> bool:\ndef check_if_current_active_board_in_board_list(cfg=cfg) -> bool:\ndef delete_board_from_config(board_name, cfg=cfg) -> None:\ndef check_config_exists(path=CONFIG_FILE_PATH) -> bool:\ndef get_json_path(boardname: str):" }, { "identifier": "DUMMY_DB", "path": "src/kanban_python/constants.py", "snippet": "DUMMY_DB = {1: DUMMY_TASK}" }, { "identifier": "KANBAN_BOARDS_PATH", "path": "src/kanban_python/constants.py", "snippet": "KANBAN_BOARDS_PATH = DATA_PATH / KANBAN_BOARDS_FOLDER_NAME" }, { "identifier": "REPORT_FILE_NAME", "path": "src/kanban_python/constants.py", "snippet": "REPORT_FILE_NAME = \"pykanban.md\"" }, { "identifier": "REPORT_FILE_PATH", "path": "src/kanban_python/constants.py", "snippet": "REPORT_FILE_PATH = DATA_PATH / REPORTS_FOLDER_NAME" }, { "identifier": "TASK_FILE_NAME", "path": "src/kanban_python/constants.py", "snippet": "TASK_FILE_NAME = \"pykanban.json\"" }, { "identifier": "create_config_table", "path": "src/kanban_python/interface.py", "snippet": "def create_config_table():\n settings_table = Table(\n title=\":hammer_and_wrench: [grey69]Settings Overview[/]:hammer_and_wrench:\",\n highlight=True,\n show_header=True,\n caption=f\"Your config file is located under [light_green]{CONFIG_FILE_PATH}[/]\",\n )\n for col in [\"Option\", \"Current Value\"]:\n settings_table.add_column(\n header=col,\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n min_width=30,\n )\n for section in cfg.config:\n if section:\n settings_table.add_section()\n settings_table.add_row(f\"[blue]{section}[/]\", \"\")\n for key, val in cfg.config[section].items():\n settings_table.add_row(key, val)\n\n return settings_table" }, { "identifier": "create_github_like_report_table", "path": "src/kanban_python/interface.py", "snippet": "def create_github_like_report_table(boards_dict: dict):\n done_tasks = []\n for _, task_dict in boards_dict.items():\n done_tasks += [task for _, task in task_dict.items() if task[\"Complete_Time\"]]\n\n max_val, report_dict = create_dict_for_report_view(done_tasks)\n current_year = datetime.now().year\n done_tasks_this_year = [\n task\n for task in done_tasks\n if datetime.strptime(task[\"Complete_Time\"], \"%Y-%m-%d %H:%M:%S\").year\n == current_year\n ]\n\n gh_table = Table(\n 
title=f\"[{REPORT_COLORS[4]}]{len(done_tasks_this_year)}[/] Tasks completed\"\n + f\" in [{REPORT_COLORS[4]}]{current_year}[/]\",\n title_justify=\"left\",\n highlight=True,\n padding=False,\n show_header=True,\n box=None,\n caption=\"\\nless\"\n + \" \".join([f\"[{scale} on {scale}] [/] \" for scale in REPORT_COLORS])\n + \" more\",\n caption_justify=\"right\",\n )\n for work_week in range(0, 53):\n gh_table.add_column(\n header=\"\" if (work_week % 5 or work_week == 0) else f\"{work_week}\",\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n )\n\n for day in range(1, 8):\n day_name = calendar.day_abbr[day - 1] if day % 2 else \"\"\n day_row_vals = [report_dict[day].get(week, 0) for week in range(1, 53)]\n mapped_day_row_vals = create_color_mapping(day_row_vals, max_val=max_val)\n\n gh_table.add_row(\n day_name,\n *[\n f\"[{REPORT_COLORS[i]} on {REPORT_COLORS[i]}] [/]\"\n for i in mapped_day_row_vals\n ],\n )\n\n return gh_table" }, { "identifier": "create_table", "path": "src/kanban_python/interface.py", "snippet": "def create_table(data: dict) -> Table:\n status_dict = create_status_dict_for_rows(data=data, vis_cols=cfg.vis_cols)\n\n table_name = cfg.active_board\n table = Table(\n title=f\"[blue]Active Board: {table_name}[/]\",\n highlight=True,\n show_header=True,\n show_footer=True if cfg.show_footer == \"True\" else False,\n caption=BOARD_CAPTION_STRING,\n )\n\n for i, category in enumerate([COLOR_DICT.get(col, col) for col in cfg.vis_cols]):\n table.add_column(\n header=category + f\"\\t({len(status_dict[cfg.vis_cols[i]])} Task/s)\",\n header_style=\"bold\",\n justify=\"left\",\n overflow=\"fold\",\n footer=FOOTER[0]\n if i == 0\n else FOOTER[1]\n if i == len(cfg.vis_cols) - 1\n else \"\",\n min_width=cfg.col_min_width,\n )\n\n for row_tasks in zip_longest(*status_dict.values()):\n table.add_row(*row_tasks)\n\n return table" }, { "identifier": "input_ask_for_action", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_for_action():\n console.print(\n \"[yellow]Whats up!?[/], how can I help you being productive today :rocket:?\"\n )\n console.print(\n \"\\t[1] :clipboard: [green]Create new Task[/]\"\n + 2 * \"\\t\"\n + \"[2] :clockwise_vertical_arrows: [bold cornflower_blue]Update/Check Task[/]\"\n )\n console.print(\n \"\\t[3] :bookmark_tabs: [bold yellow]Change Kanban Board[/]\"\n + \"\\t\"\n + \"[4] :magnifying_glass_tilted_left: [bold blue]Show Task Details[/]\"\n )\n console.print(\n \"\\t[5] :cross_mark: [red]Delete Kanban Board[/]\"\n + \"\\t\"\n + \"[6] :hammer_and_wrench: [grey69]Show Current Settings[/]\"\n )\n action = IntPrompt.ask(\n prompt=\"Choose wisely :books:\",\n choices=[\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n ],\n show_choices=False,\n )\n return action" }, { "identifier": "input_ask_for_action_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_for_action_settings() -> int:\n console.print(\n \"[yellow]Not happy with current settings!?[/],\"\n + \"which [blue]Section[/] do you want to change :hammer_and_wrench:?\"\n )\n console.print(\n \"\\t[1] :clipboard: [blue]settings.general[/]\"\n + 2 * \"\\t\"\n + \"[2] :eye: [blue]settings.columns.visibility[/]\"\n )\n console.print(\n \"\\t[3] :magnifying_glass_tilted_left: [blue]settings.scanner[/]\"\n + 2 * \"\\t\"\n + \"[4] :cross_mark: [red]Go back to Kanban Board[/]\"\n )\n action = IntPrompt.ask(\n prompt=\"Choose [blue]Section[/], where you want to change the Current Value\",\n choices=[\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n ],\n 
show_choices=False,\n )\n return action" }, { "identifier": "input_ask_for_change_board", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_for_change_board(boards_dict: dict) -> str:\n boards = cfg.kanban_boards\n max_board_len = max([len(b) for b in cfg.kanban_boards])\n\n # if active Board is not in Board List dont show default\n try:\n active_board_idx = boards.index(cfg.active_board) + 1\n except ValueError:\n active_board_idx = None\n\n for idx, (board, board_data) in enumerate(boards_dict.items(), start=1):\n status_dict = create_status_dict_for_rows(board_data, cfg.vis_cols)\n days_left_list = [\n calculate_days_left_till_due(val[\"Due_Date\"])\n for val in board_data.values()\n if (val.get(\"Due_Date\") and (val[\"Status\"] in [\"Ready\", \"Doing\"]))\n ]\n # Use -9999 to as placeholder for no tasks to make comparison later\n days_left = min(days_left_list) if days_left_list else -9999\n console.print(\n f\"[{idx}] {board}\"\n + \" \" * ((max_board_len - len(board) + 1))\n + \" | \".join(\n [\n f\"{COLOR_DICT[col]}: {len(status_dict[col]):02d}\"\n for col in cfg.vis_cols\n ]\n )\n + (\n f\"\\t next due in {days_left} day/s\"\n if days_left > 0\n else f\"[red]\\t task {-days_left} day/s overdue[/]\"\n if days_left != -9999\n else \"\\t no dues present here\"\n )\n )\n\n answer = IntPrompt.ask(\n prompt=\"Which board to activate\",\n choices=[f\"{i}\" for i, _ in enumerate(boards, start=1)],\n show_choices=False,\n default=active_board_idx,\n show_default=True,\n )\n return boards[int(answer) - 1]" }, { "identifier": "input_ask_for_delete_board", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_for_delete_board() -> str:\n boards = [b for b in cfg.kanban_boards]\n for idx, board in enumerate(boards, start=1):\n console.print(f\"[{idx}] {board}\")\n\n answer = IntPrompt.ask(\n prompt=\"Which board to delete\",\n choices=[f\"{i}\" for i, _ in enumerate(boards, start=1)],\n show_choices=False,\n )\n return boards[int(answer) - 1]" }, { "identifier": "input_ask_for_new_board_name", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_for_new_board_name() -> str:\n return Prompt.ask(\n prompt=\"A new folder will be created for your board\\n\"\n + \":warning: [yellow]Only[/] use alpha-numeric characters or\"\n + \" [green]'-', '_', ' '[/] for new board names.\\n\"\n + \"What should the new board be called?\"\n )" }, { "identifier": "input_ask_which_task_to_update", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_which_task_to_update(data: dict) -> str:\n choice_task_ids = [\n id for id, task in data.items() if task[\"Status\"] in cfg.vis_cols\n ]\n task_id_to_update = IntPrompt.ask(\n prompt=\"Which Task to update? Select an [[cyan]Id[/]]\",\n choices=choice_task_ids,\n show_choices=False,\n )\n return str(task_id_to_update)" }, { "identifier": "input_ask_which_tasks_to_show", "path": "src/kanban_python/interface.py", "snippet": "def input_ask_which_tasks_to_show(choices):\n return Prompt.ask(\n prompt=\"What Task/s to show? 
Select an [[cyan]Id[/]] or ([orange3]Tag[/])?\",\n default=False,\n show_default=False,\n choices=choices,\n show_choices=False,\n )" }, { "identifier": "input_change_column_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_change_column_settings():\n updated_column_dict = {}\n for col, vis in cfg.kanban_columns_dict.items():\n new_visible = Confirm.ask(\n prompt=f\"Should Column {COLOR_DICT.get(col,col)} be visible?\",\n default=True if vis == \"True\" else False,\n show_default=True,\n )\n updated_column_dict[col] = \"True\" if new_visible else \"False\"\n\n return updated_column_dict" }, { "identifier": "input_change_done_limit_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_change_done_limit_settings() -> int:\n done_limit = IntPrompt.ask(\n prompt=f\"What should the Limit of Tasks in {COLOR_DICT.get('Done','Done')} \"\n + f\"Column be, before moving to {COLOR_DICT.get('Archived','Archived')}?\",\n default=cfg.done_limit,\n show_default=True,\n )\n\n return str(done_limit)" }, { "identifier": "input_change_files_to_scan_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_change_files_to_scan_settings():\n files_to_scan = Prompt.ask(\n prompt=\"Which Files to scan? Enter [green]' '[/] separated File Endings\",\n default=\" \".join(cfg.scanned_files),\n show_default=True,\n )\n\n return files_to_scan" }, { "identifier": "input_change_footer_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_change_footer_settings():\n footer_visible = Confirm.ask(\n prompt=\"Should Footer be visible?\",\n default=True if cfg.show_footer == \"True\" else False,\n show_default=True,\n )\n\n return footer_visible" }, { "identifier": "input_change_min_col_width_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_change_min_col_width_settings():\n new_min_col_width = IntPrompt.ask(\n prompt=\"What should the minimum Column Width be?\",\n default=cfg.col_min_width,\n show_default=True,\n )\n\n return new_min_col_width" }, { "identifier": "input_change_patterns_to_scan_settings", "path": "src/kanban_python/interface.py", "snippet": "def input_change_patterns_to_scan_settings():\n files_to_scan = Prompt.ask(\n prompt=\"Which Patterns to scan? 
Enter [green]','[/] separated Patterns\",\n default=\",\".join(cfg.scanned_patterns),\n show_default=True,\n )\n\n return files_to_scan" }, { "identifier": "input_confirm_add_todos_to_board", "path": "src/kanban_python/interface.py", "snippet": "def input_confirm_add_todos_to_board(todos: list) -> bool:\n # Question Also print tasks already in Board?\n console.print(f\"Found [blue]{len(todos)}[/] TODOs.\")\n if len(todos) > 10:\n if input_confirm_show_all_todos():\n print_all_todos(todos)\n else:\n print_all_todos(todos)\n\n return Confirm.ask(\n prompt=\"Add found Tasks to active board?\", default=False, show_default=True\n )" }, { "identifier": "input_confirm_delete_board", "path": "src/kanban_python/interface.py", "snippet": "def input_confirm_delete_board(name) -> bool:\n return Confirm.ask(\n f\"Are you sure you want to delete the Board '{name}':question_mark:\"\n )" }, { "identifier": "input_confirm_set_board_active", "path": "src/kanban_python/interface.py", "snippet": "def input_confirm_set_board_active(name) -> bool:\n return Confirm.ask(\n f\"Do you want to set the Board '{name}' as active:question_mark:\"\n )" }, { "identifier": "input_create_new_task", "path": "src/kanban_python/interface.py", "snippet": "def input_create_new_task() -> dict:\n title = Prompt.ask(\n prompt=\"[1/5] Add Task Title\",\n )\n\n description = Prompt.ask(\n prompt=\"[2/5] Add Task Description\",\n show_default=True,\n default=\"\",\n )\n\n tag = Prompt.ask(\n prompt=\"[3/5] Add a Tag\",\n show_default=True,\n default=\"ETC\",\n )\n\n while True:\n due_date = Prompt.ask(\n prompt=\"[4/5] Add a Due Date (YYYY-MM-DD)\",\n show_default=True,\n default=\"\",\n )\n if not due_date or check_due_date_format(date_str=due_date):\n break\n else:\n console.print(\n f\":warning: '{due_date}' has [red]not[/] \"\n + \"the right format YYYY-MM-DD\"\n )\n\n console.print(f\"\\t[1] {COLOR_DICT['Ready']}\")\n console.print(f\"\\t[2] {COLOR_DICT['Doing']}\")\n\n status = IntPrompt.ask(\n prompt=\"[5/5] Status of Task\",\n show_choices=False,\n choices=[\"1\", \"2\"],\n show_default=True,\n default=\"1\",\n )\n\n new_task = {\n \"Title\": title,\n \"Description\": description,\n \"Status\": \"Ready\" if str(status) == \"1\" else \"Doing\",\n \"Tag\": tag.upper(),\n \"Creation_Date\": current_time_to_str(),\n \"Due_Date\": due_date_date_to_datetime(due_date),\n \"Begin_Time\": current_time_to_str() if str(status) == \"2\" else \"\",\n \"Complete_Time\": \"\",\n \"Duration\": 0,\n }\n return new_task" }, { "identifier": "input_update_task", "path": "src/kanban_python/interface.py", "snippet": "def input_update_task(current_task: dict) -> dict:\n title = input_update_task_title(current_task[\"Title\"])\n description = input_update_task_description(current_task[\"Description\"])\n tag = input_update_task_tag(current_task[\"Tag\"])\n due_date = input_update_due_date(current_task.get(\"Due_Date\", \"\"))\n status = input_ask_to_what_status_to_move(current_task[\"Title\"])\n\n if (status == \"Doing\") and (current_task[\"Status\"] != \"Doing\"):\n start_doing = current_time_to_str()\n stop_doing = current_task.get(\"Complete_Time\", \"\")\n duration = current_task.get(\"Duration\", 0)\n elif (status != \"Doing\") and (current_task[\"Status\"] == \"Doing\"):\n start_doing = current_task.get(\"Begin_Time\", \"\")\n stop_doing = current_time_to_str()\n duration = calculate_time_delta_str(\n start_time_str=current_task.get(\"Begin_Time\", \"\"), end_time_str=stop_doing\n ) + current_task.get(\"Duration\", 0)\n else:\n start_doing = 
current_task.get(\"Begin_Time\", \"\")\n stop_doing = current_task.get(\"Complete_Time\", \"\")\n duration = current_task.get(\"Duration\", 0)\n\n if status == \"Done\":\n stop_doing = current_time_to_str()\n console.print(\n f\":sparkle: Congrats, you just completed '{title}'\"\n + f\" after {duration} minutes :muscle:\"\n )\n\n updated_task = {\n \"Title\": title,\n \"Description\": description,\n \"Status\": status,\n \"Tag\": tag.upper(),\n \"Due_Date\": due_date,\n \"Begin_Time\": start_doing,\n \"Complete_Time\": stop_doing,\n \"Duration\": duration,\n }\n current_task.update(updated_task)\n return current_task" }, { "identifier": "check_board_name_valid", "path": "src/kanban_python/utils.py", "snippet": "def get_motivational_quote() -> str:\ndef current_time_to_str() -> str:\ndef calculate_time_delta_str(start_time_str: str, end_time_str: str) -> float:\ndef create_status_dict_for_rows(data: dict, vis_cols: list) -> dict:\ndef check_if_done_col_leq_X(cfg, data: dict) -> bool:\ndef check_if_there_are_visible_tasks_in_board(data: dict, vis_cols: list) -> bool:\ndef move_first_done_task_to_archive(data: dict):\ndef delete_json_file(db_path: str) -> None:\ndef check_board_name_valid(boardname: str):\ndef scan_files(path=Path.cwd(), endings: list = [\".py\"]):\n def recursive_search(path, file_list: list, progress):\ndef scan_for_todos(\n file_paths: list, rel_path=Path.cwd(), patterns: list = [\"#TODO\", \"# TODO\"]\n) -> list:\ndef split_todo_in_tag_and_title(todo: str, patterns: list):\ndef get_tag_id_choices(data_dict: dict, vis_cols: list) -> list:\ndef check_scanner_files_valid(files: str) -> bool:\ndef check_scanner_patterns_valid(patterns: str) -> bool:\ndef get_iso_calender_info(date_str: str):\ndef create_dict_for_report_view(completed_tasks: list):\ndef create_color_mapping(amount_list: list, max_val: int):\ndef create_report_document(boards_dict: dict):\ndef check_due_date_format(date_str: str) -> bool:\ndef due_date_datetime_to_date(date_datetime: str) -> str:\ndef due_date_date_to_datetime(date_str: str) -> str:\ndef calculate_days_left_till_due(due_date: str):" } ]
from json import dump, load from rich.pretty import pprint from .config import ( cfg, check_if_board_name_exists_in_config, check_if_current_active_board_in_board_list, delete_board_from_config, get_json_path, ) from .constants import ( DUMMY_DB, KANBAN_BOARDS_PATH, REPORT_FILE_NAME, REPORT_FILE_PATH, TASK_FILE_NAME, ) from .interface import ( create_config_table, create_github_like_report_table, create_table, input_ask_for_action, input_ask_for_action_settings, input_ask_for_change_board, input_ask_for_delete_board, input_ask_for_new_board_name, input_ask_which_task_to_update, input_ask_which_tasks_to_show, input_change_column_settings, input_change_done_limit_settings, input_change_files_to_scan_settings, input_change_footer_settings, input_change_min_col_width_settings, input_change_patterns_to_scan_settings, input_confirm_add_todos_to_board, input_confirm_delete_board, input_confirm_set_board_active, input_create_new_task, input_update_task, ) from .utils import ( check_board_name_valid, check_if_done_col_leq_X, check_if_there_are_visible_tasks_in_board, check_scanner_files_valid, check_scanner_patterns_valid, console, create_report_document, current_time_to_str, delete_json_file, get_tag_id_choices, move_first_done_task_to_archive, scan_files, scan_for_todos, split_todo_in_tag_and_title, )
6639
if not path: path = cfg.active_board_path if path == "all": board_dict = { b: read_single_board(b_path) for b, b_path in cfg.kanban_boards_dict.items() } return board_dict try: data = read_single_board(path) return data except FileNotFoundError: print(path) console.print(f":warning: No [orange3]{TASK_FILE_NAME}[/] file here anymore.") console.print("Please change to another board.") change_kanban_board() console.print(f"[red]Seems like the previous {TASK_FILE_NAME} file was deleted[/]") console.print(f"Create new [orange3]{TASK_FILE_NAME}[/] file here.") create_new_db() return read_db() def read_single_board(path): with open(path, "r") as file: data = load(file) return data # User Action Controls ##################################################################################### # Get User Action def get_user_action(): return input_ask_for_action() # Action 1 def add_new_task_to_db(): new_task = input_create_new_task() add_tasks_to_db(tasks=new_task) # Action 2 def update_task_from_db(): db_data = read_db() if not check_if_there_are_visible_tasks_in_board(db_data, cfg.vis_cols): console.print(":cross_mark:[red]No Tasks available on this Kanban board[/]") return selected_id = input_ask_which_task_to_update(db_data) updated_task = input_update_task(current_task=db_data[selected_id]) db_data[selected_id] = updated_task while not check_if_done_col_leq_X(cfg=cfg, data=db_data): first_task_id, archive_task = move_first_done_task_to_archive(data=db_data) db_data[first_task_id] = archive_task save_db(data=db_data) # Action 3 def change_kanban_board(): boards_dict = read_db(path="all") new_active_board = input_ask_for_change_board(boards_dict) cfg.active_board = new_active_board # Action 4 def show_tasks(): db_data = read_db() choices = get_tag_id_choices(db_data, cfg.vis_cols) selection_criteria = input_ask_which_tasks_to_show(choices) for i, task in db_data.items(): if selection_criteria in [i, task["Tag"]]: console.print( 20 * "[bold blue]#[/]" + f" Task {i} " + 20 * "[bold blue]#[/]" ) pprint( { key: val for key, val in task.items() if key in ["Title", "Description", "Tag", "Status", "Due_Date"] }, console=console, expand_all=True, ) # Action 5 def delete_kanban_board(): board_to_delete = input_ask_for_delete_board() if input_confirm_delete_board(board_to_delete): board_to_delete_path = cfg.kanban_boards_dict[board_to_delete] delete_json_file(board_to_delete_path) delete_board_from_config(board_to_delete) def show(): if not cfg.kanban_boards: console.print(":warning: [red]No Boards created yet[/]:warning:") console.print("Use 'kanban init' to create a new kanban board.") raise KeyboardInterrupt if not check_if_current_active_board_in_board_list(): console.print( "[yellow]Hmm, Something went wrong.[/] " + f"The active board '{cfg.active_board}' is not in the list of boards." ) change_kanban_board() show() return db_data = read_db() table = create_table(data=db_data) console.print(table) # Scan Functionality ##################################################################################### def add_todos_to_board(): files = scan_files(endings=cfg.scanned_files)
from __future__ import annotations # DB Controls ##################################################################################### def create_new_db() -> None: while True: while True: new_board_name = input_ask_for_new_board_name() if check_board_name_valid(new_board_name): break console.print(f":warning: '{new_board_name}' is [red]not[/] a valid Name.") if not check_if_board_name_exists_in_config(new_board_name): break console.print( f":warning: Board '{new_board_name}' already exists, choose another Name." ) cfg.kanban_boards_dict = new_board_name # Options: # 1. ~/.kanban-python/<BOARDNAME>.json # 2. ~/.kanban-python/kanban_boards/<BOARDNAME>.json # 3. ~/.kanban-python/kanban_boards/<BOARDNAME>/pykanban.json <- THIS # 4. ~/.kanban-python/kanban_boards/<BOARDNAME>/<BOARDNAME>.json new_db_path = KANBAN_BOARDS_PATH / new_board_name if not new_db_path.exists(): new_db_path.mkdir() with open(get_json_path(new_board_name), "w", encoding="utf-8") as f: dump(DUMMY_DB, f, ensure_ascii=False, indent=4) console.print( f"Created new [orange3]{TASK_FILE_NAME}[/] file at " + f"[orange3]{KANBAN_BOARDS_PATH / new_board_name}[/] to save tasks." ) if input_confirm_set_board_active(name=new_board_name): cfg.active_board = new_board_name def save_db(data): path = cfg.active_board_path with open(path, "w", encoding="utf-8") as f: dump(data, f, ensure_ascii=False, indent=4) def add_tasks_to_db(tasks: dict | list[dict]) -> None: db_data = read_db() if isinstance(tasks, dict): new_id = str(max(int(i) for i in db_data.keys()) + 1) db_data[new_id] = tasks else: for task in tasks: new_id = str(max(int(i) for i in db_data.keys()) + 1) db_data[new_id] = task save_db(data=db_data) def read_db(path: str = None) -> dict: if not path: path = cfg.active_board_path if path == "all": board_dict = { b: read_single_board(b_path) for b, b_path in cfg.kanban_boards_dict.items() } return board_dict try: data = read_single_board(path) return data except FileNotFoundError: print(path) console.print(f":warning: No [orange3]{TASK_FILE_NAME}[/] file here anymore.") console.print("Please change to another board.") change_kanban_board() console.print(f"[red]Seems like the previous {TASK_FILE_NAME} file was deleted[/]") console.print(f"Create new [orange3]{TASK_FILE_NAME}[/] file here.") create_new_db() return read_db() def read_single_board(path): with open(path, "r") as file: data = load(file) return data # User Action Controls ##################################################################################### # Get User Action def get_user_action(): return input_ask_for_action() # Action 1 def add_new_task_to_db(): new_task = input_create_new_task() add_tasks_to_db(tasks=new_task) # Action 2 def update_task_from_db(): db_data = read_db() if not check_if_there_are_visible_tasks_in_board(db_data, cfg.vis_cols): console.print(":cross_mark:[red]No Tasks available on this Kanban board[/]") return selected_id = input_ask_which_task_to_update(db_data) updated_task = input_update_task(current_task=db_data[selected_id]) db_data[selected_id] = updated_task while not check_if_done_col_leq_X(cfg=cfg, data=db_data): first_task_id, archive_task = move_first_done_task_to_archive(data=db_data) db_data[first_task_id] = archive_task save_db(data=db_data) # Action 3 def change_kanban_board(): boards_dict = read_db(path="all") new_active_board = input_ask_for_change_board(boards_dict) cfg.active_board = new_active_board # Action 4 def show_tasks(): db_data = read_db() choices = get_tag_id_choices(db_data, cfg.vis_cols) selection_criteria = 
input_ask_which_tasks_to_show(choices) for i, task in db_data.items(): if selection_criteria in [i, task["Tag"]]: console.print( 20 * "[bold blue]#[/]" + f" Task {i} " + 20 * "[bold blue]#[/]" ) pprint( { key: val for key, val in task.items() if key in ["Title", "Description", "Tag", "Status", "Due_Date"] }, console=console, expand_all=True, ) # Action 5 def delete_kanban_board(): board_to_delete = input_ask_for_delete_board() if input_confirm_delete_board(board_to_delete): board_to_delete_path = cfg.kanban_boards_dict[board_to_delete] delete_json_file(board_to_delete_path) delete_board_from_config(board_to_delete) def show(): if not cfg.kanban_boards: console.print(":warning: [red]No Boards created yet[/]:warning:") console.print("Use 'kanban init' to create a new kanban board.") raise KeyboardInterrupt if not check_if_current_active_board_in_board_list(): console.print( "[yellow]Hmm, Something went wrong.[/] " + f"The active board '{cfg.active_board}' is not in the list of boards." ) change_kanban_board() show() return db_data = read_db() table = create_table(data=db_data) console.print(table) # Scan Functionality ##################################################################################### def add_todos_to_board(): files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
27
2023-11-11 14:43:55+00:00
8k
AMAAI-Lab/mustango
diffusers/src/diffusers/loaders.py
[ { "identifier": "LoRAAttnProcessor", "path": "diffusers/src/diffusers/models/attention_processor.py", "snippet": "class LoRAAttnProcessor(nn.Module):\n def __init__(self, hidden_size, cross_attention_dim=None, rank=4):\n super().__init__()\n\n self.hidden_size = hidden_size\n self.cross_attention_dim = cross_attention_dim\n self.rank = rank\n\n self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank)\n self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank)\n self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank)\n self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank)\n\n def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, scale=1.0):\n batch_size, sequence_length, _ = (\n hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape\n )\n attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)\n\n query = attn.to_q(hidden_states) + scale * self.to_q_lora(hidden_states)\n query = attn.head_to_batch_dim(query)\n\n encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states\n\n key = attn.to_k(encoder_hidden_states) + scale * self.to_k_lora(encoder_hidden_states)\n value = attn.to_v(encoder_hidden_states) + scale * self.to_v_lora(encoder_hidden_states)\n\n key = attn.head_to_batch_dim(key)\n value = attn.head_to_batch_dim(value)\n\n attention_probs = attn.get_attention_scores(query, key, attention_mask)\n hidden_states = torch.bmm(attention_probs, value)\n hidden_states = attn.batch_to_head_dim(hidden_states)\n\n # linear proj\n hidden_states = attn.to_out[0](hidden_states) + scale * self.to_out_lora(hidden_states)\n # dropout\n hidden_states = attn.to_out[1](hidden_states)\n\n return hidden_states" }, { "identifier": "logging", "path": "diffusers/src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "DIFFUSERS_CACHE", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "DIFFUSERS_CACHE = default_cache_path" }, { "identifier": "deprecate", "path": "diffusers/src/diffusers/utils/deprecation_utils.py", "snippet": "def 
deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True):\n from .. import __version__\n\n deprecated_kwargs = take_from\n values = ()\n if not isinstance(args[0], tuple):\n args = (args,)\n\n for attribute, version_name, message in args:\n if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):\n raise ValueError(\n f\"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'\"\n f\" version {__version__} is >= {version_name}\"\n )\n\n warning = None\n if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:\n values += (deprecated_kwargs.pop(attribute),)\n warning = f\"The `{attribute}` argument is deprecated and will be removed in version {version_name}.\"\n elif hasattr(deprecated_kwargs, attribute):\n values += (getattr(deprecated_kwargs, attribute),)\n warning = f\"The `{attribute}` attribute is deprecated and will be removed in version {version_name}.\"\n elif deprecated_kwargs is None:\n warning = f\"`{attribute}` is deprecated and will be removed in version {version_name}.\"\n\n if warning is not None:\n warning = warning + \" \" if standard_warn else \"\"\n warnings.warn(warning + message, FutureWarning, stacklevel=2)\n\n if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:\n call_frame = inspect.getouterframes(inspect.currentframe())[1]\n filename = call_frame.filename\n line_number = call_frame.lineno\n function = call_frame.function\n key, value = next(iter(deprecated_kwargs.items()))\n raise TypeError(f\"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`\")\n\n if len(values) == 0:\n return\n elif len(values) == 1:\n return values[0]\n return values" }, { "identifier": "HF_HUB_OFFLINE", "path": "diffusers/src/diffusers/utils/hub_utils.py", "snippet": "HF_HUB_OFFLINE = os.getenv(\"HF_HUB_OFFLINE\", \"\").upper() in ENV_VARS_TRUE_VALUES" }, { "identifier": "_get_model_file", "path": "diffusers/src/diffusers/utils/hub_utils.py", "snippet": "def _get_model_file(\n pretrained_model_name_or_path,\n *,\n weights_name,\n subfolder,\n cache_dir,\n force_download,\n proxies,\n resume_download,\n local_files_only,\n use_auth_token,\n user_agent,\n revision,\n commit_hash=None,\n):\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n if os.path.isfile(pretrained_model_name_or_path):\n return pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):\n # Load from a PyTorch checkpoint\n model_file = os.path.join(pretrained_model_name_or_path, weights_name)\n return model_file\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, weights_name)\n ):\n model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)\n return model_file\n else:\n raise EnvironmentError(\n f\"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n # 1. 
First check if deprecated way of loading from branches is used\n if (\n revision in DEPRECATED_REVISION_ARGS\n and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)\n and version.parse(version.parse(__version__).base_version) >= version.parse(\"0.17.0\")\n ):\n try:\n model_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=_add_variant(weights_name, revision),\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision or commit_hash,\n )\n warnings.warn(\n f\"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.\",\n FutureWarning,\n )\n return model_file\n except: # noqa: E722\n warnings.warn(\n f\"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \\n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.\",\n FutureWarning,\n )\n try:\n # 2. Load model file as usual\n model_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=weights_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision or commit_hash,\n )\n return model_file\n\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier \"\n \"listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a \"\n \"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli \"\n \"login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for \"\n \"this model name. 
Check the model page at \"\n f\"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n f\"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a file named {weights_name} or\"\n \" \\nCheckout your internet connection or see how to run the library in\"\n \" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. \"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a file named {weights_name}\"\n )" }, { "identifier": "is_safetensors_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_safetensors_available():\n return _safetensors_available" }, { "identifier": "is_transformers_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_transformers_available():\n return _transformers_available" } ]
import os import torch import safetensors from collections import defaultdict from typing import Callable, Dict, List, Optional, Union from .models.attention_processor import LoRAAttnProcessor from .utils import ( DIFFUSERS_CACHE, HF_HUB_OFFLINE, _get_model_file, deprecate, is_safetensors_available, is_transformers_available, logging, ) from transformers import PreTrainedModel, PreTrainedTokenizer
4281
# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_safetensors_available(): if is_transformers_available(): logger = logging.get_logger(__name__) LORA_WEIGHT_NAME = "pytorch_lora_weights.bin" LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors" TEXT_INVERSION_NAME = "learned_embeds.bin" TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors" class AttnProcsLayers(torch.nn.Module): def __init__(self, state_dict: Dict[str, torch.Tensor]): super().__init__() self.layers = torch.nn.ModuleList(state_dict.values()) self.mapping = dict(enumerate(state_dict.keys())) self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())} # we add a hook to state_dict() and load_state_dict() so that the # naming fits with `unet.attn_processors` def map_to(module, state_dict, *args, **kwargs): new_state_dict = {} for key, value in state_dict.items(): num = int(key.split(".")[1]) # 0 is always "layers" new_key = key.replace(f"layers.{num}", module.mapping[num]) new_state_dict[new_key] = value return new_state_dict def map_from(module, state_dict, *args, **kwargs): all_keys = list(state_dict.keys()) for key in all_keys: replace_key = key.split(".processor")[0] + ".processor" new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}") state_dict[new_key] = state_dict[key] del state_dict[key] self._register_state_dict_hook(map_to) self._register_load_state_dict_pre_hook(map_from, with_module=True) class UNet2DConditionLoadersMixin: def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): r""" Load pretrained attention processor layers into `UNet2DConditionModel`. Attention processor layers have to be defined in [cross_attention.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py) and be a `torch.nn.Module` class. <Tip warning={true}> This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids should have an organization name, like `google/ddpm-celebahq-256`. - A path to a *directory* containing model weights saved using [`~ModelMixin.save_config`], e.g., `./my_model_directory/`. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. 
local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `diffusers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo (either remote in huggingface.co or downloaded locally), you can specify the folder name here. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. <Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). </Tip> """
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_safetensors_available(): if is_transformers_available(): logger = logging.get_logger(__name__) LORA_WEIGHT_NAME = "pytorch_lora_weights.bin" LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors" TEXT_INVERSION_NAME = "learned_embeds.bin" TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors" class AttnProcsLayers(torch.nn.Module): def __init__(self, state_dict: Dict[str, torch.Tensor]): super().__init__() self.layers = torch.nn.ModuleList(state_dict.values()) self.mapping = dict(enumerate(state_dict.keys())) self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())} # we add a hook to state_dict() and load_state_dict() so that the # naming fits with `unet.attn_processors` def map_to(module, state_dict, *args, **kwargs): new_state_dict = {} for key, value in state_dict.items(): num = int(key.split(".")[1]) # 0 is always "layers" new_key = key.replace(f"layers.{num}", module.mapping[num]) new_state_dict[new_key] = value return new_state_dict def map_from(module, state_dict, *args, **kwargs): all_keys = list(state_dict.keys()) for key in all_keys: replace_key = key.split(".processor")[0] + ".processor" new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}") state_dict[new_key] = state_dict[key] del state_dict[key] self._register_state_dict_hook(map_to) self._register_load_state_dict_pre_hook(map_from, with_module=True) class UNet2DConditionLoadersMixin: def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): r""" Load pretrained attention processor layers into `UNet2DConditionModel`. Attention processor layers have to be defined in [cross_attention.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py) and be a `torch.nn.Module` class. <Tip warning={true}> This function is experimental and might change in the future. </Tip> Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids should have an organization name, like `google/ddpm-celebahq-256`. - A path to a *directory* containing model weights saved using [`~ModelMixin.save_config`], e.g., `./my_model_directory/`. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `diffusers-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo (either remote in huggingface.co or downloaded locally), you can specify the folder name here. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. <Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models). </Tip> """
cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
2
2023-11-14 23:29:31+00:00
8k
ai-forever/Kandinsky-3
kandinsky3/inpainting_pipeline.py
[ { "identifier": "UNet", "path": "kandinsky3/model/unet.py", "snippet": "class UNet(nn.Module):\n\n def __init__(self,\n model_channels,\n init_channels=None,\n num_channels=3,\n out_channels=4,\n time_embed_dim=None,\n context_dim=None,\n groups=32,\n head_dim=64,\n expansion_ratio=4,\n compression_ratio=2,\n dim_mult=(1, 2, 4, 8),\n num_blocks=(3, 3, 3, 3),\n add_cross_attention=(False, True, True, True),\n add_self_attention=(False, True, True, True),\n *args,\n **kwargs,\n ):\n super().__init__()\n init_channels = init_channels or model_channels\n self.to_time_embed = nn.Sequential(\n SinusoidalPosEmb(init_channels),\n nn.Linear(init_channels, time_embed_dim),\n nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim)\n )\n self.feature_pooling = AttentionPolling(time_embed_dim, context_dim, head_dim)\n\n self.in_layer = nn.Conv2d(num_channels, init_channels, kernel_size=3, padding=1)\n\n hidden_dims = [init_channels, *map(lambda mult: model_channels * mult, dim_mult)]\n in_out_dims = list(zip(hidden_dims[:-1], hidden_dims[1:]))\n text_dims = [set_default_item(is_exist, context_dim) for is_exist in add_cross_attention]\n layer_params = [num_blocks, text_dims, add_self_attention]\n rev_layer_params = map(reversed, layer_params)\n\n cat_dims = []\n self.num_levels = len(in_out_dims)\n self.down_samples = nn.ModuleList([])\n for level, ((in_dim, out_dim), res_block_num, text_dim, self_attention) in enumerate(zip(in_out_dims, *layer_params)):\n down_sample = level != (self.num_levels - 1)\n cat_dims.append(set_default_item(level != (self.num_levels - 1), out_dim, 0))\n self.down_samples.append(\n DownSampleBlock(\n in_dim, out_dim, time_embed_dim, text_dim, res_block_num, groups, head_dim, expansion_ratio,\n compression_ratio, down_sample, self_attention\n )\n )\n\n self.up_samples = nn.ModuleList([])\n for level, ((out_dim, in_dim), res_block_num, text_dim, self_attention) in enumerate(zip(reversed(in_out_dims), *rev_layer_params)):\n up_sample = level != 0\n self.up_samples.append(\n UpSampleBlock(\n in_dim, cat_dims.pop(), out_dim, time_embed_dim, text_dim, res_block_num, groups, head_dim,\n expansion_ratio, compression_ratio, up_sample, self_attention\n )\n )\n\n self.out_layer = nn.Sequential(\n nn.GroupNorm(groups, init_channels),\n nn.SiLU(),\n nn.Conv2d(init_channels, out_channels, kernel_size=3, padding=1)\n )\n\n self.control_net = None\n\n def forward(self, x, time, context=None, context_mask=None, control_net_residual=None):\n time_embed = self.to_time_embed(time)\n if exist(context):\n time_embed = self.feature_pooling(time_embed, context, context_mask)\n\n hidden_states = []\n x = self.in_layer(x)\n for level, down_sample in enumerate(self.down_samples):\n x = down_sample(x, time_embed, context, context_mask, control_net_residual)\n if level != self.num_levels - 1:\n hidden_states.append(x)\n for level, up_sample in enumerate(self.up_samples):\n if level != 0:\n x = torch.cat([x, hidden_states.pop()], dim=1)\n x = up_sample(x, time_embed, context, context_mask)\n x = self.out_layer(x)\n return x" }, { "identifier": "MoVQ", "path": "kandinsky3/movq.py", "snippet": "class MoVQ(nn.Module):\n \n def __init__(self, generator_params):\n super().__init__()\n z_channels = generator_params[\"z_channels\"]\n self.encoder = Encoder(**generator_params)\n self.quant_conv = torch.nn.Conv2d(z_channels, z_channels, 1)\n self.post_quant_conv = torch.nn.Conv2d(z_channels, z_channels, 1)\n self.decoder = Decoder(zq_ch=z_channels, **generator_params)\n \n @torch.no_grad()\n def encode(self, x):\n 
h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n @torch.no_grad()\n def decode(self, quant):\n decoder_input = self.post_quant_conv(quant)\n decoded = self.decoder(decoder_input, quant)\n return decoded" }, { "identifier": "T5TextConditionEncoder", "path": "kandinsky3/condition_encoders.py", "snippet": "class T5TextConditionEncoder(ConditionEncoder):\n\n def __init__(self, model_names, context_dim, model_dims, low_cpu_mem_usage: bool = True, device_map: Optional[str] = None):\n super().__init__(context_dim, model_dims)\n t5_model = T5Model.from_pretrained(model_names['t5'], low_cpu_mem_usage=low_cpu_mem_usage, device_map=device_map)\n self.encoders = nn.ModuleDict({\n 't5': t5_model.encoder.half(),\n })\n self.encoders = freeze(self.encoders)\n\n @torch.no_grad()\n def encode(self, model_input, model_name):\n embeddings = self.encoders[model_name](**model_input).last_hidden_state\n is_inf_embeddings = torch.isinf(embeddings).any(-1).any(-1)\n is_nan_embeddings = torch.isnan(embeddings).any(-1).any(-1)\n bad_embeddings = is_inf_embeddings + is_nan_embeddings\n embeddings[bad_embeddings] = torch.zeros_like(embeddings[bad_embeddings])\n embeddings = embeddings.type(torch.float32)\n return embeddings" }, { "identifier": "T5TextConditionProcessor", "path": "kandinsky3/condition_processors.py", "snippet": "class T5TextConditionProcessor:\n\n def __init__(self, tokens_length, processor_names):\n self.tokens_length = tokens_length['t5']\n self.processor = T5Tokenizer.from_pretrained(processor_names['t5'])\n\n def encode(self, text=None, negative_text=None):\n encoded = self.processor(text, max_length=self.tokens_length, truncation=True)\n pad_length = self.tokens_length - len(encoded['input_ids'])\n input_ids = encoded['input_ids'] + [self.processor.pad_token_id] * pad_length\n attention_mask = encoded['attention_mask'] + [0] * pad_length\n condition_model_input = {'t5': {\n 'input_ids': torch.tensor(input_ids, dtype=torch.long),\n 'attention_mask': torch.tensor(attention_mask, dtype=torch.long)\n }}\n\n if negative_text is not None:\n negative_encoded = self.processor(negative_text, max_length=self.tokens_length, truncation=True)\n negative_input_ids = negative_encoded['input_ids'][:len(encoded['input_ids'])]\n negative_input_ids[-1] = self.processor.eos_token_id\n negative_pad_length = self.tokens_length - len(negative_input_ids)\n negative_input_ids = negative_input_ids + [self.processor.pad_token_id] * negative_pad_length\n negative_attention_mask = encoded['attention_mask'] + [0] * pad_length\n negative_condition_model_input = {'t5': {\n 'input_ids': torch.tensor(negative_input_ids, dtype=torch.long),\n 'attention_mask': torch.tensor(negative_attention_mask, dtype=torch.long)\n }}\n else:\n negative_condition_model_input = None\n return condition_model_input, negative_condition_model_input" }, { "identifier": "BaseDiffusion", "path": "kandinsky3/model/diffusion.py", "snippet": "class BaseDiffusion:\n\n def __init__(self, betas, percentile=None, gen_noise=torch.randn_like):\n self.betas = betas\n self.num_timesteps = betas.shape[0]\n\n alphas = 1. - betas\n self.alphas_cumprod = torch.cumprod(alphas, dim=0)\n self.alphas_cumprod_prev = torch.cat([torch.ones(1, dtype=betas.dtype), self.alphas_cumprod[:-1]])\n\n # calculate q(x_t | x_{t-1})\n self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)\n self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1. 
- self.alphas_cumprod)\n\n # calculate q(x_{t-1} | x_t, x_0)\n self.posterior_mean_coef_1 = torch.sqrt(self.alphas_cumprod_prev) * betas / (1. - self.alphas_cumprod)\n self.posterior_mean_coef_2 = torch.sqrt(alphas) * (1. - self.alphas_cumprod_prev) / (1. - self.alphas_cumprod)\n self.posterior_variance = betas * (1. - self.alphas_cumprod_prev) / (1. - self.alphas_cumprod)\n self.posterior_log_variance = torch.log(\n torch.cat([self.posterior_variance[1].unsqueeze(0), self.posterior_variance[1:]])\n )\n\n self.percentile = percentile\n self.time_scale = 1000 // self.num_timesteps\n self.gen_noise = gen_noise\n self.jump_length = 3\n\n def process_x_start(self, x_start):\n bs, ndims = x_start.shape[0], len(x_start.shape[1:])\n if self.percentile is not None:\n quantile = torch.quantile(\n rearrange(x_start, 'b ... -> b (...)').abs(),\n self.percentile,\n dim=-1\n )\n quantile = torch.clip(quantile, min=1.)\n quantile = quantile.reshape(bs, *((1,) * ndims))\n return torch.clip(x_start, -quantile, quantile) / quantile\n else:\n return torch.clip(x_start, -1., 1.)\n\n def q_sample(self, x_start, t, noise=None, mask=None):\n if noise is None:\n noise = self.gen_noise(x_start)\n sqrt_alphas_cumprod = get_tensor_items(self.sqrt_alphas_cumprod, t, x_start.shape)\n sqrt_one_minus_alphas_cumprod = get_tensor_items(self.sqrt_one_minus_alphas_cumprod, t, noise.shape)\n x_t = sqrt_alphas_cumprod * x_start + sqrt_one_minus_alphas_cumprod * noise\n return x_t\n\n def q_posterior_mean_variance(self, x_start, x_t, t):\n posterior_mean_coef_1 = get_tensor_items(self.posterior_mean_coef_1, t, x_start.shape)\n posterior_mean_coef_2 = get_tensor_items(self.posterior_mean_coef_2, t, x_t.shape)\n posterior_mean = posterior_mean_coef_1 * x_start + posterior_mean_coef_2 * x_t\n\n posterior_variance = get_tensor_items(self.posterior_variance, t, x_start.shape)\n posterior_log_variance = get_tensor_items(self.posterior_log_variance, t, x_start.shape)\n return posterior_mean, posterior_variance, posterior_log_variance\n\n def text_guidance(self, model, x, t, context, context_mask, null_embedding, guidance_weight_text, \n uncondition_context=None, uncondition_context_mask=None, mask=None, masked_latent=None):\n large_x = x.repeat(2, 1, 1, 1)\n large_t = t.repeat(2)\n \n if uncondition_context is None:\n uncondition_context = torch.zeros_like(context)\n uncondition_context_mask = torch.zeros_like(context_mask)\n uncondition_context[:, 0] = null_embedding\n uncondition_context_mask[:, 0] = 1\n large_context = torch.cat([context, uncondition_context])\n large_context_mask = torch.cat([context_mask, uncondition_context_mask])\n \n if mask is not None:\n mask = mask.repeat(2, 1, 1, 1)\n if masked_latent is not None:\n masked_latent = masked_latent.repeat(2, 1, 1, 1)\n \n if model.in_layer.in_channels == 9:\n large_x = torch.cat([large_x, mask, masked_latent], dim=1)\n \n pred_large_noise = model(large_x, large_t * self.time_scale, large_context, large_context_mask.bool())\n pred_noise, uncond_pred_noise = torch.chunk(pred_large_noise, 2)\n pred_noise = (guidance_weight_text + 1.) 
* pred_noise - guidance_weight_text * uncond_pred_noise\n return pred_noise\n\n def p_mean_variance(self, model, x, t, context, context_mask, null_embedding, guidance_weight_text,\n negative_context=None, negative_context_mask=None, mask=None, masked_latent=None):\n \n pred_noise = self.text_guidance(model, x, t, context, context_mask, null_embedding, guidance_weight_text,\n negative_context, negative_context_mask, mask, masked_latent)\n\n sqrt_one_minus_alphas_cumprod = get_tensor_items(self.sqrt_one_minus_alphas_cumprod, t, pred_noise.shape)\n sqrt_alphas_cumprod = get_tensor_items(self.sqrt_alphas_cumprod, t, pred_noise.shape)\n pred_x_start = (x - sqrt_one_minus_alphas_cumprod * pred_noise) / sqrt_alphas_cumprod\n pred_x_start = self.process_x_start(pred_x_start)\n\n pred_mean, pred_var, pred_log_var = self.q_posterior_mean_variance(pred_x_start, x, t)\n return pred_mean, pred_var, pred_log_var\n\n @torch.no_grad()\n def p_sample(self, model, x, t, context, context_mask, null_embedding, guidance_weight_text,\n negative_context=None, negative_context_mask=None, mask=None, masked_latent=None):\n bs = x.shape[0]\n ndims = len(x.shape[1:])\n pred_mean, _, pred_log_var = self.p_mean_variance(model, x, t, context, context_mask, null_embedding, guidance_weight_text,\n negative_context=negative_context,\n negative_context_mask=negative_context_mask,\n mask=mask, masked_latent=masked_latent)\n noise = torch.randn_like(x)\n mask = (t != 0).reshape(bs, *((1,) * ndims))\n sample = pred_mean + mask * torch.exp(0.5 * pred_log_var) * noise\n return sample\n\n @torch.no_grad()\n def p_sample_loop(self, model, shape, device, context, context_mask, null_embedding, guidance_weight_text, \n negative_context=None, negative_context_mask=None, mask=None, masked_latent=None,):\n \n img = torch.randn(*shape, device=device)\n t_start = self.num_timesteps\n time = list(range(t_start))[::-1]\n \n for time in tqdm(time, position=0):\n time = torch.tensor([time] * shape[0], device=device)\n img = self.p_sample(\n model, img, time, context, context_mask, null_embedding, guidance_weight_text, \n negative_context=negative_context, negative_context_mask=negative_context_mask,\n mask=mask, masked_latent=masked_latent\n )\n return img" }, { "identifier": "get_named_beta_schedule", "path": "kandinsky3/model/diffusion.py", "snippet": "def get_named_beta_schedule(schedule_name, timesteps):\n if schedule_name == \"linear\":\n scale = 1000 / timesteps\n beta_start = scale * 0.0001\n beta_end = scale * 0.02\n return torch.linspace(\n beta_start, beta_end, timesteps, dtype=torch.float32\n )\n elif schedule_name == \"cosine\":\n alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2\n betas = []\n for i in range(timesteps):\n t1 = i / timesteps\n t2 = (i + 1) / timesteps\n betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), 0.999))\n return torch.tensor(betas, dtype=torch.float32)" }, { "identifier": "resize_image_for_diffusion", "path": "kandinsky3/utils.py", "snippet": "def resize_image_for_diffusion(image):\n reduce_factor = max(1, (image.size[0] * image.size[1] / 1024**2)**0.5)\n image = image.resize((\n (round(image.size[0] / reduce_factor) // 64) * 64, (round(image.size[1] / reduce_factor) // 64) * 64\n ))\n\n return image" }, { "identifier": "resize_mask_for_diffusion", "path": "kandinsky3/utils.py", "snippet": "def resize_mask_for_diffusion(mask):\n reduce_factor = max(1, (mask.size / 1024**2)**0.5)\n resized_mask = resize(\n mask,\n (\n (round(mask.shape[0] / reduce_factor) // 64) * 64,\n 
(round(mask.shape[1] / reduce_factor) // 64) * 64\n ),\n preserve_range=True,\n anti_aliasing=False\n )\n\n return resized_mask" } ]
from typing import Optional, Union, List from tqdm import tqdm from PIL import Image, ImageDraw, ImageFont from torch import einsum from einops import repeat from kandinsky3.model.unet import UNet from kandinsky3.movq import MoVQ from kandinsky3.condition_encoders import T5TextConditionEncoder from kandinsky3.condition_processors import T5TextConditionProcessor from kandinsky3.model.diffusion import BaseDiffusion, get_named_beta_schedule from kandinsky3.utils import resize_image_for_diffusion, resize_mask_for_diffusion import PIL import io import os import math import random import omegaconf import numpy as np import pandas as pd import torch import torchvision.transforms as T
4,724
class Kandinsky3InpaintingPipeline: def __init__( self, device: Union[str, torch.device], unet: UNet, null_embedding: torch.Tensor, t5_processor: T5TextConditionProcessor, t5_encoder: T5TextConditionEncoder, movq: MoVQ, fp16: bool = True ): self.device = device self.fp16 = fp16 self.to_pil = T.ToPILImage() self.to_tensor = T.ToTensor() self.unet = unet self.null_embedding = null_embedding self.t5_processor = t5_processor self.t5_encoder = t5_encoder self.movq = movq def shared_step(self, batch: dict) -> dict: image = batch['image'] condition_model_input = batch['text'] negative_condition_model_input = batch['negative_text'] bs = image.shape[0] masked_latent = None mask = batch['mask'] if 'masked_image' in batch: masked_latent = batch['masked_image'] elif self.unet.in_layer.in_channels == 9: masked_latent = image.masked_fill((1 - mask).bool(), 0) else: raise ValueError() masked_latent = self.movq.encode(masked_latent) mask = torch.nn.functional.interpolate(mask, size=(masked_latent.shape[2], masked_latent.shape[3])) with torch.cuda.amp.autocast(enabled=self.fp16): context, context_mask = self.t5_encoder(condition_model_input) if negative_condition_model_input is not None: negative_context, negative_context_mask = self.t5_encoder(negative_condition_model_input) else: negative_context, negative_context_mask = None, None if self.fp16: mask = mask.to(torch.float16) masked_latent = masked_latent.to(torch.float16) return { 'context': context, 'context_mask': context_mask, 'negative_context': negative_context, 'negative_context_mask': negative_context_mask, 'image': image, 'masked_latent': masked_latent, 'mask': mask } def prepare_batch( self, text: str, negative_text: str, image: PIL.Image.Image, mask: np.ndarray, ) -> dict: condition_model_input, negative_condition_model_input = self.t5_processor.encode(text=text, negative_text=negative_text) batch = {
class Kandinsky3InpaintingPipeline: def __init__( self, device: Union[str, torch.device], unet: UNet, null_embedding: torch.Tensor, t5_processor: T5TextConditionProcessor, t5_encoder: T5TextConditionEncoder, movq: MoVQ, fp16: bool = True ): self.device = device self.fp16 = fp16 self.to_pil = T.ToPILImage() self.to_tensor = T.ToTensor() self.unet = unet self.null_embedding = null_embedding self.t5_processor = t5_processor self.t5_encoder = t5_encoder self.movq = movq def shared_step(self, batch: dict) -> dict: image = batch['image'] condition_model_input = batch['text'] negative_condition_model_input = batch['negative_text'] bs = image.shape[0] masked_latent = None mask = batch['mask'] if 'masked_image' in batch: masked_latent = batch['masked_image'] elif self.unet.in_layer.in_channels == 9: masked_latent = image.masked_fill((1 - mask).bool(), 0) else: raise ValueError() masked_latent = self.movq.encode(masked_latent) mask = torch.nn.functional.interpolate(mask, size=(masked_latent.shape[2], masked_latent.shape[3])) with torch.cuda.amp.autocast(enabled=self.fp16): context, context_mask = self.t5_encoder(condition_model_input) if negative_condition_model_input is not None: negative_context, negative_context_mask = self.t5_encoder(negative_condition_model_input) else: negative_context, negative_context_mask = None, None if self.fp16: mask = mask.to(torch.float16) masked_latent = masked_latent.to(torch.float16) return { 'context': context, 'context_mask': context_mask, 'negative_context': negative_context, 'negative_context_mask': negative_context_mask, 'image': image, 'masked_latent': masked_latent, 'mask': mask } def prepare_batch( self, text: str, negative_text: str, image: PIL.Image.Image, mask: np.ndarray, ) -> dict: condition_model_input, negative_condition_model_input = self.t5_processor.encode(text=text, negative_text=negative_text) batch = {
'image': self.to_tensor(resize_image_for_diffusion(image.convert("RGB"))) * 2 - 1,
6
2023-11-13 10:16:04+00:00
8k
spfrommer/torchexplorer
torchexplorer/render/layout.py
[ { "identifier": "utils", "path": "torchexplorer/utils.py", "snippet": "def iter_not_none(iterable: Iterable[Any]) -> Iterator[Any]:\ndef enum_not_none(iterable: Iterable[Any]) -> Iterator[tuple[int, Any]]:\ndef interleave(l1: list[Any], l2: list[Any]) -> list[Any]:\ndef list_add(l1: list[float], l2: list[float]) -> list[float]:" }, { "identifier": "core", "path": "torchexplorer/core.py", "snippet": "class SizeTracker:\nclass ModuleInvocationHistograms:\nclass ModuleSharedHistograms:\nclass ExplorerMetadata:\nclass ModuleInvocationStructure():\nclass DummyAttachModule(nn.Module):\n def __init__(\n self,\n module: nn.Module,\n invocation_id: InvocationId,\n structure_id: int,\n input_n: int,\n output_n: int\n ):\n def module_metadata(self) -> ExplorerMetadata:\n def get_inner_structure(\n self, module: nn.Module, invocation_id: int\n ) -> Optional['ModuleInvocationStructure']:\n def get_inner_structure_from_memory_id(\n self, memory_id: int\n ) -> Optional['ModuleInvocationStructure']:\n def get_inner_structure_from_id(\n self, structure_id: int\n ) -> Optional['ModuleInvocationStructure']:\n def _inner_filter(self, test_fn: Callable) -> Optional['ModuleInvocationStructure']:\n def str_impl(self) -> str:\n def __init__(self):\n def forward(self, x):" }, { "identifier": "Tooltip", "path": "torchexplorer/components/tooltip.py", "snippet": "class Tooltip:\n \"\"\"The tooltip that pops up next to a Module.\"\"\"\n\n def __init__(self, title: str, keys: list[str], vals: list[str]):\n self.title = title\n self.keys = keys\n self.vals = vals\n \n @classmethod\n def create_io(cls, tracker: SizeTracker) -> 'Tooltip':\n name = tracker.type.split('.')[-1]\n keys, vals = ['size'], [str(tracker.size).replace('None', dash)]\n return Tooltip(name, keys, vals)\n \n @classmethod\n def create_moduleinvocation(\n cls, module: Module, parent_module: Module, invocation_id: InvocationId\n ) -> 'Tooltip':\n\n name_in_parent = cls._get_name_in_parent(module, parent_module)\n\n io_shape_keys, io_shape_vals = cls._get_io_shape_keyvals(module, invocation_id)\n extra_repr_keys, extra_repr_vals = cls._get_extra_repr_keyvals(module)\n\n keys = io_shape_keys + extra_repr_keys\n vals = io_shape_vals + extra_repr_vals\n\n assert len(keys) == len(vals)\n\n return Tooltip(name_in_parent, keys, vals)\n \n @classmethod\n def create_attach(cls, module: Module) -> 'Tooltip':\n return cls.create_io(module.torchexplorer_metadata.input_sizes[0][0])\n \n @classmethod\n def _get_name_in_parent(cls, module: Module, parent_module: Module) -> str:\n name_in_parent = ''\n for name, m in parent_module.named_children():\n if m == module:\n name_in_parent = name\n break\n \n if isinstance(m, ModuleList):\n for i, mm in enumerate(m):\n if mm == module:\n name_in_parent = f'{name}[{i}]'\n break\n \n if isinstance(m, ModuleDict):\n for k, mm in m.items():\n if mm == module:\n name_in_parent = f'{name}[{k}]'\n break\n \n return name_in_parent\n\n @classmethod\n def _get_io_shape_keyvals(\n cls, module: Module, invocation_id: InvocationId\n ) -> tuple[list[str], list[str]]:\n\n metadata = module.torchexplorer_metadata \n\n keys, vals = [], []\n\n one_input = len(metadata.input_sizes[invocation_id]) == 1\n for i, input_tracker in enumerate(metadata.input_sizes[invocation_id]):\n keys.append('in_size' if one_input else f'in{i}_size')\n vals.append(str(input_tracker.size).replace('None', dash))\n \n one_output = len(metadata.output_sizes[invocation_id]) == 1\n for i, output_tracker in enumerate(metadata.output_sizes[invocation_id]):\n 
keys.append('out_size' if one_output else f'out{i}_size')\n vals.append(str(output_tracker.size).replace('None', dash))\n\n return keys, vals\n \n @classmethod\n def _get_extra_repr_keyvals(cls, module: Module) -> tuple[list[str], list[str]]:\n try:\n keys, vals = [], []\n extra_rep = module.extra_repr()\n pairs = re.split(r',\\s*(?![^()]*\\))(?![^[]]*\\])', extra_rep)\n for pair in pairs:\n if pair == '':\n continue\n k, v = pair.split('=') if ('=' in pair) else (dash, pair)\n keys.append(k.strip())\n vals.append(v.strip())\n except Exception:\n keys, vals = [], []\n \n return keys, vals" }, { "identifier": "ModuleInvocationHistograms", "path": "torchexplorer/core.py", "snippet": "class ModuleInvocationHistograms:\n \"\"\"The histograms associated to a particular InvocationId on a module.\"\"\"\n input_hists: list[IncrementalHistogram] = field(default_factory=lambda: [])\n output_hists: list[IncrementalHistogram] = field(default_factory=lambda: [])" }, { "identifier": "ModuleInvocationStructure", "path": "torchexplorer/core.py", "snippet": "class ModuleInvocationStructure():\n \"\"\"The parsed structure of a module invocation.\n\n There can be multiple of these for a particular module if that module's forward\n method is invoked multiple times on the forwards pass of a parent.\"\"\"\n\n def __init__(\n self,\n module: nn.Module,\n invocation_id: InvocationId,\n structure_id: int,\n input_n: int,\n output_n: int\n ):\n\n self.module = module\n self.invocation_id = invocation_id\n # A unique id for this structure, to enable caching of graphviz calls\n self.structure_id = structure_id\n\n # Nodes are either 'Input x'/'Output x' strings or ModuleInvocationStructures\n self.inner_graph = nx.DiGraph()\n\n for i in range(input_n):\n name = f'Input {i}'\n self.inner_graph.add_node(name, memory_id=None, label=name)\n \n for i in range(output_n):\n name = f'Output {i}'\n self.inner_graph.add_node(name, memory_id=None, label=name)\n\n self.upstreams_fetched = False\n\n self.graphviz_json_cache: Optional[dict] = None\n\n def module_metadata(self) -> ExplorerMetadata:\n return self.module.torchexplorer_metadata\n\n def get_inner_structure(\n self, module: nn.Module, invocation_id: int\n ) -> Optional['ModuleInvocationStructure']:\n\n return self._inner_filter(\n lambda node: node.module == module and node.invocation_id == invocation_id\n )\n\n def get_inner_structure_from_memory_id(\n self, memory_id: int\n ) -> Optional['ModuleInvocationStructure']:\n\n return self._inner_filter(lambda node: id(node) == memory_id)\n\n def get_inner_structure_from_id(\n self, structure_id: int\n ) -> Optional['ModuleInvocationStructure']:\n\n return self._inner_filter(lambda node: node.structure_id == structure_id)\n\n def _inner_filter(self, test_fn: Callable) -> Optional['ModuleInvocationStructure']:\n for node in self.inner_graph.nodes:\n if isinstance(node, ModuleInvocationStructure):\n if test_fn(node):\n return node\n\n return None\n\n # NOTE: Overriding __str__ breaks the graphviz rendering...\n def str_impl(self) -> str:\n return f'{self.module.__class__.__name__}, Invocation {self.invocation_id}'" }, { "identifier": "is_input_node", "path": "torchexplorer/structure/structure.py", "snippet": "def is_input_node(node) -> bool:\n if not isinstance(node, str):\n return False\n return bool(re.match(r'Input \\d+', node)) or (node=='Input')" }, { "identifier": "is_io_node", "path": "torchexplorer/structure/structure.py", "snippet": "def is_io_node(node) -> bool:\n return is_input_node(node) or is_output_node(node)" 
}, { "identifier": "EdgeLayout", "path": "torchexplorer/render/structs.py", "snippet": "class EdgeLayout:\n path_points: list[list[float]]\n arrowhead_points: list[list[float]]\n downstream_input_index: Optional[int]\n upstream_output_index: Optional[int]" }, { "identifier": "TooltipLayout", "path": "torchexplorer/render/structs.py", "snippet": "class TooltipLayout:\n tooltip: Tooltip\n\n # Coordinates in parent of the layout this tooltip belongs to\n bottom_left_corner: list[float] = field(default_factory=lambda: [0, 0]) \n top_right_corner: list[float] = field(default_factory=lambda: [0, 0])" }, { "identifier": "NodeLayout", "path": "torchexplorer/render/structs.py", "snippet": "class NodeLayout:\n display_name: Optional[str] = None\n tooltip: Optional[TooltipLayout] = None\n\n invocation_hists: Optional[ModuleInvocationHistograms] = None\n invocation_grad_hists: Optional[ModuleInvocationHistograms] = None\n shared_hists: Optional[ModuleSharedHistograms] = None\n\n # Coordinates in parent layout\n bottom_left_corner: list[float] = field(default_factory=lambda: [0, 0]) \n top_right_corner: list[float] = field(default_factory=lambda: [0, 0]) \n\n # Inner graph data\n inner_graph_layouts: list['NodeLayout'] = (\n field(default_factory=lambda: [])\n )\n inner_graph_edges: list[EdgeLayout] = field(default_factory=lambda: [])\n\n # Data added in the _process_graph function, after everything has been layed out\n # These ids are not related to the structure_id of the ModuleInvocationStructure\n id: Optional[int] = None\n parent_id: Optional[int] = None\n # Parent stack includes current layout (this goes into the parents view in vega)\n parent_stack: Optional[list[tuple[str, int]]] = None\n child_ids: Optional[list[int]] = None" } ]
import copy import html import json import string import numpy as np import networkx as nx from typing import Optional, Union from subprocess import Popen, PIPE from torchexplorer import utils from torchexplorer import core from torchexplorer.components.tooltip import Tooltip from torchexplorer.core import ModuleInvocationHistograms, ModuleInvocationStructure from torchexplorer.structure.structure import is_input_node, is_io_node from torchexplorer.render.structs import ( EdgeLayout, TooltipLayout, NodeLayout )
4,089
)[parent_invocation_id][io_index] _add_tooltip(layout, Tooltip.create_io(io_tensor_shape)) has_io_hists = parent_invocation_id in parent_metadata.invocation_hists if has_io_hists: hists = parent_metadata.invocation_hists[parent_invocation_id] hist = (hists.input_hists if is_input else hists.output_hists)[io_index] has_grad_hists = parent_invocation_id in parent_metadata.invocation_grad_hists if has_grad_hists: grad_hists = parent_metadata.invocation_grad_hists[parent_invocation_id] grad_hist = ( grad_hists.input_hists if is_input else grad_hists.output_hists )[io_index] layout.invocation_hists = ModuleInvocationHistograms( input_hists=[hist] if has_io_hists else [], output_hists=[hist] if has_io_hists else [] ) layout.invocation_grad_hists = ModuleInvocationHistograms( input_hists=[grad_hist] if has_grad_hists else [], output_hists=[grad_hist] if has_grad_hists else [] ) def _layout_moduleinvocation_node( layout: NodeLayout, parent_structure: ModuleInvocationStructure, object: dict ) -> ModuleInvocationStructure: structure_id = int(object['structure_id']) object_struct = parent_structure.get_inner_structure_from_id(structure_id) assert object_struct is not None if isinstance(object_struct.module, core.DummyAttachModule): _add_tooltip(layout, Tooltip.create_attach(object_struct.module)) else: _add_tooltip(layout, Tooltip.create_moduleinvocation( object_struct.module, parent_structure.module, object_struct.invocation_id )) metadata = object_struct.module_metadata() if object_struct.invocation_id in metadata.invocation_hists: layout.invocation_hists = ( metadata.invocation_hists[object_struct.invocation_id] ) if object_struct.invocation_id in metadata.invocation_grad_hists: layout.invocation_grad_hists = ( metadata.invocation_grad_hists[object_struct.invocation_id] ) layout.shared_hists = metadata.shared_hists return object_struct def _add_tooltip(layout: NodeLayout, tooltip: Tooltip) -> None: tooltip_title_size, tooltip_font_size = 14, 11 def _handle_string(str, truncate=False, title=False): font_size = tooltip_title_size if title else tooltip_font_size truncate_width = 70 return _truncate_string_width(str, font_size, truncate_width, truncate) for i, key in enumerate(tooltip.keys): tooltip.keys[i] = _handle_string(key, True)[0] line_widths = [_handle_string(tooltip.title, False, True)[1]] for key, val in zip(tooltip.keys, tooltip.vals): line_widths.append(_handle_string(f'{key}{val}', False)[1]) tooltip_width = max(line_widths) * 0.95 + 20 tooltip_lines = 1 + len(tooltip.keys) tooltip_height = 20 + (tooltip_font_size + 2) * tooltip_lines tooltip_bl = [ layout.top_right_corner[0] + 20, _center(layout)[1] - tooltip_height / 2 ] tooltip_tr = [tooltip_bl[0] + tooltip_width, tooltip_bl[1] + tooltip_height] layout.tooltip = TooltipLayout( tooltip, bottom_left_corner=tooltip_bl, top_right_corner=tooltip_tr ) def _process_graph(layout: NodeLayout): layout_id_counter = 0 def process_graph_layout( l: NodeLayout, parent_id: int, parent_stack: list[tuple[str, int]] ) -> list[int]: nonlocal layout_id_counter new_id = layout_id_counter layout_id_counter += 1 assert l.display_name is not None new_stack = parent_stack + [(l.display_name, new_id)] child_ids = [] for inner_r in l.inner_graph_layouts: child_ids += process_graph_layout(inner_r, new_id, new_stack) l.id = new_id l.parent_id = parent_id l.parent_stack = new_stack l.child_ids = child_ids return [new_id] + child_ids process_graph_layout(layout, -1, []) def _translate_inner_layouts(layout: NodeLayout) -> None: """Translate visual components to be 
centered around the input node.""" target_input_pos = [0.0, 0.0] # Based on where vega spec expects input to be input_centers = [] for l in layout.inner_graph_layouts: if is_input_node(l.display_name): input_centers.append(_center(l)) center = np.mean(np.array(input_centers), axis=0)
from __future__ import annotations def layout( structure: ModuleInvocationStructure, cache: Optional[dict] = None ) -> tuple[NodeLayout, dict]: name = structure.module.__class__.__name__ if is_io_node(name): raise RuntimeError(f'Invalid module name: {name}') layout = NodeLayout(display_name=name) if cache is None: _layout_into(layout, structure, None) cache = {'cached_structure': structure} else: _layout_into(layout, structure, cache['cached_structure']) _process_graph(layout) return layout, cache def _layout_into( layout: NodeLayout, structure: ModuleInvocationStructure, cached_structure: Optional[ModuleInvocationStructure] = None ): json_data = _get_graphviz_json_with_caching(structure, cached_structure) for object in json_data['objects']: draw_points = np.array(object['_draw_'][1]['points']) draw_xs, draw_ys = draw_points[:, 0], draw_points[:, 1] inner_layout = NodeLayout() # Replace the attach module label inner_layout.display_name = object['label'].replace('<>', ' ᴬ') inner_layout.bottom_left_corner = [draw_xs.min(), draw_ys.min()] inner_layout.top_right_corner = [draw_xs.max(), draw_ys.max()] if is_io_node(object['label']): _layout_io_node(inner_layout, structure, object) else: struct = _layout_moduleinvocation_node(inner_layout, structure, object) _layout_into(inner_layout, struct, object['cached_structure']) layout.inner_graph_layouts.append(inner_layout) if 'edges' in json_data: for edge in json_data['edges']: layout.inner_graph_edges.append(EdgeLayout( path_points=edge['_draw_'][-1]['points'], arrowhead_points=edge['_hdraw_'][-1]['points'], downstream_input_index=int(edge['downstream_input_index']), upstream_output_index=int(edge['upstream_output_index']), )) _translate_inner_layouts(layout) def _layout_io_node( layout: NodeLayout, parent_structure: ModuleInvocationStructure, object: dict ) -> None: is_input = is_input_node(object['label']) parent_metadata = parent_structure.module_metadata() parent_invocation_id = parent_structure.invocation_id io_index = int(object['name'].split(' ')[-1]) io_tensor_shape = ( parent_metadata.input_sizes if is_input else parent_metadata.output_sizes )[parent_invocation_id][io_index] _add_tooltip(layout, Tooltip.create_io(io_tensor_shape)) has_io_hists = parent_invocation_id in parent_metadata.invocation_hists if has_io_hists: hists = parent_metadata.invocation_hists[parent_invocation_id] hist = (hists.input_hists if is_input else hists.output_hists)[io_index] has_grad_hists = parent_invocation_id in parent_metadata.invocation_grad_hists if has_grad_hists: grad_hists = parent_metadata.invocation_grad_hists[parent_invocation_id] grad_hist = ( grad_hists.input_hists if is_input else grad_hists.output_hists )[io_index] layout.invocation_hists = ModuleInvocationHistograms( input_hists=[hist] if has_io_hists else [], output_hists=[hist] if has_io_hists else [] ) layout.invocation_grad_hists = ModuleInvocationHistograms( input_hists=[grad_hist] if has_grad_hists else [], output_hists=[grad_hist] if has_grad_hists else [] ) def _layout_moduleinvocation_node( layout: NodeLayout, parent_structure: ModuleInvocationStructure, object: dict ) -> ModuleInvocationStructure: structure_id = int(object['structure_id']) object_struct = parent_structure.get_inner_structure_from_id(structure_id) assert object_struct is not None if isinstance(object_struct.module, core.DummyAttachModule): _add_tooltip(layout, Tooltip.create_attach(object_struct.module)) else: _add_tooltip(layout, Tooltip.create_moduleinvocation( object_struct.module, parent_structure.module, 
object_struct.invocation_id )) metadata = object_struct.module_metadata() if object_struct.invocation_id in metadata.invocation_hists: layout.invocation_hists = ( metadata.invocation_hists[object_struct.invocation_id] ) if object_struct.invocation_id in metadata.invocation_grad_hists: layout.invocation_grad_hists = ( metadata.invocation_grad_hists[object_struct.invocation_id] ) layout.shared_hists = metadata.shared_hists return object_struct def _add_tooltip(layout: NodeLayout, tooltip: Tooltip) -> None: tooltip_title_size, tooltip_font_size = 14, 11 def _handle_string(str, truncate=False, title=False): font_size = tooltip_title_size if title else tooltip_font_size truncate_width = 70 return _truncate_string_width(str, font_size, truncate_width, truncate) for i, key in enumerate(tooltip.keys): tooltip.keys[i] = _handle_string(key, True)[0] line_widths = [_handle_string(tooltip.title, False, True)[1]] for key, val in zip(tooltip.keys, tooltip.vals): line_widths.append(_handle_string(f'{key}{val}', False)[1]) tooltip_width = max(line_widths) * 0.95 + 20 tooltip_lines = 1 + len(tooltip.keys) tooltip_height = 20 + (tooltip_font_size + 2) * tooltip_lines tooltip_bl = [ layout.top_right_corner[0] + 20, _center(layout)[1] - tooltip_height / 2 ] tooltip_tr = [tooltip_bl[0] + tooltip_width, tooltip_bl[1] + tooltip_height] layout.tooltip = TooltipLayout( tooltip, bottom_left_corner=tooltip_bl, top_right_corner=tooltip_tr ) def _process_graph(layout: NodeLayout): layout_id_counter = 0 def process_graph_layout( l: NodeLayout, parent_id: int, parent_stack: list[tuple[str, int]] ) -> list[int]: nonlocal layout_id_counter new_id = layout_id_counter layout_id_counter += 1 assert l.display_name is not None new_stack = parent_stack + [(l.display_name, new_id)] child_ids = [] for inner_r in l.inner_graph_layouts: child_ids += process_graph_layout(inner_r, new_id, new_stack) l.id = new_id l.parent_id = parent_id l.parent_stack = new_stack l.child_ids = child_ids return [new_id] + child_ids process_graph_layout(layout, -1, []) def _translate_inner_layouts(layout: NodeLayout) -> None: """Translate visual components to be centered around the input node.""" target_input_pos = [0.0, 0.0] # Based on where vega spec expects input to be input_centers = [] for l in layout.inner_graph_layouts: if is_input_node(l.display_name): input_centers.append(_center(l)) center = np.mean(np.array(input_centers), axis=0)
trans = utils.list_add(target_input_pos, [-center[0], -center[1]])
0
2023-11-13 05:56:04+00:00
8k
namin/llm-verified-with-monte-carlo-tree-search
run_meta.py
[ { "identifier": "args", "path": "cmdline.py", "snippet": "class CommonArguments:\ndef get_args():" }, { "identifier": "Node", "path": "montecarlo/node.py", "snippet": "class Node:\n def __init__(self, state):\n self.state = state\n self.win_value = 0\n self.policy_value = None\n self.visits = 0\n self.parent = None\n self.children = []\n self.expanded = False\n self.player_number = None\n self.discovery_factor = 0.35\n\n def update_win_value(self, value):\n self.win_value += value\n self.visits += 1\n\n if self.parent:\n self.parent.update_win_value(value)\n\n def update_policy_value(self, value):\n self.policy_value = value\n\n def add_child(self, child):\n self.children.append(child)\n child.parent = self\n\n def add_children(self, children):\n for child in children:\n self.add_child(child)\n\n def get_preferred_child(self, root_node):\n best_children = []\n best_score = float(\"-inf\")\n\n for child in self.children:\n score = child.get_score(root_node)\n\n if score > best_score:\n best_score = score\n best_children = [child]\n elif score == best_score:\n best_children.append(child)\n\n return random.choice(best_children)\n\n def get_score(self, root_node):\n discovery_operand = (\n self.discovery_factor\n * (self.policy_value or 1)\n * sqrt(log(self.parent.visits) / (self.visits or 1))\n )\n\n win_multiplier = (\n 1 if self.parent.player_number == root_node.player_number else -1\n )\n win_operand = win_multiplier * self.win_value / (self.visits or 1)\n\n self.score = win_operand + discovery_operand\n\n return self.score\n\n def is_scorable(self):\n return self.visits or self.policy_value != None\n \n def print_node(self, f, i, root, st):\n escape = lambda x : json.dumps(x).strip('\"')\n if self.parent is None:\n f.write((' ' * i) + st + \" [label=\\\"\" + escape(self.state) + \"\\\",shape=box]\\n\")\n else:\n diff = '\\n'.join([x for x in self.state.split(\"\\n\") if x not in self.parent.state.split(\"\\n\")])\n f.write((' ' * i) + st + \" [label=\\\"\" + escape(diff) + \"\\\",shape=box]\\n\")\n\n num = 0\n for child in self.children:\n new_st = st + \"_\" + str(num)\n child.print_node(f, i + 2, root, new_st)\n f.write(' ' * i + st + \" -- \" + new_st + \"\\n\")\n num = num + 1" }, { "identifier": "MonteCarlo", "path": "montecarlo/montecarlo.py", "snippet": "class MonteCarlo:\n def __init__(self, root_node):\n self.root_node = root_node\n self.solution = None\n self.child_finder = None\n self.node_evaluator = lambda child, montecarlo: None\n self.stats_expansion_count = 0\n self.stats_failed_expansion_count = 0\n\n def make_choice(self):\n best_children = []\n most_visits = float(\"-inf\")\n\n for child in self.root_node.children:\n if child.visits > most_visits:\n most_visits = child.visits\n best_children = [child]\n elif child.visits == most_visits:\n best_children.append(child)\n\n return random.choice(best_children)\n\n def make_exploratory_choice(self):\n children_visits = map(lambda child: child.visits, self.root_node.children)\n children_visit_probabilities = [\n visit / self.root_node.visits for visit in children_visits\n ]\n random_probability = random.uniform(0, 1)\n probabilities_already_counted = 0.0\n\n for i, probability in enumerate(children_visit_probabilities):\n if probabilities_already_counted + probability >= random_probability:\n return self.root_node.children[i]\n\n probabilities_already_counted += probability\n\n def simulate(self, expansion_count=1):\n i = 0\n while expansion_count is None or i < expansion_count:\n i += 1\n\n if self.solution is not None:\n 
return\n\n current_node = self.root_node\n\n while current_node.expanded:\n current_node = current_node.get_preferred_child(self.root_node)\n\n self.expand(current_node)\n\n def expand(self, node):\n self.stats_expansion_count += 1\n self.child_finder(node, self)\n\n for child in node.children:\n child_win_value = self.node_evaluator(child, self)\n\n if child_win_value != None:\n child.update_win_value(child_win_value)\n\n if not child.is_scorable():\n self.random_rollout(child)\n child.children = []\n\n if len(node.children):\n node.expanded = True\n else:\n self.stats_failed_expansion_count += 1\n\n def random_rollout(self, node):\n self.child_finder(node, self)\n child = random.choice(node.children)\n node.children = []\n node.add_child(child)\n child_win_value = self.node_evaluator(child, self)\n\n if child_win_value != None:\n node.update_win_value(child_win_value)\n else:\n self.random_rollout(child)\n\n def print_tree(self, f):\n f.write(\"graph\\n{\\n\")\n self.root_node.print_node(f, 0, self.root_node, \"a\")\n f.write(\"}\\n\")" }, { "identifier": "LANG", "path": "lang_config.py", "snippet": "LANG = args.language" }, { "identifier": "can_be_solution", "path": "lang.py", "snippet": "def can_be_solution(msg: str, min_lines: int, check_func=None) -> bool:\ndef find_largest_new_block(old_text: str, text: str) -> str:\ndef find_largest_new_block_code(old_code: str, code: str) -> str:" }, { "identifier": "give_context", "path": "coq.py", "snippet": "def give_context(v: str) -> (str, str):\n r = checkCoq(v, giveDetails=True)\n return ((r[\"details\"] or \"\"), r[\"out\"])" }, { "identifier": "extract_lemma", "path": "coq.py", "snippet": "def extract_lemma(v):\n f = io.StringIO()\n with redirect_stderr(f):\n r = annotate([v])\n err = f.getvalue()\n gs = [x for x in r[0] if hasattr(x, \"goals\") and x.goals != []]\n if gs != []:\n g = gs[-1].goals[0]\n return g, err\n return None, err" }, { "identifier": "lemma_statement", "path": "coq.py", "snippet": "def lemma_statement(g):\n s = \"\"\n for h in g.hypotheses:\n s += \"forall \" + \" \".join(h.names) + \" : \" + h.type + \", \"\n s += g.conclusion\n return s" }, { "identifier": "lemma_args", "path": "coq.py", "snippet": "def lemma_args(g):\n return \" \".join([\" \".join(h.names) for h in g.hypotheses])" }, { "identifier": "new_conclusion", "path": "coq.py", "snippet": "def new_conclusion(goal, code):\n conclusion = goal.conclusion.split(\",\")[-1].strip() # TODO: a bit crude?\n return conclusion not in code" }, { "identifier": "score_func_code", "path": "coq.py", "snippet": "def score_func_code(sentence: str) -> (Optional[float], Optional[str]):\n print(\"TEXT\")\n print(sentence)\n score, v = calculateScoreHelper(sentence)\n print(\"SCORE\")\n print(score)\n return score, v" }, { "identifier": "prompt", "path": "prompts.py", "snippet": "NO_CHECK_PROOF = lambda v: True\n CHECK_PROOF = lambda v: proof_marker in v\n CHECK_PROOF2 = lambda v: v.count(proof_marker) >= 2\n CHECK_PROOF = NO_CHECK_PROOF\nNO_CHECK_CHEAT = lambda v: False\n CHECK_CHEAT = lambda v: cheat_marker in v\n CHECK_CHEAT = NO_CHECK_CHEAT\nEXTRA_CONSTANT_FOLDING = \" and performs all additions by constants\"\nEXTRA_CONSTANT_FOLDING = \"\"\ndef remove_hints2(prompt):\ndef remove_hints3(prompt):\ndef remove_hints(prompt):" }, { "identifier": "limit_depth", "path": "common.py", "snippet": "def count_depth(node, f=lambda x: x):\ndef limit_depth(node, f=lambda x: x):" }, { "identifier": "score_first", "path": "common_cache.py", "snippet": "def score_first(x):\n return x[0]" }, 
{ "identifier": "create_score_predicate", "path": "common_cache.py", "snippet": "def create_score_predicate(f=lambda x: x):\n def fetch(x):\n score = f(x)\n return score is None or score > 0\n return fetch" }, { "identifier": "create_cached_func", "path": "common_cache.py", "snippet": "def create_cached_func(f):\n cache = {}\n stats = {'hit': 0, 'miss': 0}\n def fetch(x):\n INITIAL = object()\n y = cache.get(x, INITIAL)\n if y == INITIAL:\n stats['miss'] += 1\n y = f(x)\n cache[x] = y\n else:\n stats['hit'] += 1\n return y\n return fetch, stats" }, { "identifier": "select_diversely_with_scores", "path": "common_diversity.py", "snippet": "def select_diversely_with_scores(texts, scores, score_predicate, features, montecarlo):\n def select(texts, indices):\n return select_diversely(texts, features[indices], montecarlo)\n return select_with_scores(texts, scores, score_predicate, select)" }, { "identifier": "DIVERSITY", "path": "common_diversity.py", "snippet": "DIVERSITY = args.diversity" }, { "identifier": "limit", "path": "common_diversity.py", "snippet": "def limit(x):\n if DIVERSITY:\n return x[0:200]\n else:\n return x" }, { "identifier": "ask_keep", "path": "common_interactive.py", "snippet": "def ask_keep(prompt, texts):\n i = 0\n for t in diffprompt(prompt, texts):\n print(i, t)\n i += 1\n inp = input(\"Keep which? [0...] or comment: \").strip()\n try:\n return int(inp)\n except ValueError:\n return inp" }, { "identifier": "diffprompt", "path": "common_interactive.py", "snippet": "def diffprompt(prompt, results):\n n = len(strip_instructions(prompt))\n return [strip_instructions(r)[n:] for r in results]" }, { "identifier": "stats", "path": "common_stats.py", "snippet": "def stats(montecarlo, f=lambda x: x):\n n_nodes = 0\n n_gen_nodes = 0\n n_back_nodes = 0\n n_gen_leaves = 0\n n_back_leaves = 0\n queue = [montecarlo.root_node]\n while queue != []:\n node = queue.pop()\n n_nodes += 1\n is_back = node.parent is not None and f(node.state) == f(node.parent.state)\n is_leaf = node.children == []\n if is_back:\n n_back_nodes += 1\n else:\n n_gen_nodes += 1\n if is_leaf:\n if is_back:\n n_back_leaves += 1\n else:\n n_gen_leaves += 1\n queue += node.children\n\n print(f\"\"\"\nSTATS\n\nnumber of nodes: {n_nodes}\nnumber of gen nodes: {n_gen_nodes} (including leaves: {n_gen_leaves})\nnumber of back nodes: {n_back_nodes} (including leaves: {n_back_leaves})\n\nexpansion count: {montecarlo.stats_expansion_count} (including failed: {montecarlo.stats_failed_expansion_count})\n\"\"\")\n\n return (n_nodes, n_gen_nodes, n_back_nodes, n_gen_leaves, n_back_leaves)" }, { "identifier": "bad_words_ids", "path": "common_bad_words.py", "snippet": "def get_tokens_as_list(word_list):\ndef get_bad_words_ids():" } ]
from cmdline import args from montecarlo.node import Node from montecarlo.montecarlo import MonteCarlo from lang_config import LANG from lang import can_be_solution, filter_code from coq import give_context, extract_lemma, lemma_statement, lemma_args, new_conclusion from coq import score_func_code as uncached_score_func_code from prompts import prompt, expansion_count, min_lines, check_func from common import limit_depth, max_completion_depth from common_cache import score_first, create_score_predicate, create_cached_func from common_diversity import select_diversely_with_scores, DIVERSITY, limit from common_interactive import ask_keep, diffprompt from common_stats import stats from common_bad_words import bad_words_ids import re import llm
3,845
USE_HAMMER = args.use_hammer EXTRACT_LEMMA_DEPTH = args.extract_lemma_depth EXPLORE_MANY = args.explore_many assert LANG=='Coq' score_func_code, cache_stats = create_cached_func(uncached_score_func_code) score_predicate = create_score_predicate(score_first) class FocusNode: def __init__(self, instructions, code, stack, lemma_counter): (context, outlog) = give_context(code) self.instructions = instructions self.context = context self.code = code self.outlog = outlog self.stack = stack self.lemma_counter = lemma_counter def update(self, text): code = filter_code(text+"```").lstrip() return FocusNode(self.instructions, code, self.stack, self.lemma_counter) def update_lemma(self, goal, code): name = self.lemma_name(self.lemma_counter) statement = lemma_statement(goal) args = lemma_args(goal) last_lemma_index = list(re.finditer(r"Lemma|Theorem", code))[-1].start(0) last_lemma = code[last_lemma_index:] code = code[:last_lemma_index] last_lemma += f" apply (@{name} {args}).\n" stack = [last_lemma] + self.stack code += "\n" code += f"Lemma {name}: {statement}.\nProof.\n" if USE_HAMMER: code += "(* do not use induction, try using hammer *)\n" print(f'Created Lemma {name}.') return FocusNode(self.instructions, code, stack, self.lemma_counter+1) def update_pop(self, text): code = filter_code(text+"```").lstrip() last_lemma = self.stack[0] stack = self.stack[1:] code += "\n\n" code += last_lemma return FocusNode(self.instructions, code, stack, self.lemma_counter) def lemma_name(self, counter): return "helper"+str(counter) def text(self): return f""" <s>[INST] <<SYS>> You are a Coq programmer that writes functional code and prove properties about it. When you are unsure of which lemmas to use, you use the `Search` function, for example `Search (0 < _).`. You can see the output of the Coq verifier in the Out section, and the context of the current proof, comprising the current goal and assumptions, in the Context section. The assumptions have names that you can use in your proofs. {'''You can use Coq Hammer, including the tactic `hammer` to attempt to discharge a goal automatically. To use Coq Hammer effectively, combine it with destruct using `;`: `destruct e1; destruct e2; hammer`.''' if USE_HAMMER else ''} You take a single step and will be given feedback -- listen to the feedback in the instructions. <</SYS>> ## Instructions {self.instructions} ## Out {limit(self.outlog)} ## Context {limit(self.context)} [/INST] ## Code ```{LANG} {self.code}""" def generate_complete(focus, montecarlo): text = focus.text() prev = text if DIVERSITY: texts, features = llm.generate(text, 5, return_hiddens=True, bad_words_ids=bad_words_ids) scores = [score_func_code(text) for text in texts] text, (score, code) = select_diversely_with_scores(texts, scores, score_predicate, features, montecarlo) elif EXPLORE_MANY: texts = llm.generate(text, 5, bad_words_ids=bad_words_ids) idx = 0 for i in range(len(texts)): if score_predicate(score_func_code(texts[i])): idx = i break text = texts[idx] score, code = score_func_code(text) else: texts = llm.generate(text, 1, bad_words_ids=bad_words_ids) text = texts[0] score, code = score_func_code(text)
USE_HAMMER = args.use_hammer EXTRACT_LEMMA_DEPTH = args.extract_lemma_depth EXPLORE_MANY = args.explore_many assert LANG=='Coq' score_func_code, cache_stats = create_cached_func(uncached_score_func_code) score_predicate = create_score_predicate(score_first) class FocusNode: def __init__(self, instructions, code, stack, lemma_counter): (context, outlog) = give_context(code) self.instructions = instructions self.context = context self.code = code self.outlog = outlog self.stack = stack self.lemma_counter = lemma_counter def update(self, text): code = filter_code(text+"```").lstrip() return FocusNode(self.instructions, code, self.stack, self.lemma_counter) def update_lemma(self, goal, code): name = self.lemma_name(self.lemma_counter) statement = lemma_statement(goal) args = lemma_args(goal) last_lemma_index = list(re.finditer(r"Lemma|Theorem", code))[-1].start(0) last_lemma = code[last_lemma_index:] code = code[:last_lemma_index] last_lemma += f" apply (@{name} {args}).\n" stack = [last_lemma] + self.stack code += "\n" code += f"Lemma {name}: {statement}.\nProof.\n" if USE_HAMMER: code += "(* do not use induction, try using hammer *)\n" print(f'Created Lemma {name}.') return FocusNode(self.instructions, code, stack, self.lemma_counter+1) def update_pop(self, text): code = filter_code(text+"```").lstrip() last_lemma = self.stack[0] stack = self.stack[1:] code += "\n\n" code += last_lemma return FocusNode(self.instructions, code, stack, self.lemma_counter) def lemma_name(self, counter): return "helper"+str(counter) def text(self): return f""" <s>[INST] <<SYS>> You are a Coq programmer that writes functional code and prove properties about it. When you are unsure of which lemmas to use, you use the `Search` function, for example `Search (0 < _).`. You can see the output of the Coq verifier in the Out section, and the context of the current proof, comprising the current goal and assumptions, in the Context section. The assumptions have names that you can use in your proofs. {'''You can use Coq Hammer, including the tactic `hammer` to attempt to discharge a goal automatically. To use Coq Hammer effectively, combine it with destruct using `;`: `destruct e1; destruct e2; hammer`.''' if USE_HAMMER else ''} You take a single step and will be given feedback -- listen to the feedback in the instructions. <</SYS>> ## Instructions {self.instructions} ## Out {limit(self.outlog)} ## Context {limit(self.context)} [/INST] ## Code ```{LANG} {self.code}""" def generate_complete(focus, montecarlo): text = focus.text() prev = text if DIVERSITY: texts, features = llm.generate(text, 5, return_hiddens=True, bad_words_ids=bad_words_ids) scores = [score_func_code(text) for text in texts] text, (score, code) = select_diversely_with_scores(texts, scores, score_predicate, features, montecarlo) elif EXPLORE_MANY: texts = llm.generate(text, 5, bad_words_ids=bad_words_ids) idx = 0 for i in range(len(texts)): if score_predicate(score_func_code(texts[i])): idx = i break text = texts[idx] score, code = score_func_code(text) else: texts = llm.generate(text, 1, bad_words_ids=bad_words_ids) text = texts[0] score, code = score_func_code(text)
print(diffprompt(prev, texts))
20
2023-11-11 19:56:04+00:00
8k
BraveGroup/Drive-WM
src/diffusers/models/resnet.py
[ { "identifier": "USE_PEFT_BACKEND", "path": "src/diffusers/utils/constants.py", "snippet": "USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version" }, { "identifier": "get_activation", "path": "src/diffusers/models/activations.py", "snippet": "def get_activation(act_fn: str) -> nn.Module:\n \"\"\"Helper function to get activation function from string.\n\n Args:\n act_fn (str): Name of activation function.\n\n Returns:\n nn.Module: Activation function.\n \"\"\"\n\n act_fn = act_fn.lower()\n if act_fn in ACTIVATION_FUNCTIONS:\n return ACTIVATION_FUNCTIONS[act_fn]\n else:\n raise ValueError(f\"Unsupported activation function: {act_fn}\")" }, { "identifier": "SpatialNorm", "path": "src/diffusers/models/attention_processor.py", "snippet": "class SpatialNorm(nn.Module):\n \"\"\"\n Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002.\n\n Args:\n f_channels (`int`):\n The number of channels for input to group normalization layer, and output of the spatial norm layer.\n zq_channels (`int`):\n The number of channels for the quantized vector as described in the paper.\n \"\"\"\n\n def __init__(\n self,\n f_channels: int,\n zq_channels: int,\n ):\n super().__init__()\n self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True)\n self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0)\n self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, f: torch.FloatTensor, zq: torch.FloatTensor) -> torch.FloatTensor:\n f_size = f.shape[-2:]\n zq = F.interpolate(zq, size=f_size, mode=\"nearest\")\n norm_f = self.norm_layer(f)\n new_f = norm_f * self.conv_y(zq) + self.conv_b(zq)\n return new_f" }, { "identifier": "LoRACompatibleConv", "path": "src/diffusers/models/lora.py", "snippet": "class LoRACompatibleConv(nn.Conv2d):\n \"\"\"\n A convolutional layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRAConv2dLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRAConv2dLayer]):\n self.lora_layer = lora_layer\n\n def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):\n if self.lora_layer is None:\n return\n\n dtype, device = self.weight.data.dtype, self.weight.data.device\n\n w_orig = self.weight.data.float()\n w_up = self.lora_layer.up.weight.data.float()\n w_down = self.lora_layer.down.weight.data.float()\n\n if self.lora_layer.network_alpha is not None:\n w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank\n\n fusion = torch.mm(w_up.flatten(start_dim=1), w_down.flatten(start_dim=1))\n fusion = fusion.reshape((w_orig.shape))\n fused_weight = w_orig + (lora_scale * fusion)\n\n if safe_fusing and torch.isnan(fused_weight).any().item():\n raise ValueError(\n \"This LoRA weight seems to be broken. 
\"\n f\"Encountered NaN values when trying to fuse LoRA weights for {self}.\"\n \"LoRA weights will not be fused.\"\n )\n\n self.weight.data = fused_weight.to(device=device, dtype=dtype)\n\n # we can drop the lora layer now\n self.lora_layer = None\n\n # offload the up and down matrices to CPU to not blow the memory\n self.w_up = w_up.cpu()\n self.w_down = w_down.cpu()\n self._lora_scale = lora_scale\n\n def _unfuse_lora(self):\n if not (getattr(self, \"w_up\", None) is not None and getattr(self, \"w_down\", None) is not None):\n return\n\n fused_weight = self.weight.data\n dtype, device = fused_weight.data.dtype, fused_weight.data.device\n\n self.w_up = self.w_up.to(device=device).float()\n self.w_down = self.w_down.to(device).float()\n\n fusion = torch.mm(self.w_up.flatten(start_dim=1), self.w_down.flatten(start_dim=1))\n fusion = fusion.reshape((fused_weight.shape))\n unfused_weight = fused_weight.float() - (self._lora_scale * fusion)\n self.weight.data = unfused_weight.to(device=device, dtype=dtype)\n\n self.w_up = None\n self.w_down = None\n\n def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:\n if self.lora_layer is None:\n # make sure to the functional Conv2D function as otherwise torch.compile's graph will break\n # see: https://github.com/huggingface/diffusers/pull/4315\n return F.conv2d(\n hidden_states, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups\n )\n else:\n original_outputs = F.conv2d(\n hidden_states, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups\n )\n return original_outputs + (scale * self.lora_layer(hidden_states))" }, { "identifier": "LoRACompatibleLinear", "path": "src/diffusers/models/lora.py", "snippet": "class LoRACompatibleLinear(nn.Linear):\n \"\"\"\n A Linear layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):\n self.lora_layer = lora_layer\n\n def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):\n if self.lora_layer is None:\n return\n\n dtype, device = self.weight.data.dtype, self.weight.data.device\n\n w_orig = self.weight.data.float()\n w_up = self.lora_layer.up.weight.data.float()\n w_down = self.lora_layer.down.weight.data.float()\n\n if self.lora_layer.network_alpha is not None:\n w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank\n\n fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n\n if safe_fusing and torch.isnan(fused_weight).any().item():\n raise ValueError(\n \"This LoRA weight seems to be broken. 
\"\n f\"Encountered NaN values when trying to fuse LoRA weights for {self}.\"\n \"LoRA weights will not be fused.\"\n )\n\n self.weight.data = fused_weight.to(device=device, dtype=dtype)\n\n # we can drop the lora layer now\n self.lora_layer = None\n\n # offload the up and down matrices to CPU to not blow the memory\n self.w_up = w_up.cpu()\n self.w_down = w_down.cpu()\n self._lora_scale = lora_scale\n\n def _unfuse_lora(self):\n if not (getattr(self, \"w_up\", None) is not None and getattr(self, \"w_down\", None) is not None):\n return\n\n fused_weight = self.weight.data\n dtype, device = fused_weight.dtype, fused_weight.device\n\n w_up = self.w_up.to(device=device).float()\n w_down = self.w_down.to(device).float()\n\n unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n self.weight.data = unfused_weight.to(device=device, dtype=dtype)\n\n self.w_up = None\n self.w_down = None\n\n def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:\n if self.lora_layer is None:\n out = super().forward(hidden_states)\n return out\n else:\n out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))\n return out" }, { "identifier": "AdaGroupNorm", "path": "src/diffusers/models/normalization.py", "snippet": "class AdaGroupNorm(nn.Module):\n r\"\"\"\n GroupNorm layer modified to incorporate timestep embeddings.\n\n Parameters:\n embedding_dim (`int`): The size of each embedding vector.\n num_embeddings (`int`): The size of the embeddings dictionary.\n num_groups (`int`): The number of groups to separate the channels into.\n act_fn (`str`, *optional*, defaults to `None`): The activation function to use.\n eps (`float`, *optional*, defaults to `1e-5`): The epsilon value to use for numerical stability.\n \"\"\"\n\n def __init__(\n self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5\n ):\n super().__init__()\n self.num_groups = num_groups\n self.eps = eps\n\n if act_fn is None:\n self.act = None\n else:\n self.act = get_activation(act_fn)\n\n self.linear = nn.Linear(embedding_dim, out_dim * 2)\n\n def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:\n if self.act:\n emb = self.act(emb)\n emb = self.linear(emb)\n emb = emb[:, :, None, None]\n scale, shift = emb.chunk(2, dim=1)\n\n x = F.group_norm(x, self.num_groups, eps=self.eps)\n x = x * (1 + scale) + shift\n return x" } ]
from functools import partial
from typing import Optional, Tuple, Union
from ..utils import USE_PEFT_BACKEND
from .activations import get_activation
from .attention_processor import SpatialNorm
from .lora import LoRACompatibleConv, LoRACompatibleLinear
from .normalization import AdaGroupNorm
import torch
import torch.nn as nn
import torch.nn.functional as F
5,697
class KDownsample2D(nn.Module): r"""A 2D K-downsampling layer. Parameters: pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. """ def __init__(self, pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, (self.pad,) * 4, self.pad_mode) weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv2d(inputs, weight, stride=2) class KUpsample2D(nn.Module): r"""A 2D K-upsampling layer. Parameters: pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. """ def __init__(self, pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2 self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode) weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1) class ResnetBlock2D(nn.Module): r""" A Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. groups_out (`int`, *optional*, default to None): The number of groups to use for the second normalization layer. if set to None, same as `groups`. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. time_embedding_norm (`str`, *optional*, default to `"default"` ): Time scale shift config. By default, apply timestep embedding conditioning with a simple shift mechanism. Choose "scale_shift" or "ada_group" for a stronger conditioning with scale and shift. kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. use_in_shortcut (`bool`, *optional*, default to `True`): If `True`, add a 1x1 nn.conv2d layer for skip-connection. up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the `conv_shortcut` output. 
conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. If None, same as `out_channels`. """ def __init__( self, *, in_channels: int, out_channels: Optional[int] = None, conv_shortcut: bool = False, dropout: float = 0.0, temb_channels: int = 512, groups: int = 32, groups_out: Optional[int] = None, pre_norm: bool = True, eps: float = 1e-6, non_linearity: str = "swish", skip_time_act: bool = False, time_embedding_norm: str = "default", # default, scale_shift, ada_group, spatial kernel: Optional[torch.FloatTensor] = None, output_scale_factor: float = 1.0, use_in_shortcut: Optional[bool] = None, up: bool = False, down: bool = False, conv_shortcut_bias: bool = True, conv_2d_out_channels: Optional[int] = None, ): super().__init__() self.pre_norm = pre_norm self.pre_norm = True self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm self.skip_time_act = skip_time_act linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv if groups_out is None: groups_out = groups if self.time_embedding_norm == "ada_group":
# Copyright 2023 The HuggingFace Team. All rights reserved. # `TemporalConvLayer` Copyright 2023 Alibaba DAMO-VILAB, The ModelScope Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class Upsample1D(nn.Module): """A 1D upsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. use_conv_transpose (`bool`, default `False`): option to use a convolution transpose. out_channels (`int`, optional): number of output channels. Defaults to `channels`. name (`str`, default `conv`): name of the upsampling 1D layer. """ def __init__( self, channels: int, use_conv: bool = False, use_conv_transpose: bool = False, out_channels: Optional[int] = None, name: str = "conv", ): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name self.conv = None if use_conv_transpose: self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) elif use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) def forward(self, inputs: torch.Tensor) -> torch.Tensor: assert inputs.shape[1] == self.channels if self.use_conv_transpose: return self.conv(inputs) outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest") if self.use_conv: outputs = self.conv(outputs) return outputs class Downsample1D(nn.Module): """A 1D downsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. out_channels (`int`, optional): number of output channels. Defaults to `channels`. padding (`int`, default `1`): padding for the convolution. name (`str`, default `conv`): name of the downsampling 1D layer. """ def __init__( self, channels: int, use_conv: bool = False, out_channels: Optional[int] = None, padding: int = 1, name: str = "conv", ): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.padding = padding stride = 2 self.name = name if use_conv: self.conv = nn.Conv1d(self.channels, self.out_channels, 3, stride=stride, padding=padding) else: assert self.channels == self.out_channels self.conv = nn.AvgPool1d(kernel_size=stride, stride=stride) def forward(self, inputs: torch.Tensor) -> torch.Tensor: assert inputs.shape[1] == self.channels return self.conv(inputs) class Upsample2D(nn.Module): """A 2D upsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. use_conv_transpose (`bool`, default `False`): option to use a convolution transpose. out_channels (`int`, optional): number of output channels. Defaults to `channels`. name (`str`, default `conv`): name of the upsampling 2D layer. 
""" def __init__( self, channels: int, use_conv: bool = False, use_conv_transpose: bool = False, out_channels: Optional[int] = None, name: str = "conv", ): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.use_conv_transpose = use_conv_transpose self.name = name conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv conv = None if use_conv_transpose: conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1) elif use_conv: conv = conv_cls(self.channels, self.out_channels, 3, padding=1) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if name == "conv": self.conv = conv else: self.Conv2d_0 = conv def forward( self, hidden_states: torch.FloatTensor, output_size: Optional[int] = None, scale: float = 1.0 ) -> torch.FloatTensor: assert hidden_states.shape[1] == self.channels if self.use_conv_transpose: return self.conv(hidden_states) # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16 # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch # https://github.com/pytorch/pytorch/issues/86679 dtype = hidden_states.dtype if dtype == torch.bfloat16: hidden_states = hidden_states.to(torch.float32) # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 if hidden_states.shape[0] >= 64: hidden_states = hidden_states.contiguous() # if `output_size` is passed we force the interpolation output # size and do not make use of `scale_factor=2` if output_size is None: hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest") else: hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest") # If the input is bfloat16, we cast back to bfloat16 if dtype == torch.bfloat16: hidden_states = hidden_states.to(dtype) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if self.use_conv: if self.name == "conv": if isinstance(self.conv, LoRACompatibleConv) and not USE_PEFT_BACKEND: hidden_states = self.conv(hidden_states, scale) else: hidden_states = self.conv(hidden_states) else: if isinstance(self.Conv2d_0, LoRACompatibleConv) and not USE_PEFT_BACKEND: hidden_states = self.Conv2d_0(hidden_states, scale) else: hidden_states = self.Conv2d_0(hidden_states) return hidden_states class Downsample2D(nn.Module): """A 2D downsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. out_channels (`int`, optional): number of output channels. Defaults to `channels`. padding (`int`, default `1`): padding for the convolution. name (`str`, default `conv`): name of the downsampling 2D layer. 
""" def __init__( self, channels: int, use_conv: bool = False, out_channels: Optional[int] = None, padding: int = 1, name: str = "conv", ): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.padding = padding stride = 2 self.name = name conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv if use_conv: conv = conv_cls(self.channels, self.out_channels, 3, stride=stride, padding=padding) else: assert self.channels == self.out_channels conv = nn.AvgPool2d(kernel_size=stride, stride=stride) # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed if name == "conv": self.Conv2d_0 = conv self.conv = conv elif name == "Conv2d_0": self.conv = conv else: self.conv = conv def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor: assert hidden_states.shape[1] == self.channels if self.use_conv and self.padding == 0: pad = (0, 1, 0, 1) hidden_states = F.pad(hidden_states, pad, mode="constant", value=0) assert hidden_states.shape[1] == self.channels if not USE_PEFT_BACKEND: if isinstance(self.conv, LoRACompatibleConv): hidden_states = self.conv(hidden_states, scale) else: hidden_states = self.conv(hidden_states) else: hidden_states = self.conv(hidden_states) return hidden_states class FirUpsample2D(nn.Module): """A 2D FIR upsampling layer with an optional convolution. Parameters: channels (`int`, optional): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. out_channels (`int`, optional): number of output channels. Defaults to `channels`. fir_kernel (`tuple`, default `(1, 3, 3, 1)`): kernel for the FIR filter. """ def __init__( self, channels: Optional[int] = None, out_channels: Optional[int] = None, use_conv: bool = False, fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1), ): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.use_conv = use_conv self.fir_kernel = fir_kernel self.out_channels = out_channels def _upsample_2d( self, hidden_states: torch.FloatTensor, weight: Optional[torch.FloatTensor] = None, kernel: Optional[torch.FloatTensor] = None, factor: int = 2, gain: float = 1, ) -> torch.FloatTensor: """Fused `upsample_2d()` followed by `Conv2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: hidden_states (`torch.FloatTensor`): Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. weight (`torch.FloatTensor`, *optional*): Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. kernel (`torch.FloatTensor`, *optional*): FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor (`int`, *optional*): Integer upsampling factor (default: 2). gain (`float`, *optional*): Scaling factor for signal magnitude (default: 1.0). Returns: output (`torch.FloatTensor`): Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `hidden_states`. """ assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. 
if kernel is None: kernel = [1] * factor # setup kernel kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * (factor**2)) if self.use_conv: convH = weight.shape[2] convW = weight.shape[3] inC = weight.shape[1] pad_value = (kernel.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. output_shape = ( (hidden_states.shape[2] - 1) * factor + convH, (hidden_states.shape[3] - 1) * factor + convW, ) output_padding = ( output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH, output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = hidden_states.shape[1] // inC # Transpose weights. weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW)) weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4) weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW)) inverse_conv = F.conv_transpose2d( hidden_states, weight, stride=stride, output_padding=output_padding, padding=0 ) output = upfirdn2d_native( inverse_conv, torch.tensor(kernel, device=inverse_conv.device), pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1), ) else: pad_value = kernel.shape[0] - factor output = upfirdn2d_native( hidden_states, torch.tensor(kernel, device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), ) return output def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: if self.use_conv: height = self._upsample_2d(hidden_states, self.Conv2d_0.weight, kernel=self.fir_kernel) height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: height = self._upsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) return height class FirDownsample2D(nn.Module): """A 2D FIR downsampling layer with an optional convolution. Parameters: channels (`int`): number of channels in the inputs and outputs. use_conv (`bool`, default `False`): option to use a convolution. out_channels (`int`, optional): number of output channels. Defaults to `channels`. fir_kernel (`tuple`, default `(1, 3, 3, 1)`): kernel for the FIR filter. """ def __init__( self, channels: Optional[int] = None, out_channels: Optional[int] = None, use_conv: bool = False, fir_kernel: Tuple[int, int, int, int] = (1, 3, 3, 1), ): super().__init__() out_channels = out_channels if out_channels else channels if use_conv: self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1) self.fir_kernel = fir_kernel self.use_conv = use_conv self.out_channels = out_channels def _downsample_2d( self, hidden_states: torch.FloatTensor, weight: Optional[torch.FloatTensor] = None, kernel: Optional[torch.FloatTensor] = None, factor: int = 2, gain: float = 1, ) -> torch.FloatTensor: """Fused `Conv2d()` followed by `downsample_2d()`. Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary order. Args: hidden_states (`torch.FloatTensor`): Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. weight (`torch.FloatTensor`, *optional*): Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. 
kernel (`torch.FloatTensor`, *optional*): FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to average pooling. factor (`int`, *optional*, default to `2`): Integer downsampling factor. gain (`float`, *optional*, default to `1.0`): Scaling factor for signal magnitude. Returns: output (`torch.FloatTensor`): Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same datatype as `x`. """ assert isinstance(factor, int) and factor >= 1 if kernel is None: kernel = [1] * factor # setup kernel kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * gain if self.use_conv: _, _, convH, convW = weight.shape pad_value = (kernel.shape[0] - factor) + (convW - 1) stride_value = [factor, factor] upfirdn_input = upfirdn2d_native( hidden_states, torch.tensor(kernel, device=hidden_states.device), pad=((pad_value + 1) // 2, pad_value // 2), ) output = F.conv2d(upfirdn_input, weight, stride=stride_value, padding=0) else: pad_value = kernel.shape[0] - factor output = upfirdn2d_native( hidden_states, torch.tensor(kernel, device=hidden_states.device), down=factor, pad=((pad_value + 1) // 2, pad_value // 2), ) return output def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: if self.use_conv: downsample_input = self._downsample_2d(hidden_states, weight=self.Conv2d_0.weight, kernel=self.fir_kernel) hidden_states = downsample_input + self.Conv2d_0.bias.reshape(1, -1, 1, 1) else: hidden_states = self._downsample_2d(hidden_states, kernel=self.fir_kernel, factor=2) return hidden_states # downsample/upsample layer used in k-upscaler, might be able to use FirDownsample2D/DirUpsample2D instead class KDownsample2D(nn.Module): r"""A 2D K-downsampling layer. Parameters: pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. """ def __init__(self, pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, (self.pad,) * 4, self.pad_mode) weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv2d(inputs, weight, stride=2) class KUpsample2D(nn.Module): r"""A 2D K-upsampling layer. Parameters: pad_mode (`str`, *optional*, default to `"reflect"`): the padding mode to use. 
""" def __init__(self, pad_mode: str = "reflect"): super().__init__() self.pad_mode = pad_mode kernel_1d = torch.tensor([[1 / 8, 3 / 8, 3 / 8, 1 / 8]]) * 2 self.pad = kernel_1d.shape[1] // 2 - 1 self.register_buffer("kernel", kernel_1d.T @ kernel_1d, persistent=False) def forward(self, inputs: torch.Tensor) -> torch.Tensor: inputs = F.pad(inputs, ((self.pad + 1) // 2,) * 4, self.pad_mode) weight = inputs.new_zeros([inputs.shape[1], inputs.shape[1], self.kernel.shape[0], self.kernel.shape[1]]) indices = torch.arange(inputs.shape[1], device=inputs.device) kernel = self.kernel.to(weight)[None, :].expand(inputs.shape[1], -1, -1) weight[indices, indices] = kernel return F.conv_transpose2d(inputs, weight, stride=2, padding=self.pad * 2 + 1) class ResnetBlock2D(nn.Module): r""" A Resnet block. Parameters: in_channels (`int`): The number of channels in the input. out_channels (`int`, *optional*, default to be `None`): The number of output channels for the first conv2d layer. If None, same as `in_channels`. dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding. groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. groups_out (`int`, *optional*, default to None): The number of groups to use for the second normalization layer. if set to None, same as `groups`. eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. non_linearity (`str`, *optional*, default to `"swish"`): the activation function to use. time_embedding_norm (`str`, *optional*, default to `"default"` ): Time scale shift config. By default, apply timestep embedding conditioning with a simple shift mechanism. Choose "scale_shift" or "ada_group" for a stronger conditioning with scale and shift. kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`]. output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output. use_in_shortcut (`bool`, *optional*, default to `True`): If `True`, add a 1x1 nn.conv2d layer for skip-connection. up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer. down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer. conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the `conv_shortcut` output. conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output. If None, same as `out_channels`. 
""" def __init__( self, *, in_channels: int, out_channels: Optional[int] = None, conv_shortcut: bool = False, dropout: float = 0.0, temb_channels: int = 512, groups: int = 32, groups_out: Optional[int] = None, pre_norm: bool = True, eps: float = 1e-6, non_linearity: str = "swish", skip_time_act: bool = False, time_embedding_norm: str = "default", # default, scale_shift, ada_group, spatial kernel: Optional[torch.FloatTensor] = None, output_scale_factor: float = 1.0, use_in_shortcut: Optional[bool] = None, up: bool = False, down: bool = False, conv_shortcut_bias: bool = True, conv_2d_out_channels: Optional[int] = None, ): super().__init__() self.pre_norm = pre_norm self.pre_norm = True self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels self.out_channels = out_channels self.use_conv_shortcut = conv_shortcut self.up = up self.down = down self.output_scale_factor = output_scale_factor self.time_embedding_norm = time_embedding_norm self.skip_time_act = skip_time_act linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv if groups_out is None: groups_out = groups if self.time_embedding_norm == "ada_group":
self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps)
5
2023-11-18 01:40:55+00:00
8k
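As a quick sanity check of the K-sampling blocks captured in the record above, the sketch below round-trips a tensor through KDownsample2D and KUpsample2D. The tensor sizes are arbitrary placeholders, and it assumes the two classes from the snippet are in scope (for example, copied into a standalone script alongside the imports shown there):

    import torch

    # Assumes KDownsample2D / KUpsample2D are defined exactly as in the record above.
    down = KDownsample2D()
    up = KUpsample2D()

    x = torch.randn(1, 4, 32, 32)   # (batch, channels, H, W) - placeholder sizes
    y = down(x)                     # fixed separable kernel, stride 2 -> (1, 4, 16, 16)
    z = up(y)                       # transposed conv with the same kernel -> (1, 4, 32, 32)
    print(y.shape, z.shape)

The round trip restores the original spatial resolution, which is the behaviour the resnet up/down blocks in the record rely on.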
basnijholt/unidep
tests/test_unidep.py
[ { "identifier": "create_conda_env_specification", "path": "unidep/_conda_env.py", "snippet": "def create_conda_env_specification( # noqa: PLR0912\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n channels: list[str],\n platforms: list[Platform],\n selector: Literal[\"sel\", \"comment\"] = \"sel\",\n) -> CondaEnvironmentSpec:\n \"\"\"Create a conda environment specification from resolved requirements.\"\"\"\n if selector not in (\"sel\", \"comment\"): # pragma: no cover\n msg = f\"Invalid selector: {selector}, must be one of ['sel', 'comment']\"\n raise ValueError(msg)\n\n # Split in conda and pip dependencies and prefer conda over pip\n conda, pip = _extract_conda_pip_dependencies(resolved)\n\n conda_deps: list[str | dict[str, str]] = CommentedSeq()\n pip_deps: list[str] = CommentedSeq()\n seen_identifiers: set[str] = set()\n for platform_to_spec in conda.values():\n if len(platform_to_spec) > 1 and selector == \"sel\":\n # None has been expanded already if len>1\n _resolve_multiple_platform_conflicts(platform_to_spec)\n for _platform, spec in sorted(platform_to_spec.items()):\n dep_str = spec.name_with_pin()\n if len(platforms) != 1 and _platform is not None:\n if selector == \"sel\":\n sel = _conda_sel(_platform)\n dep_str = {f\"sel({sel})\": dep_str} # type: ignore[assignment]\n conda_deps.append(dep_str)\n if selector == \"comment\":\n _add_comment(conda_deps, _platform)\n else:\n conda_deps.append(dep_str)\n assert isinstance(spec.identifier, str)\n seen_identifiers.add(spec.identifier)\n\n for platform_to_spec in pip.values():\n spec_to_platforms: dict[Spec, list[Platform | None]] = {}\n for _platform, spec in platform_to_spec.items():\n spec_to_platforms.setdefault(spec, []).append(_platform)\n\n for spec, _platforms in spec_to_platforms.items():\n if spec.identifier in seen_identifiers:\n continue\n\n dep_str = spec.name_with_pin(is_pip=True)\n if _platforms != [None] and len(platforms) != 1:\n if selector == \"sel\":\n marker = build_pep508_environment_marker(_platforms) # type: ignore[arg-type]\n dep_str = f\"{dep_str}; {marker}\"\n pip_deps.append(dep_str)\n else:\n assert selector == \"comment\"\n # We can only add comments with a single platform because\n # `conda-lock` doesn't implement logic, e.g., [linux or win]\n # should be spread into two lines, one with [linux] and the\n # other with [win].\n for _platform in _platforms:\n pip_deps.append(dep_str)\n _add_comment(pip_deps, cast(Platform, _platform))\n else:\n pip_deps.append(dep_str)\n\n return CondaEnvironmentSpec(channels, platforms, conda_deps, pip_deps)" }, { "identifier": "write_conda_environment_file", "path": "unidep/_conda_env.py", "snippet": "def write_conda_environment_file(\n env_spec: CondaEnvironmentSpec,\n output_file: str | Path | None = \"environment.yaml\",\n name: str = \"myenv\",\n *,\n verbose: bool = False,\n) -> None:\n \"\"\"Generate a conda environment.yaml file or print to stdout.\"\"\"\n resolved_dependencies = deepcopy(env_spec.conda)\n if env_spec.pip:\n resolved_dependencies.append({\"pip\": env_spec.pip}) # type: ignore[arg-type, dict-item]\n env_data = CommentedMap({\"name\": name})\n if env_spec.channels:\n env_data[\"channels\"] = env_spec.channels\n if resolved_dependencies:\n env_data[\"dependencies\"] = resolved_dependencies\n if env_spec.platforms:\n env_data[\"platforms\"] = env_spec.platforms\n yaml = YAML(typ=\"rt\")\n yaml.default_flow_style = False\n yaml.width = 4096\n yaml.indent(mapping=2, sequence=2, offset=2)\n if output_file:\n if verbose:\n print(f\"📝 
Generating environment file at `{output_file}`\")\n with open(output_file, \"w\") as f: # noqa: PTH123\n yaml.dump(env_data, f)\n if verbose:\n print(\"📝 Environment file generated successfully.\")\n add_comment_to_file(output_file)\n else:\n yaml.dump(env_data, sys.stdout)" }, { "identifier": "resolve_conflicts", "path": "unidep/_conflicts.py", "snippet": "def resolve_conflicts(\n requirements: dict[str, list[Spec]],\n platforms: list[Platform] | None = None,\n) -> dict[str, dict[Platform | None, dict[CondaPip, Spec]]]:\n \"\"\"Resolve conflicts in a dictionary of requirements.\n\n Uses the ``ParsedRequirements.requirements`` dict returned by\n `parse_requirements`.\n \"\"\"\n if platforms and not set(platforms).issubset(get_args(Platform)):\n msg = f\"Invalid platform: {platforms}, must contain only {get_args(Platform)}\"\n raise VersionConflictError(msg)\n\n prepared = _prepare_specs_for_conflict_resolution(requirements)\n for data in prepared.values():\n _pop_unused_platforms_and_maybe_expand_none(data, platforms)\n resolved = {\n pkg: _combine_pinning_within_platform(data) for pkg, data in prepared.items()\n }\n\n for _platforms in resolved.values():\n for _platform, sources in _platforms.items():\n _platforms[_platform] = _resolve_conda_pip_conflicts(sources)\n return resolved" }, { "identifier": "find_requirements_files", "path": "unidep/_dependencies_parsing.py", "snippet": "def find_requirements_files(\n base_dir: str | Path = \".\",\n depth: int = 1,\n *,\n verbose: bool = False,\n) -> list[Path]:\n \"\"\"Scan a directory for `requirements.yaml` and `pyproject.toml` files.\"\"\"\n base_path = Path(base_dir)\n found_files = []\n\n # Define a helper function to recursively scan directories\n def _scan_dir(path: Path, current_depth: int) -> None:\n if verbose:\n print(f\"🔍 Scanning in `{path}` at depth {current_depth}\")\n if current_depth > depth:\n return\n for child in path.iterdir():\n if child.is_dir():\n _scan_dir(child, current_depth + 1)\n elif child.name == \"requirements.yaml\":\n found_files.append(child)\n if verbose:\n print(f'🔍 Found `\"requirements.yaml\"` at `{child}`')\n elif child.name == \"pyproject.toml\" and unidep_configured_in_toml(child):\n if verbose:\n print(f'🔍 Found `\"pyproject.toml\"` with dependencies at `{child}`')\n found_files.append(child)\n\n _scan_dir(base_path, 0)\n return sorted(found_files)" }, { "identifier": "parse_requirements", "path": "unidep/_dependencies_parsing.py", "snippet": "def parse_requirements( # noqa: PLR0912\n *paths: Path,\n ignore_pins: list[str] | None = None,\n overwrite_pins: list[str] | None = None,\n skip_dependencies: list[str] | None = None,\n verbose: bool = False,\n) -> ParsedRequirements:\n \"\"\"Parse a list of `requirements.yaml` or `pyproject.toml` files.\"\"\"\n ignore_pins = ignore_pins or []\n skip_dependencies = skip_dependencies or []\n overwrite_pins_map = _parse_overwrite_pins(overwrite_pins or [])\n requirements: dict[str, list[Spec]] = defaultdict(list)\n channels: set[str] = set()\n platforms: set[Platform] = set()\n datas = []\n seen: set[Path] = set()\n yaml = YAML(typ=\"rt\")\n for p in paths:\n if verbose:\n print(f\"📄 Parsing `{p}`\")\n data = _load(p, yaml)\n datas.append(data)\n seen.add(p.resolve())\n\n # Handle \"local_dependencies\" (or old name \"includes\", changed in 0.42.0)\n for include in _get_local_dependencies(data):\n try:\n requirements_path = dependencies_filename(p.parent / include).resolve()\n except FileNotFoundError:\n # Means that this is a local package that is not managed by 
unidep.\n # We do not need to do anything here, just in `unidep install`.\n continue\n if requirements_path in seen:\n continue # Avoids circular local_dependencies\n if verbose:\n print(f\"📄 Parsing `{include}` from `local_dependencies`\")\n datas.append(_load(requirements_path, yaml))\n seen.add(requirements_path)\n\n identifier = -1\n for data in datas:\n for channel in data.get(\"channels\", []):\n channels.add(channel)\n for _platform in data.get(\"platforms\", []):\n platforms.add(_platform)\n if \"dependencies\" not in data:\n continue\n dependencies = data[\"dependencies\"]\n for i, dep in enumerate(data[\"dependencies\"]):\n identifier += 1\n if isinstance(dep, str):\n specs = _parse_dependency(\n dep,\n dependencies,\n i,\n \"both\",\n identifier,\n ignore_pins,\n overwrite_pins_map,\n skip_dependencies,\n )\n for spec in specs:\n requirements[spec.name].append(spec)\n continue\n assert isinstance(dep, dict)\n for which in [\"conda\", \"pip\"]:\n if which in dep:\n specs = _parse_dependency(\n dep[which],\n dep,\n which,\n which, # type: ignore[arg-type]\n identifier,\n ignore_pins,\n overwrite_pins_map,\n skip_dependencies,\n )\n for spec in specs:\n requirements[spec.name].append(spec)\n\n return ParsedRequirements(sorted(channels), sorted(platforms), dict(requirements))" }, { "identifier": "filter_python_dependencies", "path": "unidep/_setuptools_integration.py", "snippet": "def filter_python_dependencies(\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n) -> list[str]:\n \"\"\"Filter out conda dependencies and return only pip dependencies.\n\n Examples\n --------\n >>> requirements = parse_requirements(\"requirements.yaml\")\n >>> resolved = resolve_conflicts(\n ... requirements.requirements, requirements.platforms\n ... 
)\n >>> python_deps = filter_python_dependencies(resolved)\n \"\"\"\n pip_deps = []\n for platform_data in resolved.values():\n to_process: dict[Platform | None, Spec] = {} # platform -> Spec\n for _platform, sources in platform_data.items():\n pip_spec = sources.get(\"pip\")\n if pip_spec:\n to_process[_platform] = pip_spec\n if not to_process:\n continue\n\n # Check if all Spec objects are identical\n first_spec = next(iter(to_process.values()))\n if all(spec == first_spec for spec in to_process.values()):\n # Build a single combined environment marker\n dep_str = first_spec.name_with_pin(is_pip=True)\n if _platform is not None:\n selector = build_pep508_environment_marker(list(to_process.keys())) # type: ignore[arg-type]\n dep_str = f\"{dep_str}; {selector}\"\n pip_deps.append(dep_str)\n continue\n\n for _platform, pip_spec in to_process.items():\n dep_str = pip_spec.name_with_pin(is_pip=True)\n if _platform is not None:\n selector = build_pep508_environment_marker([_platform])\n dep_str = f\"{dep_str}; {selector}\"\n pip_deps.append(dep_str)\n return sorted(pip_deps)" }, { "identifier": "get_python_dependencies", "path": "unidep/_setuptools_integration.py", "snippet": "def get_python_dependencies(\n filename: str\n | Path\n | Literal[\"requirements.yaml\", \"pyproject.toml\"] = \"requirements.yaml\", # noqa: PYI051\n *,\n verbose: bool = False,\n ignore_pins: list[str] | None = None,\n overwrite_pins: list[str] | None = None,\n skip_dependencies: list[str] | None = None,\n platforms: list[Platform] | None = None,\n raises_if_missing: bool = True,\n) -> list[str]:\n \"\"\"Extract Python (pip) requirements from a `requirements.yaml` or `pyproject.toml` file.\"\"\" # noqa: E501\n p = Path(filename)\n if not p.exists():\n if raises_if_missing:\n msg = f\"File {filename} not found.\"\n raise FileNotFoundError(msg)\n return []\n\n requirements = parse_requirements(\n p,\n ignore_pins=ignore_pins,\n overwrite_pins=overwrite_pins,\n skip_dependencies=skip_dependencies,\n verbose=verbose,\n )\n resolved = resolve_conflicts(\n requirements.requirements,\n platforms or list(requirements.platforms),\n )\n return filter_python_dependencies(resolved)" }, { "identifier": "CondaEnvironmentSpec", "path": "unidep/_conda_env.py", "snippet": "class CondaEnvironmentSpec(NamedTuple):\n \"\"\"A conda environment.\"\"\"\n\n channels: list[str]\n platforms: list[Platform]\n conda: list[str | dict[str, str]] # actually a CommentedSeq[str | dict[str, str]]\n pip: list[str]" }, { "identifier": "VersionConflictError", "path": "unidep/_conflicts.py", "snippet": "class VersionConflictError(ValueError):\n \"\"\"Raised when a version conflict is detected.\"\"\"" }, { "identifier": "yaml_to_toml", "path": "unidep/_dependencies_parsing.py", "snippet": "def yaml_to_toml(yaml_path: Path) -> str:\n \"\"\"Converts a `requirements.yaml` file TOML format.\"\"\"\n try:\n import tomli_w\n except ImportError: # pragma: no cover\n msg = (\n \"❌ `tomli_w` is required to convert YAML to TOML.\"\n \" Install it with `pip install tomli_w`.\"\n )\n raise ImportError(msg) from None\n yaml = YAML(typ=\"rt\")\n data = _load(yaml_path, yaml)\n data.pop(\"name\", None)\n dependencies = data.get(\"dependencies\", [])\n for i, dep in enumerate(dependencies):\n if isinstance(dep, str):\n comment = _extract_first_comment(dependencies, i)\n if comment is not None:\n selector = selector_from_comment(comment)\n if selector is not None:\n dependencies[i] = f\"{dep}:{selector}\"\n continue\n assert isinstance(dep, dict)\n for which in [\"conda\", 
\"pip\"]:\n if which in dep:\n comment = _extract_first_comment(dep, which)\n if comment is not None:\n selector = selector_from_comment(comment)\n if selector is not None:\n dep[which] = f\"{dep[which]}:{selector}\"\n\n return tomli_w.dumps({\"tool\": {\"unidep\": data}})" }, { "identifier": "Platform", "path": "unidep/platform_definitions.py", "snippet": "VALID_SELECTORS = get_args(Selector)\nPEP508_MARKERS = {\n \"linux-64\": \"sys_platform == 'linux' and platform_machine == 'x86_64'\",\n \"linux-aarch64\": \"sys_platform == 'linux' and platform_machine == 'aarch64'\",\n \"linux-ppc64le\": \"sys_platform == 'linux' and platform_machine == 'ppc64le'\",\n \"osx-64\": \"sys_platform == 'darwin' and platform_machine == 'x86_64'\",\n \"osx-arm64\": \"sys_platform == 'darwin' and platform_machine == 'arm64'\",\n \"win-64\": \"sys_platform == 'win32' and platform_machine == 'AMD64'\",\n (\"linux-64\", \"linux-aarch64\", \"linux-ppc64le\"): \"sys_platform == 'linux'\",\n (\"osx-64\", \"osx-arm64\"): \"sys_platform == 'darwin'\",\n (\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"osx-arm64\",\n ): \"sys_platform == 'linux' or sys_platform == 'darwin'\",\n}\nPLATFORM_SELECTOR_MAP: dict[Platform, list[Selector]] = {\n \"linux-64\": [\"linux64\", \"unix\", \"linux\"],\n \"linux-aarch64\": [\"aarch64\", \"unix\", \"linux\"],\n \"linux-ppc64le\": [\"ppc64le\", \"unix\", \"linux\"],\n # \"osx64\" is a selector unique to conda-build referring to\n # platforms on macOS and the Python architecture is x86-64\n \"osx-64\": [\"osx64\", \"osx\", \"macos\", \"unix\"],\n \"osx-arm64\": [\"arm64\", \"osx\", \"macos\", \"unix\"],\n \"win-64\": [\"win64\", \"win\"],\n}\nPLATFORM_SELECTOR_MAP_REVERSE: dict[Selector, set[Platform]] = {}\ndef validate_selector(selector: Selector) -> None:\ndef platforms_from_selector(selector: str) -> list[Platform]:\n def platforms(self) -> list[Platform] | None:\n def pprint(self) -> str:\n def name_with_pin(self, *, is_pip: bool = False) -> str:\nclass Spec(NamedTuple):" } ]
import textwrap
import pytest
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from ruamel.yaml import YAML
from unidep import (
    create_conda_env_specification,
    filter_python_dependencies,
    find_requirements_files,
    get_python_dependencies,
    parse_requirements,
    resolve_conflicts,
    write_conda_environment_file,
)
from unidep._conda_env import CondaEnvironmentSpec
from unidep._conflicts import VersionConflictError
from unidep._dependencies_parsing import yaml_to_toml
from unidep.platform_definitions import Platform, Spec
from typing import Literal
from typing_extensions import Literal
4,985
"""unidep tests.""" from __future__ import annotations if TYPE_CHECKING: if sys.version_info >= (3, 8): else: # pragma: no cover REPO_ROOT = Path(__file__).parent.parent def maybe_as_toml(toml_or_yaml: Literal["toml", "yaml"], p: Path) -> Path: if toml_or_yaml == "toml": toml = yaml_to_toml(p) p.unlink() p = p.with_name("pyproject.toml") p.write_text(toml) return p @pytest.fixture(params=["toml", "yaml"]) def setup_test_files( request: pytest.FixtureRequest, tmp_path: Path, ) -> tuple[Path, Path]: d1 = tmp_path / "dir1" d1.mkdir() f1 = d1 / "requirements.yaml" f1.write_text("dependencies:\n - numpy\n - conda: mumps") d2 = tmp_path / "dir2" d2.mkdir() f2 = d2 / "requirements.yaml" f2.write_text("dependencies:\n - pip: pandas") f1 = maybe_as_toml(request.param, f1) f2 = maybe_as_toml(request.param, f2) return (f1, f2) def test_find_requirements_files( tmp_path: Path, setup_test_files: tuple[Path, Path], ) -> None: # Make sure to pass the depth argument correctly if your function expects it. found_files = find_requirements_files( tmp_path, depth=1, verbose=True, ) # Convert found_files to absolute paths for comparison absolute_results = sorted(str(p.resolve()) for p in found_files) absolute_test_files = sorted(str(p.resolve()) for p in setup_test_files) assert absolute_results == absolute_test_files def test_find_requirements_files_depth(tmp_path: Path) -> None: # Create a nested directory structure (tmp_path / "dir1").mkdir() (tmp_path / "dir1/dir2").mkdir() (tmp_path / "dir1/dir2/dir3").mkdir() # Create test files (tmp_path / "requirements.yaml").touch() (tmp_path / "dir1/requirements.yaml").touch() (tmp_path / "dir1/dir2/requirements.yaml").touch() (tmp_path / "dir1/dir2/dir3/requirements.yaml").touch() # Test depth=0 assert len(find_requirements_files(tmp_path, depth=0)) == 1 # Test depth=1 assert len(find_requirements_files(tmp_path, depth=1)) == 2 # Test depth=2 assert len(find_requirements_files(tmp_path, depth=2)) == 3 # Test depth=3 assert len(find_requirements_files(tmp_path, depth=3)) == 4 # Test depth=4 (or more) assert len(find_requirements_files(tmp_path, depth=4)) == 4 @pytest.mark.parametrize("toml_or_yaml", ["toml", "yaml"]) def test_parse_requirements( toml_or_yaml: Literal["toml", "yaml"], tmp_path: Path, ) -> None: p = tmp_path / "requirements.yaml" p.write_text( textwrap.dedent( """\ dependencies: - foo >1 # [linux64] - foo # [unix] - bar >1 - bar """, ), ) p = maybe_as_toml(toml_or_yaml, p)
"""unidep tests.""" from __future__ import annotations if TYPE_CHECKING: if sys.version_info >= (3, 8): else: # pragma: no cover REPO_ROOT = Path(__file__).parent.parent def maybe_as_toml(toml_or_yaml: Literal["toml", "yaml"], p: Path) -> Path: if toml_or_yaml == "toml": toml = yaml_to_toml(p) p.unlink() p = p.with_name("pyproject.toml") p.write_text(toml) return p @pytest.fixture(params=["toml", "yaml"]) def setup_test_files( request: pytest.FixtureRequest, tmp_path: Path, ) -> tuple[Path, Path]: d1 = tmp_path / "dir1" d1.mkdir() f1 = d1 / "requirements.yaml" f1.write_text("dependencies:\n - numpy\n - conda: mumps") d2 = tmp_path / "dir2" d2.mkdir() f2 = d2 / "requirements.yaml" f2.write_text("dependencies:\n - pip: pandas") f1 = maybe_as_toml(request.param, f1) f2 = maybe_as_toml(request.param, f2) return (f1, f2) def test_find_requirements_files( tmp_path: Path, setup_test_files: tuple[Path, Path], ) -> None: # Make sure to pass the depth argument correctly if your function expects it. found_files = find_requirements_files( tmp_path, depth=1, verbose=True, ) # Convert found_files to absolute paths for comparison absolute_results = sorted(str(p.resolve()) for p in found_files) absolute_test_files = sorted(str(p.resolve()) for p in setup_test_files) assert absolute_results == absolute_test_files def test_find_requirements_files_depth(tmp_path: Path) -> None: # Create a nested directory structure (tmp_path / "dir1").mkdir() (tmp_path / "dir1/dir2").mkdir() (tmp_path / "dir1/dir2/dir3").mkdir() # Create test files (tmp_path / "requirements.yaml").touch() (tmp_path / "dir1/requirements.yaml").touch() (tmp_path / "dir1/dir2/requirements.yaml").touch() (tmp_path / "dir1/dir2/dir3/requirements.yaml").touch() # Test depth=0 assert len(find_requirements_files(tmp_path, depth=0)) == 1 # Test depth=1 assert len(find_requirements_files(tmp_path, depth=1)) == 2 # Test depth=2 assert len(find_requirements_files(tmp_path, depth=2)) == 3 # Test depth=3 assert len(find_requirements_files(tmp_path, depth=3)) == 4 # Test depth=4 (or more) assert len(find_requirements_files(tmp_path, depth=4)) == 4 @pytest.mark.parametrize("toml_or_yaml", ["toml", "yaml"]) def test_parse_requirements( toml_or_yaml: Literal["toml", "yaml"], tmp_path: Path, ) -> None: p = tmp_path / "requirements.yaml" p.write_text( textwrap.dedent( """\ dependencies: - foo >1 # [linux64] - foo # [unix] - bar >1 - bar """, ), ) p = maybe_as_toml(toml_or_yaml, p)
requirements = parse_requirements(p, verbose=False)
4
2023-11-16 04:23:01+00:00
8k
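The unidep helpers imported by the test module above chain into a small pipeline — parse requirement files, resolve per-platform conflicts, build a conda environment spec, and write it out — as suggested by the docstrings in the record's snippets. A minimal sketch under those assumptions follows; the input path, channel, and fallback platform are placeholder values, not part of the original record:

    from pathlib import Path

    from unidep import (
        create_conda_env_specification,
        parse_requirements,
        resolve_conflicts,
        write_conda_environment_file,
    )

    # Placeholder input; any requirements.yaml / pyproject.toml managed by unidep would do.
    p = Path("requirements.yaml")
    requirements = parse_requirements(p, verbose=False)
    resolved = resolve_conflicts(requirements.requirements, list(requirements.platforms))
    env_spec = create_conda_env_specification(
        resolved,
        channels=["conda-forge"],                        # placeholder channel
        platforms=list(requirements.platforms) or ["linux-64"],  # placeholder fallback
    )
    write_conda_environment_file(env_spec, "environment.yaml", name="myenv")

This mirrors the call order shown in the filter_python_dependencies docstring and the test file's own usage of parse_requirements.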
BAAI-DCAI/SegVol
train.py
[ { "identifier": "SegVol", "path": "network/model.py", "snippet": "class SegVol(nn.Module):\n def __init__(self, \n image_encoder, \n mask_decoder,\n prompt_encoder,\n clip_ckpt,\n roi_size,\n patch_size,\n test_mode=False,\n ):\n super().__init__()\n self.image_encoder = image_encoder\n self.mask_decoder = mask_decoder\n self.prompt_encoder = prompt_encoder\n self.text_encoder = TextEncoder(clip_ckpt)\n self.feat_shape = np.array(roi_size)/np.array(patch_size)\n self.test_mode = test_mode\n self.dice_loss = BinaryDiceLoss().cuda()\n self.bce_loss = BCELoss().cuda()\n self.decoder_iter = 6\n\n def forward(self, image, text=None, boxes=None, points=None, **kwargs):\n bs = image.shape[0]\n img_shape = (image.shape[2], image.shape[3], image.shape[4])\n image_embedding, _ = self.image_encoder(image)\n image_embedding = image_embedding.transpose(1, 2).view(bs, -1, \n int(self.feat_shape[0]), int(self.feat_shape[1]), int(self.feat_shape[2]))\n # test mode\n if self.test_mode:\n return self.forward_decoder(image_embedding, img_shape, text, boxes, points)\n \n # train mode\n ## sl\n sl_loss = self.supervised_forward(image, image_embedding, img_shape, kwargs['train_organs'], kwargs['train_labels'])\n ## ssl\n ssl_loss = self.unsupervised_forward(image, image_embedding, kwargs['pseudo_seg_cleaned'], img_shape)\n return sl_loss, ssl_loss\n\n def forward_decoder(self, image_embedding, img_shape, text=None, boxes=None, points=None):\n with torch.no_grad():\n if boxes is not None:\n if len(boxes.shape) == 2:\n boxes = boxes[:, None, :] # (B, 1, 6)\n if text is not None:\n text_embedding = self.text_encoder(text) # (B, 768)\n else:\n text_embedding = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=None,\n text_embedding=text_embedding,\n )\n\n dense_pe = self.prompt_encoder.get_dense_pe()\n low_res_masks, _ = self.mask_decoder(\n image_embeddings=image_embedding,\n text_embedding = text_embedding,\n image_pe=dense_pe,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=False,\n )\n logits = F.interpolate(low_res_masks, size=img_shape, mode='trilinear', align_corners=False)\n return logits\n\n def supervised_forward(self, image, image_embedding, img_shape, training_organs, train_labels):\n iter_points, iter_bboxes, iter_organs = self.build_prompt_label(image.shape[0], training_organs, train_labels)\n # select prompt\n prompt_options = [[None, iter_points, iter_organs], [iter_bboxes, None, iter_organs], \n [None, None, iter_organs], [iter_bboxes, None, None], [None, iter_points, None],\n [iter_bboxes, iter_points, None]]\n sl_loss = 0\n for prompt in prompt_options:\n bboxes, points, organs = prompt\n logits = self.forward_decoder(image_embedding, img_shape, text=organs, boxes=bboxes, points=points)\n # cal loss\n sl_loss_dice = self.dice_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss_bce = self.bce_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss += sl_loss_dice + sl_loss_bce\n return sl_loss\n \n def unsupervised_forward(self, image, image_embedding, pseudo_seg_cleaned, img_shape):\n sll_loss = 0\n for iter in range(self.decoder_iter):\n if iter % 2 == 0:\n pseudo_labels, pseudo_points_prompt = self.build_pseudo_point_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=None, points=pseudo_points_prompt)\n else:\n pseudo_labels, pseudo_bboxes_prompt = 
self.build_pseudo_box_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=pseudo_bboxes_prompt, points=None)\n # cal loss\n sll_loss_dice = self.dice_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss_bce = self.bce_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss += sll_loss_dice + sll_loss_bce\n return sll_loss\n\n def build_prompt_label(self, bs, training_organs, train_labels):\n # generate prompt & label\n iter_organs = []\n iter_bboxes = []\n iter_points_ax = []\n iter_point_labels = []\n for sample_idx in range(bs):\n # organ prompt\n iter_organs.append(training_organs)\n # box prompt\n box = generate_box(train_labels[sample_idx])\n iter_bboxes.append(box)\n # point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(0, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n point, point_label = select_points(\n train_labels[sample_idx],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n iter_points_ax.append(point)\n iter_point_labels.append(point_label)\n # batched prompt\n iter_points_ax = torch.stack(iter_points_ax, dim=0).cuda()\n iter_point_labels = torch.stack(iter_point_labels, dim=0).cuda()\n iter_points = (iter_points_ax, iter_point_labels)\n iter_bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return iter_points, iter_bboxes, iter_organs\n \n def build_pseudo_point_prompt_label(self, input_shape, seg_labels):\n pseudo_labels = torch.zeros(input_shape).cuda()\n # generate points\n points = []\n point_labels = []\n for batch_idx in range(input_shape[0]):\n # generate pseudo label\n unique_ids = torch.unique(seg_labels[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels[batch_idx]==region_id] = 1\n # generate point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(4, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n assert len(pseudo_labels[batch_idx][0].shape) == 3\n point, point_label = select_points(\n pseudo_labels[batch_idx][0],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n points.append(point)\n point_labels.append(point_label)\n points = torch.stack(points, dim=0).cuda()\n point_labels = torch.stack(point_labels, dim=0).cuda()\n pseudo_points_prompt = (points, point_labels)\n return pseudo_labels, pseudo_points_prompt\n\n def build_pseudo_box_prompt_label(self, input_shape, seg_labels_cleaned):\n pseudo_labels = torch.zeros(input_shape).cuda()\n iter_bboxes = []\n # generate boxes\n for batch_idx in range(input_shape[0]):\n # generate ori pseudo label\n unique_ids = torch.unique(seg_labels_cleaned[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==region_id] = 1\n # generate box prompt\n box = generate_box(pseudo_labels[batch_idx][0])\n iter_bboxes.append(box)\n # refine pseudo label\n x_min, y_min, z_min, x_max, y_max, z_max = box\n binary_cube = torch.zeros_like(pseudo_labels[batch_idx][0]).int()\n binary_cube[x_min:x_max+1, y_min:y_max+1, 
z_min:z_max+1] = 1\n # cal iou\n mask_label = seg_labels_cleaned[batch_idx][0]\n assert binary_cube.shape == mask_label.shape, str(binary_cube.shape) + ' ' + str(mask_label.shape)\n mask_values_in_binary_cube = mask_label[binary_cube == 1]\n unique_mask_values = torch.unique(mask_values_in_binary_cube)\n # print('unique_mask_values ', unique_mask_values)\n for value in unique_mask_values:\n if value == -1: continue\n mask_area = (mask_label == value)\n intersection = (binary_cube & mask_area)\n iou = intersection.float().sum() / mask_area.float().sum()\n if iou > 0.90:\n # print(f\"Mask value {value} has IOU > 0.90 in binary cube.\")\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==value] = 1\n\n bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return pseudo_labels, bboxes" }, { "identifier": "sam_model_registry", "path": "segment_anything_volumetric/build_sam.py", "snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):" }, { "identifier": "LinearWarmupCosineAnnealingLR", "path": "utils/lr_scheduler.py", "snippet": "class LinearWarmupCosineAnnealingLR(_LRScheduler):\n\n def __init__(\n self,\n optimizer: Optimizer,\n warmup_epochs: int,\n max_epochs: int,\n warmup_start_lr: float = 0.0,\n eta_min: float = 0.0,\n last_epoch: int = -1,\n ) -> None:\n \"\"\"\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n warmup_epochs (int): Maximum number of iterations for linear warmup\n max_epochs (int): Maximum number of iterations\n warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.\n eta_min (float): Minimum learning rate. Default: 0.\n last_epoch (int): The index of last epoch. Default: -1.\n \"\"\"\n self.warmup_epochs = warmup_epochs\n self.max_epochs = max_epochs\n self.warmup_start_lr = warmup_start_lr\n self.eta_min = eta_min\n\n super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)\n\n def get_lr(self) -> List[float]:\n \"\"\"\n Compute learning rate using chainable form of the scheduler\n \"\"\"\n if not self._get_lr_called_within_step:\n warnings.warn(\n \"To get the last learning rate computed by the scheduler, \"\n \"please use `get_last_lr()`.\",\n UserWarning,\n )\n\n if self.last_epoch == 0:\n return [self.warmup_start_lr] * len(self.base_lrs)\n elif self.last_epoch < self.warmup_epochs:\n return [\n group[\"lr\"] + (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)\n for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)\n ]\n elif self.last_epoch == self.warmup_epochs:\n return self.base_lrs\n else:\n return [\n self.eta_min + 0.5 * (base_lr - self.eta_min) *\n (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))\n for base_lr in self.base_lrs\n ]\n # elif (self.last_epoch - 1 - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs)) == 0:\n # return [\n # group[\"lr\"] + (base_lr - self.eta_min) *\n # (1 - math.cos(math.pi / (self.max_epochs - self.warmup_epochs))) / 2\n # for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)\n # ]\n\n # return [\n # (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs))) /\n # (\n # 1 +\n # math.cos(math.pi * (self.last_epoch - self.warmup_epochs - 1) / (self.max_epochs - self.warmup_epochs))\n # ) * (group[\"lr\"] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups\n # ]\n\n def _get_closed_form_lr(self) -> 
List[float]:\n \"\"\"\n Called when epoch is passed as a param to the `step` function of the scheduler.\n \"\"\"\n if self.last_epoch < self.warmup_epochs:\n return [\n self.warmup_start_lr + self.last_epoch * (base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)\n for base_lr in self.base_lrs\n ]\n\n return [\n self.eta_min + 0.5 * (base_lr - self.eta_min) *\n (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))\n for base_lr in self.base_lrs\n ]" }, { "identifier": "BCELoss", "path": "utils/loss.py", "snippet": "class BCELoss(nn.Module):\r\n def __init__(self):\r\n super(BCELoss, self).__init__()\r\n self.criterion = nn.BCEWithLogitsLoss()\r\n\r\n def forward(self, predict, target):\r\n assert predict.shape == target.shape, 'predict & target shape do not match\\n' + str(predict.shape) + '\\n' + str(target.shape)\r\n target_ = target.clone()\r\n target_[target == -1] = 0\r\n\r\n ce_loss = self.criterion(predict, target_)\r\n\r\n return ce_loss\r" }, { "identifier": "BinaryDiceLoss", "path": "utils/loss.py", "snippet": "class BinaryDiceLoss(nn.Module):\r\n def __init__(self, smooth=1, p=2, reduction='mean'):\r\n super(BinaryDiceLoss, self).__init__()\r\n self.smooth = smooth\r\n self.p = p\r\n self.reduction = reduction\r\n\r\n def forward(self, predict, target):\r\n predict = torch.sigmoid(predict)\r\n target_ = target.clone()\r\n target_[target == -1] = 0\r\n assert predict.shape[0] == target.shape[0], \"predict & target batch size don't match\\n\" + str(predict.shape) + '\\n' + str(target.shape[0])\r\n predict = predict.contiguous().view(predict.shape[0], -1)\r\n target_ = target_.contiguous().view(target_.shape[0], -1)\r\n\r\n num = torch.sum(torch.mul(predict, target_), dim=1)\r\n den = torch.sum(predict, dim=1) + torch.sum(target_, dim=1) + self.smooth\r\n\r\n dice_score = 2*num / den\r\n dice_loss = 1 - dice_score\r\n\r\n # dice_loss_avg = dice_loss[target[:,0]!=-1].sum() / dice_loss[target[:,0]!=-1].shape[0]\r\n dice_loss_avg = dice_loss.sum() / dice_loss.shape[0]\r\n\r\n return dice_loss_avg\r" }, { "identifier": "get_loader", "path": "data_utils.py", "snippet": "def get_loader(args):\n train_transform = transforms.Compose(\n [\n transforms.AddChanneld(keys=[\"image\"]),\n DimTranspose(keys=[\"image\", \"label\", \"pseudo_seg\"]),\n MinMaxNormalization(),\n transforms.CropForegroundd(keys=[\"image\", \"label\", \"pseudo_seg\"], source_key=\"image\"),\n transforms.SpatialPadd(keys=[\"image\", \"label\", \"pseudo_seg\"], spatial_size=args.spatial_size, mode='constant'),\n transforms.OneOf(transforms=[\n transforms.Resized(keys=[\"image\", \"label\", \"pseudo_seg\"],spatial_size=args.spatial_size),\n transforms.RandCropByPosNegLabeld(\n keys=[\"image\", \"label\", \"pseudo_seg\"],\n label_key=\"label\",\n spatial_size=args.spatial_size,\n pos=2,\n neg=1,\n num_samples=1,\n image_key=\"image\",\n image_threshold=0,\n ),\n ],\n weights=[1, 1]\n ),\n transforms.RandFlipd(keys=[\"image\", \"label\", \"pseudo_seg\"], prob=args.RandFlipd_prob, spatial_axis=0),\n transforms.RandFlipd(keys=[\"image\", \"label\", \"pseudo_seg\"], prob=args.RandFlipd_prob, spatial_axis=1),\n transforms.RandFlipd(keys=[\"image\", \"label\", \"pseudo_seg\"], prob=args.RandFlipd_prob, spatial_axis=2),\n transforms.RandScaleIntensityd(keys=\"image\", factors=0.1, prob=args.RandScaleIntensityd_prob),\n transforms.RandShiftIntensityd(keys=\"image\", offsets=0.1, prob=args.RandShiftIntensityd_prob),\n transforms.Resized(keys=[\"image\", \"label\", 
\"pseudo_seg\"],spatial_size=args.spatial_size),\n transforms.ToTensord(keys=[\"image\", \"label\", \"pseudo_seg\"]),\n ]\n )\n\n print(f'----- train on combination dataset -----')\n combination_train_ds = build_concat_dataset(root_path=args.data_dir, dataset_codes=args.dataset_codes, transform=train_transform)\n train_sampler = BatchedDistributedSampler(combination_train_ds, shuffle=True, batch_size=args.batch_size) if args.dist else None\n train_loader = data.DataLoader(\n combination_train_ds,\n batch_size=args.batch_size,\n shuffle=(train_sampler is None),\n num_workers=args.num_workers,\n sampler=train_sampler,\n pin_memory=True,\n persistent_workers=True,\n collate_fn=collate_fn,\n )\n return train_loader" } ]
import os import torch import argparse import torch.multiprocessing as mp import shutil from datetime import datetime from network.model import SegVol from segment_anything_volumetric import sam_model_registry from utils.lr_scheduler import LinearWarmupCosineAnnealingLR from utils.loss import BCELoss, BinaryDiceLoss from data_utils import get_loader from tensorboardX import SummaryWriter from tqdm import tqdm
5,865
parser.add_argument("--pretrain", type = str, default='') parser.add_argument("--resume", type = str, default='') parser.add_argument("--data_dir", type = str, default='') parser.add_argument("--dataset_codes", type = list, default=['0010', '0011']) # config parser.add_argument("--test_mode", default=False, type=bool) parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') parser.add_argument("--clip_ckpt", type = str, default = './config/clip') parser.add_argument("--RandFlipd_prob", default=0.2, type=float, help="RandFlipd aug probability") parser.add_argument("--RandScaleIntensityd_prob", default=0.1, type=float, help="RandScaleIntensityd aug probability") parser.add_argument("--RandShiftIntensityd_prob", default=0.1, type=float, help="RandShiftIntensityd aug probability") parser.add_argument('-num_workers', type=int, default=8) # dist parser.add_argument('--dist', dest='dist', type=bool, default=True, help='distributed training or not') parser.add_argument('--node_rank', type=int, default=0, help='Node rank') parser.add_argument('--init_method', type = str, default = "env://") parser.add_argument('--bucket_cap_mb', type = int, default = 25, help='The amount of memory in Mb that DDP will accumulate before firing off gradient communication for the bucket (need to tune)') # key params parser.add_argument('-lr', type=float, default=1e-4) parser.add_argument('-weight_decay', type=float, default=1e-5) parser.add_argument('-warmup_epoch', type=int, default=10) parser.add_argument('-num_epochs', type=int, default=500) parser.add_argument('-batch_size', type=int, default=4) parser.add_argument("--use_pseudo_label", default=True, type=bool) args = parser.parse_args() return args def train_epoch(args, segvol_model, train_dataloader, optimizer, scheduler, epoch, rank, gpu, iter_num): epoch_loss = 0 epoch_sl_loss = 0 epoch_ssl_loss = 0 epoch_iterator = tqdm( train_dataloader, desc = f"[RANK {rank}: GPU {gpu}]", dynamic_ncols=True ) if args.dist: train_dataloader.sampler.set_epoch(epoch) torch.distributed.barrier() for batch in epoch_iterator: image, gt3D = batch["image"].cuda(), batch["post_label"].cuda() pseudo_seg_cleaned = batch['pseudo_seg_cleaned'].cuda() organ_name_list = batch['organ_name_list'] loss_step_avg = 0 sl_loss_step_avg = 0 ssl_loss_step_avg = 0 for cls_idx in range(len(organ_name_list)): optimizer.zero_grad() organs_cls = organ_name_list[cls_idx] labels_cls = gt3D[:, cls_idx] if torch.sum(labels_cls) == 0: print(f'[RANK {rank}: GPU {gpu}] ITER-{iter_num} --- No object, skip iter') continue sl_loss, ssl_loss = segvol_model(image, organs=None, boxes=None, points=None, train_organs=organs_cls, train_labels=labels_cls, pseudo_seg_cleaned=pseudo_seg_cleaned) if args.use_pseudo_label: loss = sl_loss + 0.1 * ssl_loss ssl_loss_step_avg += ssl_loss.item() sl_loss_step_avg += sl_loss.item() loss_step_avg += loss.item() loss.backward() optimizer.step() print(f'[RANK {rank}: GPU {gpu}] ITER-{iter_num} --- loss {loss.item()}, sl_loss, {sl_loss.item()}, ssl_loss {ssl_loss.item()}') iter_num += 1 loss_step_avg /= len(organ_name_list) sl_loss_step_avg /= len(organ_name_list) ssl_loss_step_avg /= len(organ_name_list) print(f'[RANK {rank}: GPU {gpu}] AVG loss {loss_step_avg}, sl_loss, {sl_loss_step_avg}, ssl_loss {ssl_loss_step_avg}') if rank == 0: 
args.writer.add_scalar('train_iter/loss', loss_step_avg, iter_num) args.writer.add_scalar('train_iter/sl_loss', sl_loss_step_avg, iter_num) args.writer.add_scalar('train_iter/ssl_loss', ssl_loss_step_avg, iter_num) epoch_loss += loss_step_avg epoch_sl_loss += sl_loss_step_avg if args.use_pseudo_label: epoch_ssl_loss += ssl_loss_step_avg scheduler.step() epoch_loss /= len(train_dataloader) + 1e-12 epoch_ssl_loss /= len(train_dataloader) + 1e-12 epoch_sl_loss /= len(train_dataloader) + 1e-12 print(f'{args.model_save_path} ==> [RANK {rank}: GPU {gpu}] ', 'epoch_loss: {}, ssl_loss: {}'.format(epoch_loss, epoch_ssl_loss)) if rank == 0: args.writer.add_scalar('train/loss', epoch_loss, epoch) args.writer.add_scalar('train/sl_loss', epoch_sl_loss, epoch) args.writer.add_scalar('train/ssl_loss', epoch_ssl_loss, epoch) args.writer.add_scalar('train/lr', scheduler.get_lr(), epoch) return epoch_loss, iter_num def main_worker(gpu, ngpus_per_node, args): node_rank = int(args.node_rank) rank = node_rank * ngpus_per_node + gpu world_size = ngpus_per_node #args.world_size print(f"[Rank {rank}]: Use GPU: {gpu} for training") is_main_host = rank == 0 if is_main_host: os.makedirs(args.model_save_path, exist_ok=True) shutil.copyfile(__file__, os.path.join(args.model_save_path, args.run_id + '_' + os.path.basename(__file__))) torch.cuda.set_device(gpu) torch.distributed.init_process_group( backend = "nccl", init_method = args.init_method, rank = rank, world_size = world_size, ) print('init_process_group finished')
def set_parse(): parser = argparse.ArgumentParser() # %% set up parser parser.add_argument("--pretrain", type = str, default='') parser.add_argument("--resume", type = str, default='') parser.add_argument("--data_dir", type = str, default='') parser.add_argument("--dataset_codes", type = list, default=['0010', '0011']) # config parser.add_argument("--test_mode", default=False, type=bool) parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') parser.add_argument("--clip_ckpt", type = str, default = './config/clip') parser.add_argument("--RandFlipd_prob", default=0.2, type=float, help="RandFlipd aug probability") parser.add_argument("--RandScaleIntensityd_prob", default=0.1, type=float, help="RandScaleIntensityd aug probability") parser.add_argument("--RandShiftIntensityd_prob", default=0.1, type=float, help="RandShiftIntensityd aug probability") parser.add_argument('-num_workers', type=int, default=8) # dist parser.add_argument('--dist', dest='dist', type=bool, default=True, help='distributed training or not') parser.add_argument('--node_rank', type=int, default=0, help='Node rank') parser.add_argument('--init_method', type = str, default = "env://") parser.add_argument('--bucket_cap_mb', type = int, default = 25, help='The amount of memory in Mb that DDP will accumulate before firing off gradient communication for the bucket (need to tune)') # key params parser.add_argument('-lr', type=float, default=1e-4) parser.add_argument('-weight_decay', type=float, default=1e-5) parser.add_argument('-warmup_epoch', type=int, default=10) parser.add_argument('-num_epochs', type=int, default=500) parser.add_argument('-batch_size', type=int, default=4) parser.add_argument("--use_pseudo_label", default=True, type=bool) args = parser.parse_args() return args def train_epoch(args, segvol_model, train_dataloader, optimizer, scheduler, epoch, rank, gpu, iter_num): epoch_loss = 0 epoch_sl_loss = 0 epoch_ssl_loss = 0 epoch_iterator = tqdm( train_dataloader, desc = f"[RANK {rank}: GPU {gpu}]", dynamic_ncols=True ) if args.dist: train_dataloader.sampler.set_epoch(epoch) torch.distributed.barrier() for batch in epoch_iterator: image, gt3D = batch["image"].cuda(), batch["post_label"].cuda() pseudo_seg_cleaned = batch['pseudo_seg_cleaned'].cuda() organ_name_list = batch['organ_name_list'] loss_step_avg = 0 sl_loss_step_avg = 0 ssl_loss_step_avg = 0 for cls_idx in range(len(organ_name_list)): optimizer.zero_grad() organs_cls = organ_name_list[cls_idx] labels_cls = gt3D[:, cls_idx] if torch.sum(labels_cls) == 0: print(f'[RANK {rank}: GPU {gpu}] ITER-{iter_num} --- No object, skip iter') continue sl_loss, ssl_loss = segvol_model(image, organs=None, boxes=None, points=None, train_organs=organs_cls, train_labels=labels_cls, pseudo_seg_cleaned=pseudo_seg_cleaned) if args.use_pseudo_label: loss = sl_loss + 0.1 * ssl_loss ssl_loss_step_avg += ssl_loss.item() sl_loss_step_avg += sl_loss.item() loss_step_avg += loss.item() loss.backward() optimizer.step() print(f'[RANK {rank}: GPU {gpu}] ITER-{iter_num} --- loss {loss.item()}, sl_loss, {sl_loss.item()}, ssl_loss {ssl_loss.item()}') iter_num += 1 loss_step_avg /= len(organ_name_list) sl_loss_step_avg /= len(organ_name_list) ssl_loss_step_avg /= len(organ_name_list) print(f'[RANK {rank}: GPU {gpu}] AVG loss {loss_step_avg}, 
sl_loss, {sl_loss_step_avg}, ssl_loss {ssl_loss_step_avg}') if rank == 0: args.writer.add_scalar('train_iter/loss', loss_step_avg, iter_num) args.writer.add_scalar('train_iter/sl_loss', sl_loss_step_avg, iter_num) args.writer.add_scalar('train_iter/ssl_loss', ssl_loss_step_avg, iter_num) epoch_loss += loss_step_avg epoch_sl_loss += sl_loss_step_avg if args.use_pseudo_label: epoch_ssl_loss += ssl_loss_step_avg scheduler.step() epoch_loss /= len(train_dataloader) + 1e-12 epoch_ssl_loss /= len(train_dataloader) + 1e-12 epoch_sl_loss /= len(train_dataloader) + 1e-12 print(f'{args.model_save_path} ==> [RANK {rank}: GPU {gpu}] ', 'epoch_loss: {}, ssl_loss: {}'.format(epoch_loss, epoch_ssl_loss)) if rank == 0: args.writer.add_scalar('train/loss', epoch_loss, epoch) args.writer.add_scalar('train/sl_loss', epoch_sl_loss, epoch) args.writer.add_scalar('train/ssl_loss', epoch_ssl_loss, epoch) args.writer.add_scalar('train/lr', scheduler.get_lr(), epoch) return epoch_loss, iter_num def main_worker(gpu, ngpus_per_node, args): node_rank = int(args.node_rank) rank = node_rank * ngpus_per_node + gpu world_size = ngpus_per_node #args.world_size print(f"[Rank {rank}]: Use GPU: {gpu} for training") is_main_host = rank == 0 if is_main_host: os.makedirs(args.model_save_path, exist_ok=True) shutil.copyfile(__file__, os.path.join(args.model_save_path, args.run_id + '_' + os.path.basename(__file__))) torch.cuda.set_device(gpu) torch.distributed.init_process_group( backend = "nccl", init_method = args.init_method, rank = rank, world_size = world_size, ) print('init_process_group finished')
sam_model = sam_model_registry['vit'](args=args, checkpoint=None) # checkpoint for pretrained vit
1
2023-11-10 08:25:37+00:00
8k
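For orientation, this record's gold next_line re-enters main_worker right after init_process_group and builds the volumetric SAM backbone through the sam_model_registry entry in the record's context (build_sam_vit_3d accepts args and an optional checkpoint). The sketch below is one plausible continuation of that setup, inferred only from the record's imports and the train_epoch(args, segvol_model, train_dataloader, optimizer, scheduler, epoch, rank, gpu, iter_num) signature; the SegVol constructor arguments, the optimizer choice, and the DDP wrapping are assumptions, not part of the record.

# Hedged sketch: how the setup plausibly continues inside main_worker(gpu, ngpus_per_node, args).
# Only the first assignment is the record's gold next line; everything below it
# (SegVol constructor arguments, optimizer, DDP wrapping) is an assumption inferred
# from the record's imports and the train_epoch(...) signature.
import torch
from network.model import SegVol
from segment_anything_volumetric import sam_model_registry
from utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from data_utils import get_loader

sam_model = sam_model_registry['vit'](args=args, checkpoint=None)  # checkpoint for pretrained vit

segvol_model = SegVol(                        # assumed constructor signature
    image_encoder=sam_model.image_encoder,
    mask_decoder=sam_model.mask_decoder,
    prompt_encoder=sam_model.prompt_encoder,
    clip_ckpt=args.clip_ckpt,
    roi_size=args.spatial_size,
    patch_size=args.patch_size,
    test_mode=args.test_mode,
).cuda()
segvol_model = torch.nn.parallel.DistributedDataParallel(
    segvol_model, device_ids=[gpu], bucket_cap_mb=args.bucket_cap_mb
)

optimizer = torch.optim.AdamW(segvol_model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=args.warmup_epoch, max_epochs=args.num_epochs)
train_dataloader = get_loader(args)

epoch_loss, iter_num = train_epoch(args, segvol_model, train_dataloader, optimizer,
                                   scheduler, epoch=0, rank=rank, gpu=gpu, iter_num=0)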
xk-huang/segment-caption-anything
scripts/apps/visualize_infer_app.py
[ { "identifier": "global_setup", "path": "src/arguments.py", "snippet": "def global_setup(\n args: DictConfig,\n) -> Tuple[Arguments, SCASeq2SeqTrainingArguments, ModelArguments]:\n \"\"\"Global setup of arguments.\"\"\"\n if args.training.output_log_dir is not None:\n output_log_dir = args.training.output_log_dir\n if not osp.exists(output_log_dir):\n os.makedirs(output_log_dir)\n # NOTE: this is a dirty hack to enable logging to a different directory\n # by default in Hydra, logging.root.handlers contains two handler: stream & file\n # NOTE: mainly used in amulet\n for handler in logging.root.handlers:\n if isinstance(handler, logging.FileHandler):\n file_path = handler.baseFilename\n file_name = osp.basename(file_path)\n external_file_path = osp.join(output_log_dir, file_name)\n logging.root.addHandler(logging.FileHandler(external_file_path))\n logger.info(f\"Add external file handler to {external_file_path}\")\n break\n\n hostname = socket.gethostname()\n logger.info(f\"Running on {hostname}\")\n\n # Convert args to the actual dataclass object, to enable methods. Need to\n # delete _n_gpu, a property that TrainingArgs init doesn't expect.\n del args.training._n_gpu\n # Dirty hack: only run post init when we're ready to convert to TrainingArgs\n args.training._run_post_init = True\n # NOTE: otherwise, do_eval will be set to True in TrainingArguments.__post_init__\n if args.training.do_eval == False and args.training.do_train == False:\n args.training.evaluation_strategy = \"no\"\n args.training.load_best_model_at_end = False\n\n training_args = OmegaConf.to_object(args.training)\n model_args = OmegaConf.to_object(args.model)\n\n if (\n isinstance(model_args, (SCAModelArguments, SCADirectDecodingModelArguments))\n and args.model.model_name_or_path is None\n ):\n # NOTE: we need to set the default value of `model_name_or_path` to None\n # otherwise, it will be set to `base_sca` by default\n raise ValueError(f\"{type(model_args)} is not supported in model cfg name.\")\n\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device},\"\n f\" log_level: {log_level} n_gpu: {training_args.n_gpu}\"\n f\" distributed training: {bool(training_args.local_rank != -1)}, 16-bits\"\n f\" training: {training_args.fp16}, bf16 training: {training_args.bf16}\"\n )\n logger.debug(f\"Training/evaluation parameters {training_args}\")\n\n return args, training_args, model_args" }, { "identifier": "SAMCaptionerModelArguments", "path": "src/arguments.py", "snippet": "class SAMCaptionerModelArguments(ModelArguments):\n sam_model_name_or_path: str = field(default=\"facebook/sam-vit-huge\")\n captioner_model_name_or_path: str = field(default=\"Salesforce/blip-image-captioning-base\")" }, { "identifier": "SCAModelBaseArguments", "path": "src/arguments.py", "snippet": "class SCAModelBaseArguments(ModelArguments):\n model_name_or_path: Optional[str] = field(default=None)\n sam_model_name_or_path: str = field(default=\"facebook/sam-vit-huge\")\n lm_head_model_name_or_path: str = field(default=\"gpt2\")\n additional_num_hidden_layers: int = field(default=2)" }, { "identifier": "SAMCaptionerProcessor", "path": 
"src/models/sam_captioner/processing_sam_captioner.py", "snippet": "class SAMCaptionerProcessor(ProcessorMixin):\n attributes = [\"captioner_processor\"]\n captioner_processor_class = None\n\n def __init__(self, sam_processor, captioner_processor, *args, **kwargs):\n self.captioner_processor_class = type(captioner_processor).__name__\n # NOTE(xiaoke): The sam_processor is customized, which cannot pass the type check.\n # NOTE(xiaoke): Check the types of pretrained prcoessors\n super().__init__(captioner_processor, *args, **kwargs)\n\n # NOTE(xiaoke): Our sam processor is different from the original one\n self.sam_processor: SamProcessor = sam_processor\n self.captioner_processor: BlipProcessor\n self.tokenizer = self.captioner_processor.tokenizer\n\n def __call__(\n self,\n # from ../sam/processing_sam.py\n images=None,\n input_points=None,\n input_labels=None,\n input_boxes=None,\n original_sizes=None,\n # from transformers.models.blip.processing_blip.py\n text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,\n add_special_tokens: bool = True,\n padding: Union[bool, str, PaddingStrategy] = False,\n truncation: Union[bool, str, TruncationStrategy] = None,\n max_length: Optional[int] = None,\n stride: int = 0,\n pad_to_multiple_of: Optional[int] = None,\n return_attention_mask: Optional[bool] = None,\n return_overflowing_tokens: bool = False,\n return_special_tokens_mask: bool = False,\n return_offsets_mapping: bool = False,\n return_token_type_ids: bool = False,\n return_length: bool = False,\n verbose: bool = True,\n return_tensors=None,\n **kwargs,\n ):\n if images is None and original_sizes is None:\n raise ValueError(f\"images and original_sizes cannot both be None.\")\n\n if images is not None:\n input_encoding = self.sam_processor(\n images=images,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n **kwargs,\n )\n images = make_list_of_images(images)\n input_encoding[\"images\"] = make_list_of_images(images)\n else:\n input_encoding = self.sam_processor.process_prompts(\n original_sizes=original_sizes,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n )\n\n if text is not None:\n text_encoding = self.tokenizer(\n text=text,\n add_special_tokens=add_special_tokens,\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n stride=stride,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_overflowing_tokens=return_overflowing_tokens,\n return_special_tokens_mask=return_special_tokens_mask,\n return_offsets_mapping=return_offsets_mapping,\n return_token_type_ids=return_token_type_ids,\n return_length=return_length,\n verbose=verbose,\n return_tensors=return_tensors,\n **kwargs,\n )\n else:\n text_encoding = {}\n input_encoding.update(text_encoding)\n\n return input_encoding\n\n def post_process_masks(self, *args, **kwargs):\n return self.sam_processor.post_process_masks(*args, **kwargs)\n\n @classmethod\n def from_sam_captioner_pretrained(\n cls, sam_pretrained_model_name_or_path, captioner_pretrained_model_name_or_path, **kwargs\n ):\n sam_processor = SamProcessor.from_pretrained(sam_pretrained_model_name_or_path, **kwargs)\n # NOTE: To be compatible with OpenLLAMA which uses the slow tokenizer to avoid a bug.\n # Ref: https://github.com/openlm-research/open_llama#loading-the-weights-with-hugging-face-transformers\n if \"open_llama\" in 
captioner_pretrained_model_name_or_path:\n logger.warning(f\"Using slow tokenizer for {captioner_pretrained_model_name_or_path}.\")\n use_fast = False\n else:\n use_fast = True\n captioner_processor = AutoProcessor.from_pretrained(\n captioner_pretrained_model_name_or_path, use_fast=use_fast, **kwargs\n )\n return cls(sam_processor, captioner_processor)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n sam_processor_input_names = self.sam_processor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + sam_processor_input_names))" }, { "identifier": "ScaProcessor", "path": "src/models/sca/processing_sca.py", "snippet": "class ScaProcessor(ProcessorMixin):\n attributes = [\"tokenizer\"]\n tokenizer_class = \"AutoTokenizer\"\n\n def __init__(self, sam_processor, tokenizer):\n super().__init__(tokenizer)\n self.sam_processor: SamProcessor = sam_processor\n\n def __call__(\n self,\n # from ../sam/processing_sam.py\n images=None,\n input_points=None,\n input_labels=None,\n input_boxes=None,\n original_sizes=None,\n # from transformers.models.blip.processing_blip.py\n text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,\n add_special_tokens: bool = True,\n padding: Union[bool, str, PaddingStrategy] = False,\n truncation: Union[bool, str, TruncationStrategy] = None,\n max_length: Optional[int] = None,\n stride: int = 0,\n pad_to_multiple_of: Optional[int] = None,\n return_attention_mask: Optional[bool] = None,\n return_overflowing_tokens: bool = False,\n return_special_tokens_mask: bool = False,\n return_offsets_mapping: bool = False,\n return_token_type_ids: bool = False,\n return_length: bool = False,\n verbose: bool = True,\n return_tensors=None,\n **kwargs,\n ):\n if images is None and original_sizes is None:\n raise ValueError(f\"images and original_sizes cannot both be None.\")\n\n if images is not None:\n input_encoding = self.sam_processor(\n images=images,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n **kwargs,\n )\n images = make_list_of_images(images)\n input_encoding[\"images\"] = make_list_of_images(images)\n else:\n input_encoding = self.sam_processor.process_prompts(\n original_sizes=original_sizes,\n input_points=input_points,\n input_labels=input_labels,\n input_boxes=input_boxes,\n return_tensors=return_tensors,\n )\n\n if text is not None:\n text_encoding = self.tokenizer(\n text=text,\n add_special_tokens=add_special_tokens,\n padding=padding,\n truncation=truncation,\n max_length=max_length,\n stride=stride,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_overflowing_tokens=return_overflowing_tokens,\n return_special_tokens_mask=return_special_tokens_mask,\n return_offsets_mapping=return_offsets_mapping,\n return_token_type_ids=return_token_type_ids,\n return_length=return_length,\n verbose=verbose,\n return_tensors=return_tensors,\n **kwargs,\n )\n else:\n text_encoding = {}\n input_encoding.update(text_encoding)\n\n return input_encoding\n\n def post_process_masks(self, *args, **kwargs):\n return self.sam_processor.post_process_masks(*args, **kwargs)\n\n @classmethod\n def from_sam_text_pretrained(cls, sam_pretrained_model_name_or_path, text_pretrained_model_name_or_path, **kwargs):\n sam_processor = SamProcessor.from_pretrained(sam_pretrained_model_name_or_path, **kwargs)\n # NOTE: To be compatible with OpenLLAMA which uses the slow tokenizer 
to avoid a bug.\n # Ref: https://github.com/openlm-research/open_llama#loading-the-weights-with-hugging-face-transformers\n if \"open_llama\" in text_pretrained_model_name_or_path:\n logger.warning(f\"Using slow tokenizer for {text_pretrained_model_name_or_path}.\")\n use_fast = False\n else:\n use_fast = True\n captioner_processor = AutoProcessor.from_pretrained(\n text_pretrained_model_name_or_path, use_fast=use_fast, **kwargs\n )\n return cls(sam_processor, captioner_processor)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n sam_processor_input_names = self.sam_processor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + sam_processor_input_names))" }, { "identifier": "prepare_datasets", "path": "src/train.py", "snippet": "def prepare_datasets(args):\n train_data = []\n for train_data_config_name in args.train_data:\n cfg = hydra.compose(config_name=f\"data/{train_data_config_name}\", overrides=args.train_data_overrides)\n train_data.append(cfg.data)\n args.train_data = train_data\n\n # NOTE(xiaoke): We should only inference one eval dataset\n if len(args.eval_data) > 1:\n logger.warning(f\"We should only inference one dataset, got {args.eval_data}\")\n eval_data = []\n for eval_data_config_name in args.eval_data:\n cfg = hydra.compose(config_name=f\"data/{eval_data_config_name}\", overrides=args.eval_data_overrides)\n eval_data.append(cfg.data)\n\n train_dataset = []\n for i, each_train_data in enumerate(train_data):\n # NOTE: add data `split` to each dataset\n each_train_data.split = \"train\"\n\n _train_dataset = instantiate(each_train_data)\n train_dataset.append(_train_dataset)\n logger.info(f\"Train Dataset [{i}]: {each_train_data}\\n{_train_dataset}\")\n\n eval_dataset = {}\n for i, each_eval_data in enumerate(eval_data):\n # NOTE: add data `split` to each dataset\n # NOTE: visual genome has validation set, but we use test set for evaluation\n if \"visual_genome.py\" in each_eval_data.path and getattr(each_eval_data, \"use_densecap_splits\", None) is True:\n logger.info(\"Using densecap splits in Visual Genome, using test split to eval\")\n each_eval_data.split = \"test\"\n\n # NOTE: refcoco has validation set, but we use test set for evaluation\n elif \"refcoco.py\" in each_eval_data.path:\n if each_eval_data.name.startswith(\"refcoco-\") or each_eval_data.name.startswith(\"refcoco+-\"):\n if each_eval_data.split is None or each_eval_data.split == \"train\":\n raise ValueError(f\"refcoco{{,+}} must have split for eval. 
got {each_eval_data.split}\")\n logger.info(f\"Using refcoco{{,+}}: {each_eval_data.split} split to eval\")\n elif each_eval_data.name.startswith(\"refcocog\"):\n logger.info(\"Using refcocog val split to eval\")\n each_eval_data.split = \"validation\"\n elif each_eval_data.name.startswith(\"refclef\"):\n logger.info(\"Using refclef val split to eval\")\n each_eval_data.split = \"validation\"\n\n # NOTE: coco has validation set, but it does not have test set.\n elif \"coco_instance.py\" in each_eval_data.path or \"coco_instance-local.py\" in each_eval_data.path:\n logger.info(\"Using coco val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif \"objects365-local.py\" in each_eval_data.path:\n logger.info(\"Using objects365 (in fact, it is COCO) val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif \"v3det-local.py\" in each_eval_data.path:\n logger.info(\"Using v3det (in fact, it is COCO) val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif \"sbu-pseudo_region-local.py\" in each_eval_data.path or \"sbu-pseudo_region.py\" in each_eval_data.path:\n logger.info(\"Using sbu to eval, but it does not have test split, so we use train split\")\n each_eval_data.split = \"train\"\n\n elif \"coco_caption-pseudo_region.py\" in each_eval_data.path:\n logger.info(\"Using coco_caption (in fact, it is COCO) val split to eval\")\n each_eval_data.split = \"validation\"\n\n elif (\n \"visual_genome-densecap-local.py\" in each_eval_data.path\n or \"visual_genome-grit-local.py\" in each_eval_data.path\n ):\n logger.info(f\"Using visual_genome (They are my custom splits for GRiT and Densecap) test split to eval\")\n each_eval_data.split = \"test\"\n else:\n raise ValueError(\n f\"Unknown dataset {each_eval_data.path}, we cannot determine the split for it. Please edit `src/train.py:prepare_datasets` to add the split for it.\"\n )\n\n _eval_dataset = instantiate(each_eval_data)\n eval_dataset_name = _get_data_name(each_eval_data)\n eval_dataset[eval_dataset_name] = _eval_dataset\n logger.info(f\"Eval Dataset [{i}]: {each_eval_data}\\n{_eval_dataset}\")\n args.eval_data = eval_data # NOTE: overwrite previous eval_data\n\n if args.train_data_interleave_probabilities is not None and len(train_dataset) != len(\n args.train_data_interleave_probabilities\n ):\n raise ValueError(\n f\"train_data_interleave_probabilities must have the same length as train_data, got {len(train_dataset)} and {len(args.train_data_interleave_probabilities)}\"\n )\n # NOTE(xiaoke): Expected a list of Dataset objects or a list of IterableDataset objects.\n if len(train_dataset) > 0:\n if args.train_data_interleave_probabilities is None:\n logger.warning(\n \"train_data_interleave_probabilities is not provided, \"\n \"the resulting dataset will have max_length_datasets*nb_dataset samples. 
\"\n \"As we use `all_exhausted` stopping strategy which is a oversampling strategy.\"\n )\n else:\n if sum(args.train_data_interleave_probabilities) != 1.0:\n logger.info(f\"Normalize train_data_interleave_probabilities to sum to 1.0\")\n args.train_data_interleave_probabilities = [\n each_prob / sum(args.train_data_interleave_probabilities)\n for each_prob in args.train_data_interleave_probabilities\n ]\n logger.info(f\"train_data_interleave_probabilities: {args.train_data_interleave_probabilities}\")\n # NOTE(xiaoke): Accourding to `datasets/src/datasets/arrow_dataset.py:_interleave_map_style_datasets:6079` and\n # `Breadcrumbsdatasets/src/datasets/iterable_dataset.py:_interleave_iterable_datasets:2293`\n train_dataset = interleave_datasets(\n train_dataset,\n probabilities=args.train_data_interleave_probabilities,\n seed=args.training.seed,\n stopping_strategy=\"all_exhausted\",\n )\n else:\n train_dataset = None\n\n logger.info(f\"Train Dataset: {train_dataset}\")\n logger.info(f\"Eval Dataset: {eval_dataset}\")\n return train_dataset, eval_dataset" }, { "identifier": "prepare_data_transform", "path": "src/train.py", "snippet": "def prepare_data_transform(training_args, model_args, train_dataset, eval_dataset, processor):\n DataTransformClass = None\n if isinstance(model_args, SAMCaptionerModelArguments):\n DataTransformClass = SamCaptionerDataTransform\n elif isinstance(model_args, SCAModelBaseArguments):\n DataTransformClass = SCADataTransform\n if training_args.do_train:\n if train_dataset is None:\n raise ValueError(\"train_dataset must be provided if do_train is True\")\n\n num_masks_per_sample = training_args.num_masks_per_sample\n if num_masks_per_sample is None:\n num_masks_per_sample = 64\n logger.info(f\"num_masks_per_sample not provided, defaulting to {num_masks_per_sample}\")\n\n data_transforms = training_args.data_transforms\n\n train_transforms = DataTransformClass(\n processor.sam_processor, processor.tokenizer, \"train\", num_masks_per_sample, data_transforms\n )\n\n if isinstance(train_dataset, Dataset) and training_args.max_train_samples is not None:\n train_dataset = train_dataset.shuffle(seed=training_args.seed).select(\n range(training_args.max_train_samples)\n )\n # Set the training transforms\n if isinstance(train_dataset, Dataset):\n train_dataset = train_dataset.with_transform(train_transforms)\n elif isinstance(train_dataset, IterableDataset):\n train_dataset = train_dataset.map(\n train_transforms, batched=True, batch_size=training_args.per_device_train_batch_size\n )\n else:\n raise ValueError(f\"dataset must be one of [Dataset, IterableDataset], got {type(train_dataset)}\")\n else:\n logger.warning(\"do_train is False, so we do not apply data augmentation to train_dataset\")\n\n if training_args.do_eval or training_args.do_inference or training_args.do_train:\n if eval_dataset is None:\n raise ValueError(\"eval_dataset must be provided if do_eval or do_inference is True\")\n\n eval_transforms = DataTransformClass(processor.sam_processor, processor.tokenizer, \"inference\")\n for eval_dataset_k, eval_dataset_v in eval_dataset.items():\n if isinstance(eval_dataset_v, Dataset) and training_args.max_eval_samples is not None:\n eval_dataset_v = eval_dataset_v.select(range(training_args.max_eval_samples))\n # Set the validation transforms\n if isinstance(eval_dataset_v, Dataset):\n eval_dataset_v = eval_dataset_v.with_transform(eval_transforms)\n elif isinstance(eval_dataset_v, IterableDataset):\n eval_dataset_v = eval_dataset_v.map(\n eval_transforms, 
batched=True, batch_size=training_args.per_device_eval_batch_size\n )\n else:\n raise ValueError(f\"dataset must be one of [Dataset, IterableDataset], got {type(eval_dataset_v)}\")\n eval_dataset[eval_dataset_k] = eval_dataset_v\n else:\n logger.warning(\n \"do_eval and do_inference and do_train are False, so we do not apply data augmentation to eval_dataset\"\n )\n return train_dataset, eval_dataset" } ]
import sys import os import os import torch import numpy as np import pandas as pd import pycocotools.mask import json import tqdm import hashlib import glob import cv2 import numpy as np import cv2 import numpy as np import pandas as pd import json import io import base64 import pycocotools.mask import sqlite3 from src.arguments import global_setup, SAMCaptionerModelArguments, SCAModelBaseArguments from src.models.sam_captioner import SAMCaptionerProcessor from src.models.sca import ScaProcessor from src.train import prepare_datasets, prepare_data_transform from PIL import Image from hydra import initialize, compose from PIL import Image, ImageDraw, ImageFont from PIL import Image, ImageDraw, ImageFont from PIL import Image from flask import Flask, render_template, request, send_file
5,743
sys.path.append(".") os.getcwd() DATASET = "vg-densecap-local" with initialize(version_base="1.3", config_path="../../src/conf"): args = compose( config_name="conf", overrides=[ f"train_data=[{DATASET}]", f"eval_data=[{DATASET}]", "+model=base_sam_captioner", "training.output_dir=tmp/visualization" # "training.do_train=True", # "training.do_eval=True", ], ) args, training_args, model_args = global_setup(args) os.makedirs(training_args.output_dir, exist_ok=True) # Initialize our dataset and prepare it with initialize(version_base="1.3", config_path="../../src/conf"): train_dataset, eval_dataset = prepare_datasets(args) if isinstance(model_args, SAMCaptionerModelArguments): processor = SAMCaptionerProcessor.from_sam_captioner_pretrained( model_args.sam_model_name_or_path, model_args.captioner_model_name_or_path, cache_dir=model_args.cache_dir, model_max_length=model_args.model_max_length, ) # FIXME: when load weights from existing sca model, we should use the same tokenizer as the existing sca model # model.lm_head_model_name_or_path=$(grep lm_head_model_name_or_path $AMLT_MAP_INPUT_DIR/.hydra/config.yaml | tail -n1 | sed 's/ *//g' | cut -d ':' -f2) # model.sam_model_name_or_path=$(grep sam_model_name_or_path $AMLT_MAP_INPUT_DIR/.hydra/config.yaml | tail -n1 | sed 's/ *//g' | cut -d ':' -f2) elif isinstance(model_args, SCAModelBaseArguments):
sys.path.append(".") os.getcwd() DATASET = "vg-densecap-local" with initialize(version_base="1.3", config_path="../../src/conf"): args = compose( config_name="conf", overrides=[ f"train_data=[{DATASET}]", f"eval_data=[{DATASET}]", "+model=base_sam_captioner", "training.output_dir=tmp/visualization" # "training.do_train=True", # "training.do_eval=True", ], ) args, training_args, model_args = global_setup(args) os.makedirs(training_args.output_dir, exist_ok=True) # Initialize our dataset and prepare it with initialize(version_base="1.3", config_path="../../src/conf"): train_dataset, eval_dataset = prepare_datasets(args) if isinstance(model_args, SAMCaptionerModelArguments): processor = SAMCaptionerProcessor.from_sam_captioner_pretrained( model_args.sam_model_name_or_path, model_args.captioner_model_name_or_path, cache_dir=model_args.cache_dir, model_max_length=model_args.model_max_length, ) # FIXME: when load weights from existing sca model, we should use the same tokenizer as the existing sca model # model.lm_head_model_name_or_path=$(grep lm_head_model_name_or_path $AMLT_MAP_INPUT_DIR/.hydra/config.yaml | tail -n1 | sed 's/ *//g' | cut -d ':' -f2) # model.sam_model_name_or_path=$(grep sam_model_name_or_path $AMLT_MAP_INPUT_DIR/.hydra/config.yaml | tail -n1 | sed 's/ *//g' | cut -d ':' -f2) elif isinstance(model_args, SCAModelBaseArguments):
processor = ScaProcessor.from_sam_text_pretrained(
4
2023-11-17 14:10:41+00:00
8k
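In this record the gold_snippet_index of 4 points at the ScaProcessor entry of the context list, whose classmethod from_sam_text_pretrained(sam_pretrained_model_name_or_path, text_pretrained_model_name_or_path, **kwargs) mirrors the SAM-captioner path. Below is a hedged sketch of how the dangling elif branch in the cropped code plausibly completes; the keyword arguments simply mirror the SAMCaptionerProcessor branch above it and are assumptions beyond the gold next line.

from src.arguments import SAMCaptionerModelArguments, SCAModelBaseArguments
from src.models.sam_captioner import SAMCaptionerProcessor
from src.models.sca import ScaProcessor

# model_args is produced earlier in the record by global_setup(args).
if isinstance(model_args, SAMCaptionerModelArguments):
    processor = SAMCaptionerProcessor.from_sam_captioner_pretrained(
        model_args.sam_model_name_or_path,
        model_args.captioner_model_name_or_path,
        cache_dir=model_args.cache_dir,
        model_max_length=model_args.model_max_length,
    )
elif isinstance(model_args, SCAModelBaseArguments):
    processor = ScaProcessor.from_sam_text_pretrained(        # gold next line of this record
        model_args.sam_model_name_or_path,
        model_args.lm_head_model_name_or_path,                 # field defined on SCAModelBaseArguments
        cache_dir=model_args.cache_dir,                        # assumed, mirroring the branch above
        model_max_length=model_args.model_max_length,          # assumed, mirroring the branch above
    )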
p0p4k/pflowtts_pytorch
pflow/models/components/speech_prompt_encoder.py
[ { "identifier": "sequence_mask", "path": "pflow/utils/model.py", "snippet": "def sequence_mask(length, max_length=None):\n if max_length is None:\n max_length = length.max()\n x = torch.arange(max_length, dtype=length.dtype, device=length.device)\n return x.unsqueeze(0) < length.unsqueeze(1)" }, { "identifier": "commons", "path": "pflow/models/components/commons.py", "snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef convert_pad_shape(pad_shape):\ndef intersperse(lst, item):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef rand_slice_segments_for_cat(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape):\ndef shift_1d(x):\ndef sequence_mask(length, max_length=None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):" }, { "identifier": "PosteriorEncoder", "path": "pflow/models/components/vits_posterior.py", "snippet": "class PosteriorEncoder(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=0):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.dilation_rate = dilation_rate\n self.n_layers = n_layers\n self.gin_channels = gin_channels\n\n self.pre = nn.Conv1d(in_channels, hidden_channels, 1)\n self.enc = modules.WN(hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=gin_channels)\n self.proj = nn.Conv1d(hidden_channels, out_channels, 1)\n\n def forward(self, x, x_lengths, g=None):\n x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)),\n 1).to(x.dtype)\n x = self.pre(x) * x_mask\n x = self.enc(x, x_mask, g=g)\n stats = self.proj(x) * x_mask\n # m, logs = torch.split(stats, self.out_channels, dim=1)\n # z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask\n # z = m * x_mask\n return stats, x_mask" }, { "identifier": "BasicTransformerBlock", "path": "pflow/models/components/transformer.py", "snippet": "class BasicTransformerBlock(nn.Module):\n r\"\"\"\n A basic Transformer block.\n\n Parameters:\n dim (`int`): The number of channels in the input and output.\n num_attention_heads (`int`): The number of heads to use for multi-head attention.\n attention_head_dim (`int`): The number of channels in each head.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.\n only_cross_attention (`bool`, *optional*):\n Whether to use only cross-attention layers. In this case two cross attention layers are used.\n double_self_attention (`bool`, *optional*):\n Whether to use two self-attention layers. 
In this case no cross attention layers are used.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to be used in feed-forward.\n num_embeds_ada_norm (:\n obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.\n attention_bias (:\n obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_attention_heads: int,\n attention_head_dim: int,\n dropout=0.0,\n cross_attention_dim: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n attention_bias: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_elementwise_affine: bool = True,\n norm_type: str = \"layer_norm\",\n final_dropout: bool = False,\n ):\n super().__init__()\n self.only_cross_attention = only_cross_attention\n\n self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == \"ada_norm_zero\"\n self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == \"ada_norm\"\n\n if norm_type in (\"ada_norm\", \"ada_norm_zero\") and num_embeds_ada_norm is None:\n raise ValueError(\n f\"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to\"\n f\" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.\"\n )\n\n # Define 3 blocks. Each block has its own normalization layer.\n # 1. Self-Attn\n if self.use_ada_layer_norm:\n self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)\n elif self.use_ada_layer_norm_zero:\n self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)\n else:\n self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n self.attn1 = Attention(\n query_dim=dim,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n cross_attention_dim=cross_attention_dim if only_cross_attention else None,\n upcast_attention=upcast_attention,\n )\n\n # 2. Cross-Attn\n if cross_attention_dim is not None or double_self_attention:\n # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.\n # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during\n # the second cross attention block.\n self.norm2 = (\n AdaLayerNorm(dim, num_embeds_ada_norm)\n if self.use_ada_layer_norm\n else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n )\n self.attn2 = Attention(\n query_dim=dim,\n cross_attention_dim=cross_attention_dim if not double_self_attention else None,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n upcast_attention=upcast_attention,\n # scale_qk=False, # uncomment this to not to use flash attention\n ) # is self-attn if encoder_hidden_states is none\n else:\n self.norm2 = None\n self.attn2 = None\n\n # 3. 
Feed-forward\n self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)\n\n # let chunk size default to None\n self._chunk_size = None\n self._chunk_dim = 0\n\n def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):\n # Sets chunk feed-forward\n self._chunk_size = chunk_size\n self._chunk_dim = dim\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n ):\n # Notice that normalization is always applied before the real computation in the following blocks.\n # 1. Self-Attention\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=encoder_attention_mask if self.only_cross_attention else attention_mask,\n **cross_attention_kwargs,\n )\n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n # 2. Cross-Attention\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n if self._chunk_size is not None:\n # \"feed_forward_chunk_size\" can be used to save memory\n if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:\n raise ValueError(\n f\"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.\"\n )\n\n num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size\n ff_output = torch.cat(\n [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],\n dim=self._chunk_dim,\n )\n else:\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states" } ]
import math import torch import torch.nn as nn import pflow.utils as utils from einops import rearrange from pflow.utils.model import sequence_mask from pflow.models.components import commons from pflow.models.components.vits_posterior import PosteriorEncoder from pflow.models.components.transformer import BasicTransformerBlock
5,155
MultiHeadAttention( hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout ) ) self.norm_layers_0.append(LayerNorm(hidden_channels)) self.encdec_attn_layers.append( MultiHeadAttention( hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout ) ) self.norm_layers_1.append(LayerNorm(hidden_channels)) self.ffn_layers.append( FFN( hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, ) ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask, h, h_mask): """ x: decoder input h: encoder output """ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( device=x.device, dtype=x.dtype ) encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask for i in range(self.n_layers): y = self.self_attn_layers[i](x, x, self_attn_mask) y = self.drop(y) x = self.norm_layers_0[i](x + y) y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) y = self.drop(y) x = self.norm_layers_1[i](x + y) y = self.ffn_layers[i](x, x_mask) y = self.drop(y) x = self.norm_layers_2[i](x + y) x = x * x_mask return x class TextEncoder(nn.Module): def __init__( self, encoder_type, encoder_params, duration_predictor_params, n_vocab, speech_in_channels, ): super().__init__() self.encoder_type = encoder_type self.n_vocab = n_vocab self.n_feats = encoder_params.n_feats self.n_channels = encoder_params.n_channels self.emb = torch.nn.Embedding(n_vocab, self.n_channels) torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5) self.speech_in_channels = speech_in_channels self.speech_out_channels = self.n_channels self.speech_prompt_proj = torch.nn.Conv1d(self.speech_in_channels, self.speech_out_channels, 1) # self.speech_prompt_proj = PosteriorEncoder( # self.speech_in_channels, # self.speech_out_channels, # self.speech_out_channels, # 1, # 1, # 1, # gin_channels=0, # ) self.prenet = ConvReluNorm( self.n_channels, self.n_channels, self.n_channels, kernel_size=5, n_layers=3, p_dropout=0, ) self.speech_prompt_encoder = Encoder( encoder_params.n_channels, encoder_params.filter_channels, encoder_params.n_heads, encoder_params.n_layers, encoder_params.kernel_size, encoder_params.p_dropout, ) self.text_base_encoder = Encoder( encoder_params.n_channels, encoder_params.filter_channels, encoder_params.n_heads, encoder_params.n_layers, encoder_params.kernel_size, encoder_params.p_dropout, ) self.decoder = Decoder( encoder_params.n_channels, encoder_params.filter_channels, encoder_params.n_heads, encoder_params.n_layers, encoder_params.kernel_size, encoder_params.p_dropout, )
""" from https://github.com/jaywalnut310/glow-tts """ log = utils.get_pylogger(__name__) class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-4): super().__init__() self.channels = channels self.eps = eps self.gamma = torch.nn.Parameter(torch.ones(channels)) self.beta = torch.nn.Parameter(torch.zeros(channels)) def forward(self, x): n_dims = len(x.shape) mean = torch.mean(x, 1, keepdim=True) variance = torch.mean((x - mean) ** 2, 1, keepdim=True) x = (x - mean) * torch.rsqrt(variance + self.eps) shape = [1, -1] + [1] * (n_dims - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class ConvReluNorm(nn.Module): def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): super().__init__() self.in_channels = in_channels self.hidden_channels = hidden_channels self.out_channels = out_channels self.kernel_size = kernel_size self.n_layers = n_layers self.p_dropout = p_dropout self.conv_layers = torch.nn.ModuleList() self.norm_layers = torch.nn.ModuleList() self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) self.norm_layers.append(LayerNorm(hidden_channels)) self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout)) for _ in range(n_layers - 1): self.conv_layers.append( torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2) ) self.norm_layers.append(LayerNorm(hidden_channels)) self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() def forward(self, x, x_mask): x_org = x for i in range(self.n_layers): x = self.conv_layers[i](x * x_mask) x = self.norm_layers[i](x) x = self.relu_drop(x) x = x_org + self.proj(x) return x * x_mask class DurationPredictor(nn.Module): def __init__(self, in_channels, filter_channels, kernel_size, p_dropout): super().__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.p_dropout = p_dropout self.drop = torch.nn.Dropout(p_dropout) self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_1 = LayerNorm(filter_channels) self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_2 = LayerNorm(filter_channels) self.proj = torch.nn.Conv1d(filter_channels, 1, 1) def forward(self, x, x_mask): x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.norm_1(x) x = self.drop(x) x = self.conv_2(x * x_mask) x = torch.relu(x) x = self.norm_2(x) x = self.drop(x) x = self.proj(x * x_mask) # x = torch.relu(x) return x * x_mask class DurationPredictorNS2(nn.Module): def __init__( self, in_channels, filter_channels, kernel_size, p_dropout=0.5 ): super().__init__() self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.drop = nn.Dropout(p_dropout) self.conv_1 = nn.Conv1d( in_channels, filter_channels, kernel_size, padding=kernel_size // 2 ) self.norm_1 = LayerNorm(filter_channels) self.module_list = nn.ModuleList() self.module_list.append(self.conv_1) self.module_list.append(nn.ReLU()) self.module_list.append(self.norm_1) self.module_list.append(self.drop) for i in range(12): self.module_list.append(nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)) self.module_list.append(nn.ReLU()) self.module_list.append(LayerNorm(filter_channels)) self.module_list.append(nn.Dropout(p_dropout)) # attention layer every 3 
layers self.attn_list = nn.ModuleList() for i in range(4): self.attn_list.append( Encoder( filter_channels, filter_channels, 8, 10, 3, p_dropout=p_dropout, ) ) for i in range(30): if i+1 % 3 == 0: self.module_list.append(self.attn_list[i//3]) self.proj = nn.Conv1d(filter_channels, 1, 1) def forward(self, x, x_mask): x = torch.detach(x) for layer in self.module_list: x = layer(x * x_mask) x = self.proj(x * x_mask) # x = torch.relu(x) return x * x_mask class RotaryPositionalEmbeddings(nn.Module): """ ## RoPE module Rotary encoding transforms pairs of features by rotating in the 2D plane. That is, it organizes the $d$ features as $\frac{d}{2}$ pairs. Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it by an angle depending on the position of the token. """ def __init__(self, d: int, base: int = 10_000): r""" * `d` is the number of features $d$ * `base` is the constant used for calculating $\Theta$ """ super().__init__() self.base = base self.d = int(d) self.cos_cached = None self.sin_cached = None def _build_cache(self, x: torch.Tensor): r""" Cache $\cos$ and $\sin$ values """ # Return if cache is already built if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]: return # Get sequence length seq_len = x.shape[0] # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device) # Create position indexes `[0, 1, ..., seq_len - 1]` seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device) # Calculate the product of position index and $\theta_i$ idx_theta = torch.einsum("n,d->nd", seq_idx, theta) # Concatenate so that for row $m$ we have # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$ idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1) # Cache them self.cos_cached = idx_theta2.cos()[:, None, None, :] self.sin_cached = idx_theta2.sin()[:, None, None, :] def _neg_half(self, x: torch.Tensor): # $\frac{d}{2}$ d_2 = self.d // 2 # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$ return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1) def forward(self, x: torch.Tensor): """ * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]` """ # Cache $\cos$ and $\sin$ values x = rearrange(x, "b h t d -> t b h d") self._build_cache(x) # Split the features, we can choose to apply rotary embeddings only to a partial set of features. 
x_rope, x_pass = x[..., : self.d], x[..., self.d :] # Calculate # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$ neg_half_x = self._neg_half(x_rope) x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]]) return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d") class MultiHeadAttention(nn.Module): def __init__( self, channels, out_channels, n_heads, heads_share=True, p_dropout=0.0, proximal_bias=False, proximal_init=False, ): super().__init__() assert channels % n_heads == 0 self.channels = channels self.out_channels = out_channels self.n_heads = n_heads self.heads_share = heads_share self.proximal_bias = proximal_bias self.p_dropout = p_dropout self.attn = None self.k_channels = channels // n_heads self.conv_q = torch.nn.Conv1d(channels, channels, 1) self.conv_k = torch.nn.Conv1d(channels, channels, 1) self.conv_v = torch.nn.Conv1d(channels, channels, 1) # from https://nn.labml.ai/transformers/rope/index.html self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5) self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5) self.conv_o = torch.nn.Conv1d(channels, out_channels, 1) self.drop = torch.nn.Dropout(p_dropout) torch.nn.init.xavier_uniform_(self.conv_q.weight) torch.nn.init.xavier_uniform_(self.conv_k.weight) if proximal_init: self.conv_k.weight.data.copy_(self.conv_q.weight.data) self.conv_k.bias.data.copy_(self.conv_q.bias.data) torch.nn.init.xavier_uniform_(self.conv_v.weight) def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) x, self.attn = self.attention(q, k, v, mask=attn_mask) x = self.conv_o(x) return x def attention(self, query, key, value, mask=None): b, d, t_s, t_t = (*key.size(), query.size(2)) query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads) key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads) value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads) query = self.query_rotary_pe(query) key = self.key_rotary_pe(key) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) if self.proximal_bias: assert t_s == t_t, "Proximal bias is only available for self-attention." 
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) if mask is not None: scores = scores.masked_fill(mask == 0, -1e4) p_attn = torch.nn.functional.softmax(scores, dim=-1) p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) output = output.transpose(2, 3).contiguous().view(b, d, t_t) return output, p_attn @staticmethod def _attention_bias_proximal(length): r = torch.arange(length, dtype=torch.float32) diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) class FFN(nn.Module): def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.filter_channels = filter_channels self.kernel_size = kernel_size self.p_dropout = p_dropout self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2) self.drop = torch.nn.Dropout(p_dropout) def forward(self, x, x_mask): x = self.conv_1(x * x_mask) x = torch.relu(x) x = self.drop(x) x = self.conv_2(x * x_mask) return x * x_mask class Encoder(nn.Module): def __init__( self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, **kwargs, ): super().__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size self.p_dropout = p_dropout self.drop = torch.nn.Dropout(p_dropout) self.attn_layers = torch.nn.ModuleList() self.norm_layers_1 = torch.nn.ModuleList() self.ffn_layers = torch.nn.ModuleList() self.norm_layers_2 = torch.nn.ModuleList() for _ in range(self.n_layers): self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) self.norm_layers_1.append(LayerNorm(hidden_channels)) self.ffn_layers.append( FFN( hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, ) ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask): attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) for i in range(self.n_layers): x = x * x_mask y = self.attn_layers[i](x, x, attn_mask) y = self.drop(y) x = self.norm_layers_1[i](x + y) y = self.ffn_layers[i](x, x_mask) y = self.drop(y) x = self.norm_layers_2[i](x + y) x = x * x_mask return x class Decoder(nn.Module): def __init__( self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.0, proximal_bias=False, proximal_init=True, **kwargs ): super().__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels self.n_heads = n_heads self.n_layers = n_layers self.kernel_size = kernel_size self.p_dropout = p_dropout self.proximal_bias = proximal_bias self.proximal_init = proximal_init self.drop = nn.Dropout(p_dropout) self.self_attn_layers = nn.ModuleList() self.norm_layers_0 = nn.ModuleList() self.encdec_attn_layers = nn.ModuleList() self.norm_layers_1 = nn.ModuleList() self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() for i in range(self.n_layers): self.self_attn_layers.append( MultiHeadAttention( hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout ) ) self.norm_layers_0.append(LayerNorm(hidden_channels)) self.encdec_attn_layers.append( MultiHeadAttention( hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout ) ) 
self.norm_layers_1.append(LayerNorm(hidden_channels)) self.ffn_layers.append( FFN( hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, ) ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask, h, h_mask): """ x: decoder input h: encoder output """ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( device=x.device, dtype=x.dtype ) encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask for i in range(self.n_layers): y = self.self_attn_layers[i](x, x, self_attn_mask) y = self.drop(y) x = self.norm_layers_0[i](x + y) y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) y = self.drop(y) x = self.norm_layers_1[i](x + y) y = self.ffn_layers[i](x, x_mask) y = self.drop(y) x = self.norm_layers_2[i](x + y) x = x * x_mask return x class TextEncoder(nn.Module): def __init__( self, encoder_type, encoder_params, duration_predictor_params, n_vocab, speech_in_channels, ): super().__init__() self.encoder_type = encoder_type self.n_vocab = n_vocab self.n_feats = encoder_params.n_feats self.n_channels = encoder_params.n_channels self.emb = torch.nn.Embedding(n_vocab, self.n_channels) torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5) self.speech_in_channels = speech_in_channels self.speech_out_channels = self.n_channels self.speech_prompt_proj = torch.nn.Conv1d(self.speech_in_channels, self.speech_out_channels, 1) # self.speech_prompt_proj = PosteriorEncoder( # self.speech_in_channels, # self.speech_out_channels, # self.speech_out_channels, # 1, # 1, # 1, # gin_channels=0, # ) self.prenet = ConvReluNorm( self.n_channels, self.n_channels, self.n_channels, kernel_size=5, n_layers=3, p_dropout=0, ) self.speech_prompt_encoder = Encoder( encoder_params.n_channels, encoder_params.filter_channels, encoder_params.n_heads, encoder_params.n_layers, encoder_params.kernel_size, encoder_params.p_dropout, ) self.text_base_encoder = Encoder( encoder_params.n_channels, encoder_params.filter_channels, encoder_params.n_heads, encoder_params.n_layers, encoder_params.kernel_size, encoder_params.p_dropout, ) self.decoder = Decoder( encoder_params.n_channels, encoder_params.filter_channels, encoder_params.n_heads, encoder_params.n_layers, encoder_params.kernel_size, encoder_params.p_dropout, )
self.transformerblock = BasicTransformerBlock(
3
2023-11-11 16:08:17+00:00
8k
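Editor's note on the row above: the RotaryPositionalEmbeddings docstring explains the idea behind RoPE, namely that the d features are grouped into d/2 pairs and each pair is rotated by a position-dependent angle before attention. The following is a minimal, self-contained Python sketch of that rotation for a single [seq_len, dim] tensor; it mirrors the half-split trick of _neg_half but is an illustration written for this note, not code taken from the repository.

import torch

def rope_rotate(x: torch.Tensor, base: int = 10_000) -> torch.Tensor:
    # x: [seq_len, dim] with dim even; each position gets its own rotation angles
    seq_len, dim = x.shape
    theta = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))          # [dim/2]
    angles = torch.einsum("n,d->nd", torch.arange(seq_len).float(), theta)   # [seq_len, dim/2]
    angles = torch.cat([angles, angles], dim=-1)                             # [seq_len, dim]
    # negate the second half and swap halves, exactly what _neg_half does above
    neg_half = torch.cat([-x[:, dim // 2:], x[:, :dim // 2]], dim=-1)
    return x * angles.cos() + neg_half * angles.sin()

q = rope_rotate(torch.randn(16, 64))   # 16 positions, one 64-dim head

The useful property is that the dot product between a rotated query and a rotated key depends only on their relative distance, which is why the MultiHeadAttention class in the row applies rotary embeddings to both the query and the key projections before computing attention scores.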
ShipBit/wingman-ai
gui/root.py
[ { "identifier": "NotificationBanner", "path": "gui/components/notification_banner.py", "snippet": "class NotificationBanner(ctk.CTkFrame):\n notification_level: dict[Printr.CHANNEL, Any] = {\n \"info\": {\"color\": (\"#113399\", \"#113399\"), \"font_color\": \"white\"},\n \"warning\": {\"color\": (\"yellow\", \"yellow\"), \"font_color\": (\"black\", \"black\")},\n \"error\": {\"color\": \"#dd0033\", \"font_color\": \"white\"}\n }\n\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n self._grid_position = {\"row\":0, \"column\":0}\n self.visible = False\n self.printr = Printr()\n self.grid_columnconfigure(0, weight=1)\n\n self.close_button = IconButton(self,\n icon=\"close\",\n size=16,\n padding=8,\n themed=False,\n hover_color=\"white\",\n command=self.hide)\n self.close_button.grid(row=0, column=3, padx=5, pady=5, sticky=\"e\")\n\n self.msg_vars = {}\n self.labels = {}\n for level, level_config in self.notification_level.items():\n self.msg_vars[level] = ctk.StringVar(self, name=level)\n self.msg_vars[level].trace_add(\"write\", lambda *args, lvl=level: self.__change_handler(lvl))\n self.labels[level] = ctk.CTkLabel(self,\n pady=5,\n textvariable=self.msg_vars[level],\n text_color=level_config.get(\"font_color\", (\"black\", \"white\")))\n self.printr.set_output(level, self.msg_vars[level])\n\n\n def __change_handler(self, level):\n for label_level, label in self.labels.items():\n if label_level == level:\n label.grid(row=0, column=0, padx=5, pady=5, sticky=\"ew\")\n banner_color = self.notification_level.get(level, []).get(\"color\", \"grey50\")\n self.configure(fg_color=banner_color)\n # TODO: improve close_button color\n # btn_color = self.notification_level.get(level, []).get(\"font_color\", \"black\")\n # self.close_button.configure(text_color=btn_color)\n self.show()\n else:\n label.grid_forget()\n\n\n def set_grid_position(self, row=0, column=0):\n self._grid_position = {\"row\":row, \"column\":column}\n if self.visible:\n self.show()\n\n\n def hide(self):\n if self.visible:\n self.grid_forget()\n self.visible = False\n\n\n def show(self):\n self.grid(\n row=self._grid_position.get(\"row\", 0),\n column=self._grid_position.get(\"column\", 0),\n sticky=\"ew\"\n )\n self.visible = True" }, { "identifier": "Header", "path": "gui/sections/header.py", "snippet": "class Header(ctk.CTkFrame):\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n self.grid_columnconfigure(1, weight=1)\n\n txt_logo = Icon(icon=\"wingman-ai-text\", size=(512, 62))\n\n self.logo = ctk.CTkLabel(self, text=\"\", image=txt_logo)\n self.logo.grid(row=0, column=0, padx=5, pady=5, sticky=\"w\")\n\n self.version = ctk.CTkLabel(self)\n if version_check.current_version_is_latest():\n self.version.configure(text=f\"v{LOCAL_VERSION}\")\n else:\n self.version.configure(text=f\"v{LOCAL_VERSION} ⇪\", text_color=\"#dd6633\")\n printr.print_info(f\"A new Wingman AI version is available! 
Get the latest version ({version_check.get_latest_version()}) at https://wingman-ai.com\")\n self.version.grid(row=0, column=0, padx=20, pady=10, sticky=\"es\")\n\n self.social_links = SocialLinks(self, fg_color=(\"grey80\", \"grey40\"))\n self.social_links.grid(row=0, column=1, padx=5, pady=5)\n\n # TODO: Move to Burger-Menu\n self.about_button = IconButton(self,\n icon=\"info\",\n size=32,\n themed=False,\n command=lambda: master.show_view(\"about\"))\n self.about_button.grid(row=0, column=3, padx=5, pady=5, sticky=\"e\")\n self.settings_button = IconButton(self,\n icon=\"settings\",\n size=32,\n themed=False,\n command=lambda: master.show_view(\"settings\"))\n self.settings_button.grid(row=0, column=4, padx=5, pady=5, sticky=\"e\")" }, { "identifier": "ContextView", "path": "gui/views/context_view.py", "snippet": "class ContextView(ctk.CTkFrame):\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n\n self.core = master.core\n\n self.grid_columnconfigure(1, weight=1)\n self.grid_rowconfigure(0, weight=1)\n\n\n self.context_switcher = ContextSwitcher(self, width=88, corner_radius=0)\n self.context_switcher.grid(row=0, column=0, sticky=\"ns\")\n\n self.context_runner = ContextRunner(self, context=\"\", fg_color=\"transparent\", bg_color=\"transparent\")\n self.context_runner.grid(row=0, column=1, pady=5, sticky=\"nesw\")\n\n\n def update_context(self, context=\"\"):\n self.context_runner.destroy()\n self.context_runner = ContextRunner(self, context=context, fg_color=\"transparent\", bg_color=\"transparent\")\n self.context_runner.grid(row=0, column=1, pady=5, sticky=\"nesw\")" }, { "identifier": "SettingsView", "path": "gui/views/settings_view.py", "snippet": "class SettingsView(ctk.CTkFrame):\n SYSTEM_APPEARANCE_MAP: dict[str, str] = {\n \"Dark\": \"dark\",\n \"Light\": \"light\",\n \"System\": \"system\",\n }\n\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n\n self.core = master.core\n self.config_manager: ConfigManager = self.core.config_manager\n self.secret_keeper = self.core.secret_keeper\n\n self.grid_rowconfigure(5, weight=1)\n self.grid_columnconfigure(0, weight=1)\n self.grid_columnconfigure(3, weight=1)\n padding = {\"padx\": 15, \"pady\": 10}\n\n # --- Headline\n self.headline = ctk.CTkLabel(\n self, text=\"Settings\", font=(\"TkHeadingFont\", 20, \"bold\")\n )\n self.headline.grid(row=0, column=1, columnspan=2, **padding)\n self.close_button = IconButton(\n self,\n icon=\"close\",\n size=16,\n padding=8,\n themed=False,\n command=lambda: master.show_view(\"context\"),\n )\n self.close_button.grid(row=0, column=3, **padding, sticky=\"e\")\n\n # --- Appearance\n appearance_options = [\"Dark\", \"System\", \"Light\"]\n self.appearance_label = ctk.CTkLabel(self, text=\"UI Appearance: \")\n self.appearance_label.grid(row=1, column=1, **padding, sticky=\"w\")\n self.appearance_switcher = ctk.CTkSegmentedButton(\n self, values=appearance_options, command=self.set_appearance\n )\n self.appearance_switcher.grid(row=1, column=2, **padding, sticky=\"w\")\n\n # --- Auto Run\n self.auto_run_label = ctk.CTkLabel(self, text=\"Enable Auto-Run: \")\n self.auto_run_label.grid(row=2, column=1, **padding, sticky=\"w\")\n self.auto_run_switch = ctk.CTkSwitch(\n self, text=\"\", command=self.__handle_auto_run_switch, onvalue=\"on\", offvalue=\"off\"\n )\n self.auto_run_switch.grid(row=2, column=2, **padding, sticky=\"w\")\n\n # load config\n self.__load_gui_config()\n\n # |<---Spacer--->|\n self.spacer = ctk.CTkLabel(self, text=\"\")\n 
self.spacer.grid(row=3, column=1, **padding, sticky=\"ew\")\n\n # --- API Keys\n self.key_list = KeyValueList(\n self,\n label_text=\"API Keys\",\n key_name=\"Service Name\",\n key_placeholder=\"e.g. elevenlabs\",\n value_name=\"API Key\",\n value_placeholder=\"Your Key\",\n hide_values=True,\n data=self.secret_keeper.secrets,\n update_callback=self.__update_secrets,\n )\n self.key_list.grid(row=5, column=0, columnspan=4, **padding, sticky=\"nesw\")\n self.hide_keys_button = ctk.CTkButton(\n self, text=\"Toggle API Key Visibility\", command=self.key_list.hide_values\n )\n self.hide_keys_button.grid(row=4, column=2, **padding, sticky=\"w\")\n\n\n def tkraise(self, aboveThis=None):\n super().tkraise(aboveThis)\n self.key_list.update(self.secret_keeper.secrets)\n\n\n def __load_gui_config(self):\n config = self.config_manager.load_gui_config()\n\n mapped_value = list(self.SYSTEM_APPEARANCE_MAP.keys())[\n list(self.SYSTEM_APPEARANCE_MAP.values()).index(\n config.get(\"appearance\", \"system\")\n )\n ]\n self.appearance_switcher.set(mapped_value)\n\n if config.get(\"auto-run\", \"off\") == \"on\":\n self.auto_run_switch.select()\n else:\n self.auto_run_switch.deselect()\n\n\n def __update_secrets(self, data):\n self.secret_keeper.secrets = data\n self.secret_keeper.save()\n\n\n def __handle_auto_run_switch(self):\n self.config_manager.gui_config[\"auto-run\"] = self.auto_run_switch.get()\n self.config_manager.save_gui_config()\n\n\n def set_appearance(self, value):\n mapped_value = self.SYSTEM_APPEARANCE_MAP[value]\n self.config_manager.gui_config[\"appearance\"] = mapped_value\n self.config_manager.save_gui_config()\n ctk.set_appearance_mode(mapped_value)" }, { "identifier": "AboutView", "path": "gui/views/about_view.py", "snippet": "class AboutView(ctk.CTkFrame):\n\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n\n self.core = master.core\n\n self.grid_columnconfigure(0, weight=1)\n self.grid_rowconfigure(1, weight=1)\n padding = {\"padx\":15, \"pady\":10}\n\n self.headline = ctk.CTkLabel(self, text=\"About Wingman AI\", font=('TkHeadingFont', 20, \"bold\"))\n self.headline.grid(row=0, column=0, columnspan=2, **padding)\n self.close_button = IconButton(self,\n icon=\"close\",\n size=16,\n padding=8,\n themed=False,\n command=lambda: master.show_view(\"context\"))\n self.close_button.grid(row=0, column=1, **padding, sticky=\"e\")\n\n self.tab_view = ctk.CTkTabview(self)\n self.tab_view.grid(row=1, column=0, columnspan=2, **padding, sticky=\"nesw\")\n\n self.__create_license_tab()\n # TODO:\n # self.__create_patreon_tab()\n # self.__create_modules_tab()\n\n # ──────────────────────────────────────────────────────────────────────────\n def __create_license_tab(self):\n self.license_tab = self.tab_view.add(\"LICENSE\")\n self.license_box = ctk.CTkTextbox(self.license_tab)\n self.license_box.tag_config(\"center\", justify=\"center\")\n license_file = os.path.join(self.core.app_root_dir, \"LICENSE\")\n with open(license_file, \"r\", encoding=\"UTF-8\") as f:\n self.license_box.insert(\"end\", str(f.read()), \"center\")\n self.license_box.configure(state=\"disabled\")\n self.license_box.pack(fill=BOTH, expand=True)\n\n # ──────────────────────────────────────────────────────────────────────────\n def __create_patreon_tab(self):\n self.patreon_tab = self.tab_view.add(\"Patreon\")\n self.patreon_box = ctk.CTkTextbox(self.patreon_tab)\n self.patreon_box.insert(\"end\", \"- PATREON - \\n\\nfrom File\")\n self.patreon_box.configure(state=\"disabled\")\n 
self.patreon_box.pack(fill=BOTH, expand=True)\n\n # ──────────────────────────────────────────────────────────────────────────\n def __create_modules_tab(self):\n self.modules_tab = self.tab_view.add(\"Modules\")\n self.modules_box = ctk.CTkTextbox(self.modules_tab)\n self.modules_box.insert(\"end\", \"- used modules - \\n\\nfrom File\")\n self.modules_box.configure(state=\"disabled\")\n self.modules_box.pack(fill=BOTH, expand=True)" } ]
from os import path from sys import platform from typing import Literal from gui.components.notification_banner import NotificationBanner from gui.sections.header import Header from gui.views.context_view import ContextView from gui.views.settings_view import SettingsView from gui.views.about_view import AboutView import tkinter as tk import customtkinter as ctk
3,673
class WingmanUI(ctk.CTk): VIEWS = Literal["context", "settings", "about"] _views: dict[VIEWS, ctk.CTkFrame | None] = dict( context=None, settings=None, about=None ) def __init__(self, core): super().__init__() self.core = core self.about_window = None ctk.set_appearance_mode( self.core.config_manager.gui_config.get("appearance", "system") ) # TODO: add themes # ctk.set_default_color_theme(path.join(self.core.app_root_dir, "assets", "themes", "wingman-ai.json")) self.title("Wingman AI") self.geometry("1024x800+200+150") self.minsize(400, 150) # no way to set this on MacOS self.iconbitmap(path.join(self.core.app_root_dir, "assets", "wingman-ai.ico")) if platform == "darwin": mac_dock_icon = tk.Image( "photo", file=path.join( self.core.app_root_dir, "assets", "icons", "wingman-ai.png" ), ) self.iconphoto(True, mac_dock_icon) self.menubar = tk.Menu(self) self.system_menu = tk.Menu(self.menubar, name="apple") self.system_menu.add_command(label="Exit Wingman AI", command=self.quit) self.menubar.add_cascade(label="System", menu=self.system_menu) self.help_menu = tk.Menu(self.menubar, tearoff=0) self.help_menu.add_command( label="About Wingman AI", command=lambda: self.show_view("about") ) self.menubar.add_cascade(label="Help", menu=self.help_menu) self.config(menu=self.menubar) self.grid_columnconfigure(0, weight=1) self.grid_rowconfigure(1, weight=1) self.header = Header(self, height=74, corner_radius=0) self.header.grid(row=0, column=0, sticky="we") view_grid = {"row": 1, "column": 0, "sticky": "nesw"} self._views["about"] = AboutView(self, corner_radius=0, fg_color="transparent") self._views["about"].grid(**view_grid)
class WingmanUI(ctk.CTk): VIEWS = Literal["context", "settings", "about"] _views: dict[VIEWS, ctk.CTkFrame | None] = dict( context=None, settings=None, about=None ) def __init__(self, core): super().__init__() self.core = core self.about_window = None ctk.set_appearance_mode( self.core.config_manager.gui_config.get("appearance", "system") ) # TODO: add themes # ctk.set_default_color_theme(path.join(self.core.app_root_dir, "assets", "themes", "wingman-ai.json")) self.title("Wingman AI") self.geometry("1024x800+200+150") self.minsize(400, 150) # no way to set this on MacOS self.iconbitmap(path.join(self.core.app_root_dir, "assets", "wingman-ai.ico")) if platform == "darwin": mac_dock_icon = tk.Image( "photo", file=path.join( self.core.app_root_dir, "assets", "icons", "wingman-ai.png" ), ) self.iconphoto(True, mac_dock_icon) self.menubar = tk.Menu(self) self.system_menu = tk.Menu(self.menubar, name="apple") self.system_menu.add_command(label="Exit Wingman AI", command=self.quit) self.menubar.add_cascade(label="System", menu=self.system_menu) self.help_menu = tk.Menu(self.menubar, tearoff=0) self.help_menu.add_command( label="About Wingman AI", command=lambda: self.show_view("about") ) self.menubar.add_cascade(label="Help", menu=self.help_menu) self.config(menu=self.menubar) self.grid_columnconfigure(0, weight=1) self.grid_rowconfigure(1, weight=1) self.header = Header(self, height=74, corner_radius=0) self.header.grid(row=0, column=0, sticky="we") view_grid = {"row": 1, "column": 0, "sticky": "nesw"} self._views["about"] = AboutView(self, corner_radius=0, fg_color="transparent") self._views["about"].grid(**view_grid)
self._views["settings"] = SettingsView(
3
2023-11-15 09:36:06+00:00
8k
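Editor's note on the row above: the header and settings buttons call master.show_view(...) and the window keeps its frames in a _views dict, all gridded into the same cell. The show_view method itself is not part of this excerpt, so the following is only a hypothetical sketch of the common tkinter pattern such a registry suggests: stack every view in one grid cell and raise the requested one. All names in the sketch are assumptions.

import tkinter as tk

class MiniUI(tk.Tk):
    # Hypothetical stand-in for the real window class; not the repository's code.
    def __init__(self):
        super().__init__()
        self._views: dict[str, tk.Frame] = {}
        for name in ("context", "settings", "about"):
            frame = tk.Frame(self)
            tk.Label(frame, text=name).pack()
            frame.grid(row=0, column=0, sticky="nsew")   # every view shares one cell
            self._views[name] = frame

    def show_view(self, name: str):
        self._views[name].tkraise()   # bring the chosen frame to the front

# ui = MiniUI(); ui.show_view("settings"); ui.mainloop()  # needs a display to run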
jeromeleong/mirrors-zhile-io-pandora
src/pandora/turbo/chat.py
[ { "identifier": "Conversations", "path": "src/pandora/turbo/base.py", "snippet": "class Conversations:\n def __init__(self):\n self.__data = []\n\n def list(self, offset, limit):\n return len(self.__data), self.__data[offset: limit]\n\n def clear(self):\n self.__data = []\n\n def delete(self, conversation):\n self.__data = [x for x in self.__data if conversation.conversation_id != x.conversation_id]\n\n def new(self):\n conversation = Conversation()\n self.__data.insert(0, conversation)\n\n return conversation\n\n def get(self, conversation_id):\n for x in self.__data:\n if x.conversation_id == conversation_id:\n return x\n\n return None\n\n def guard_get(self, conversation_id):\n conversation = self.get(conversation_id)\n if not conversation:\n raise Exception('Can\\'t load conversation {}'.format(conversation_id))\n\n return conversation" }, { "identifier": "UserPrompt", "path": "src/pandora/turbo/base.py", "snippet": "class UserPrompt(Prompt):\n def __init__(self, prompt_id, content, parent):\n super().__init__(prompt_id=prompt_id, role='user', content=content, parent=parent)\n\n def get_message(self, end=True):\n return {\n 'id': self.prompt_id,\n 'author': {\n 'role': self.role,\n 'name': None,\n 'metadata': {}\n },\n 'create_time': self.create_time,\n 'update_time': None,\n 'content': {\n 'content_type': 'text',\n 'parts': [self.content]\n },\n 'end_turn': None,\n 'weight': 1.0,\n 'metadata': {\n 'timestamp_': 'absolute',\n 'message_type': None\n },\n 'recipient': 'all'\n }" }, { "identifier": "Prompt", "path": "src/pandora/turbo/base.py", "snippet": "class Prompt:\n def __init__(self, prompt_id=None, role=None, content=None, parent=None):\n self.prompt_id = prompt_id or str(uuid.uuid4())\n self.parent_id = None\n self.role = role\n self.content = content\n self.children = []\n self.create_time = dt.now().timestamp()\n\n if parent:\n self.parent_id = parent.prompt_id\n parent.add_child(self.prompt_id)\n\n def add_child(self, prompt_id):\n self.children.append(prompt_id)\n\n def get_message(self, end=True):\n return None\n\n def get_info(self):\n return {\n 'id': self.prompt_id,\n 'message': self.get_message(),\n 'parent': self.parent_id,\n 'children': self.children\n }" }, { "identifier": "SystemPrompt", "path": "src/pandora/turbo/base.py", "snippet": "class SystemPrompt(Prompt):\n def __init__(self, content, parent):\n super().__init__(role='system', content=content, parent=parent)\n\n def get_message(self, end=True):\n return {\n 'id': self.prompt_id,\n 'author': {\n 'role': self.role,\n 'name': None,\n 'metadata': {}\n },\n 'create_time': self.create_time,\n 'update_time': None,\n 'content': {\n 'content_type': 'text',\n 'parts': ['']\n },\n 'end_turn': True,\n 'weight': 1.0,\n 'metadata': {},\n 'recipient': 'all'\n }" }, { "identifier": "ChatCompletion", "path": "src/pandora/openai/api.py", "snippet": "class ChatCompletion(API):\n def __init__(self, proxy=None):\n self.session = requests.Session()\n self.req_kwargs = {\n 'proxies': {\n 'http': proxy,\n 'https': proxy,\n } if proxy else None,\n 'verify': where(),\n 'timeout': 600,\n 'allow_redirects': False,\n }\n\n self.user_agent = 'pandora/{}'.format(__version__)\n\n super().__init__(proxy, self.req_kwargs['verify'])\n\n def __get_headers(self, api_key):\n return {\n 'Authorization': 'Bearer ' + api_key,\n 'User-Agent': self.user_agent,\n 'Content-Type': 'application/json',\n }\n\n def request(self, api_key, model, messages, stream=True, **kwargs):\n data = {\n 'model': model,\n 'messages': messages,\n **kwargs,\n 'stream': 
stream,\n }\n\n return self.__request_conversation(api_key, data, stream)\n\n def __request_conversation(self, api_key, data, stream):\n default = default_api_prefix()\n\n if api_key.startswith('fk-') or api_key.startswith('pk-'):\n prefix = default\n else:\n prefix = getenv('OPENAI_API_PREFIX', default)\n url = '{}/v1/chat/completions'.format(prefix)\n\n if stream:\n headers = {**self.__get_headers(api_key), 'Accept': 'text/event-stream'}\n return self._request_sse(url, headers, data)\n\n resp = self.session.post(url=url, headers=self.__get_headers(api_key), json=data, **self.req_kwargs)\n\n def __generate_wrap():\n yield resp.json()\n\n return resp.status_code, resp.headers, __generate_wrap()" }, { "identifier": "gpt_num_tokens", "path": "src/pandora/openai/token.py", "snippet": "def gpt_num_tokens(messages, model='gpt-3.5-turbo'):\n encoding = tiktoken.encoding_for_model(model)\n\n num_tokens = 0\n for message in messages:\n num_tokens += 4\n for key, value in message.items():\n num_tokens += len(encoding.encode(value))\n if 'name' == key:\n num_tokens -= 1\n num_tokens += 2\n\n return num_tokens" } ]
import json from datetime import datetime as dt from os import getenv from requests import Response from .base import Conversations, UserPrompt, Prompt, SystemPrompt from ..openai.api import ChatCompletion from ..openai.token import gpt_num_tokens
3,617
if not conversation: return self.__out_error('Conversation not found', 404) if 'New chat' != conversation.title: message = { 'message': 'Conversation {} already has title \'{}\''.format(conversation_id, conversation.title) } return self.__wrap_response(message) messages = conversation.get_messages_directly(message_id) messages.append({'role': 'user', 'content': self.TITLE_PROMPT}) status, header, generator = self.api.request(self.get_access_token(token), model, messages, False) last_ok, last = self.__get_completion(status, next(generator)) if not last_ok: return self.__out_error(last['detail'], status) conversation.set_title(last.strip('"')) result = { 'title': conversation.title } return self.__wrap_response(result) resp = __shadow() if raw: return resp if resp.status_code != 200: raise Exception('generate title failed: ' + resp.text) return resp.json()['title'] def set_conversation_title(self, conversation_id, title, raw=False, token=None): def __shadow(): try: conversation = self.__get_conversations(token).guard_get(conversation_id) except Exception as e: return self.__out_error(str(e), 404) conversation.set_title(title) result = { 'success': True } return self.__wrap_response(result) resp = __shadow() if raw: return resp if resp.status_code != 200: raise Exception('update conversation failed: ' + resp.json()['detail']) return resp.json()['success'] def talk(self, content, model, message_id, parent_message_id, conversation_id=None, stream=True, token=None): system_prompt = None if conversation_id: conversation = self.__get_conversations(token).get(conversation_id) if not conversation: return self.__out_error_stream('Conversation not found', 404) parent = conversation.get_prompt(parent_message_id) else: conversation = self.__get_conversations(token).new() parent = conversation.add_prompt(Prompt(parent_message_id)) parent = system_prompt = conversation.add_prompt(SystemPrompt(self.system_prompt, parent)) conversation.add_prompt(UserPrompt(message_id, content, parent)) user_prompt, gpt_prompt, messages = conversation.get_messages(message_id, model) try: status, headers, generator = self.api.request(self.get_access_token(token), model, self.__reduce_messages(messages, model, token), stream) except Exception as e: return self.__out_error_stream(str(e)) def __out_generator(): if 200 == status and system_prompt and stream: yield self.__out_stream(conversation, system_prompt) yield self.__out_stream(conversation, user_prompt) for line in generator: yield self.__map_conversation(status, conversation, gpt_prompt, line) return status, headers, __out_generator() def goon(self, model, parent_message_id, conversation_id, stream=True, token=None): return self.regenerate_reply(None, model, conversation_id, parent_message_id, None, stream, token) def regenerate_reply(self, prompt, model, conversation_id, message_id, parent_message_id, stream=True, token=None): if not conversation_id: return self.__out_error_stream('Miss conversation_id', 400) conversation = self.__get_conversations(token).get(conversation_id) if not conversation: return self.__out_error_stream('Conversation not found', 404) user_prompt, gpt_prompt, messages = conversation.get_messages(message_id, model) try: status, headers, generator = self.api.request(self.get_access_token(token), model, self.__reduce_messages(messages, model, token), stream) except Exception as e: return self.__out_error_stream(str(e)) def __out_generator(): for line in generator: yield self.__map_conversation(status, conversation, gpt_prompt, line) return status, 
headers, __out_generator() def __reduce_messages(self, messages, model, token=None): max_tokens = self.FAKE_TOKENS[model] if self.__is_fake_api(token) else self.MAX_TOKENS[model]
# -*- coding: utf-8 -*- class TurboGPT: DEFAULT_SYSTEM_PROMPT = 'You are ChatGPT, a large language model trained by OpenAI. ' \ 'Answer as concisely as possible.\nKnowledge cutoff: 2021-09-01\n' \ 'Current date: {}'.format(dt.now().strftime('%Y-%m-%d')) TITLE_PROMPT = 'Generate a brief title for our conversation.' MAX_TOKENS = { 'gpt-3.5-turbo': 4096, 'gpt-4': 8192, 'gpt-4-32k': 32768, } FAKE_TOKENS = { 'gpt-3.5-turbo': 8191, 'gpt-4': 4095, 'gpt-4-32k': 8195, } def __init__(self, api_keys: dict, proxy=None): self.api_keys = api_keys self.api_keys_key_list = list(api_keys) self.default_api_keys_key = self.api_keys_key_list[0] self.api = ChatCompletion(proxy) self.conversations_map = {} self.system_prompt = getenv('API_SYSTEM_PROMPT', self.DEFAULT_SYSTEM_PROMPT) def __get_conversations(self, api_keys_key=None): if api_keys_key is None: api_keys_key = self.default_api_keys_key if api_keys_key not in self.conversations_map: self.conversations_map[api_keys_key] = Conversations() return self.conversations_map[api_keys_key] def __is_fake_api(self, token=None): api_key = self.get_access_token(token) return api_key.startswith('fk-') or api_key.startswith('pk-') def get_access_token(self, token_key=None): return self.api_keys[token_key or self.default_api_keys_key] def list_token_keys(self): return self.api_keys_key_list def list_models(self, raw=False, token=None): fake_api = self.__is_fake_api(token) models = { 'models': [ { 'slug': 'gpt-3.5-turbo', 'max_tokens': self.FAKE_TOKENS['gpt-3.5-turbo'] if fake_api else self.MAX_TOKENS['gpt-3.5-turbo'], 'title': 'GPT-3.5', 'description': 'Turbo is the api model that powers ChatGPT', 'tags': [] }, { 'slug': 'gpt-4', 'max_tokens': self.FAKE_TOKENS['gpt-4'] if fake_api else self.MAX_TOKENS['gpt-4'], 'title': 'GPT-4', 'description': 'More capable than any GPT-3.5, able to do complex tasks, and optimized for chat', 'tags': [] }, { 'slug': 'gpt-4-32k', 'max_tokens': self.FAKE_TOKENS['gpt-4-32k'] if fake_api else self.MAX_TOKENS['gpt-4-32k'], 'title': 'GPT-4 32K', 'description': 'Same capabilities as the base gpt-4 mode but with 4x the context length', 'tags': [] } ] } if raw: return self.__wrap_response(models) return models['models'] def list_conversations(self, offset, limit, raw=False, token=None): offset = int(offset) limit = int(limit) total, items = self.__get_conversations(token).list(offset, limit) stripped = [] for item in items: stripped.append({ 'id': item.conversation_id, 'title': item.title, 'create_time': dt.utcfromtimestamp(item.create_time).isoformat(), }) result = {'items': stripped, 'total': total, 'limit': limit, 'offset': offset} if raw: return self.__wrap_response(result) return result def get_conversation(self, conversation_id, raw=False, token=None): def __shadow(): try: conversation = self.__get_conversations(token).guard_get(conversation_id) except Exception as e: return self.__out_error(str(e), 404) return self.__wrap_response(conversation.get_info()) resp = __shadow() if raw: return resp if resp.status_code != 200: raise Exception('get conversation failed: ' + resp.json()['detail']) return resp.json() def clear_conversations(self, raw=False, token=None): def __shadow(): self.__get_conversations(token).clear() result = { 'success': True } return self.__wrap_response(result) resp = __shadow() if raw: return resp return resp.json()['success'] def del_conversation(self, conversation_id, raw=False, token=None): def __shadow(): conversations = self.__get_conversations(token) try: conversation = conversations.guard_get(conversation_id) 
except Exception as e: return self.__out_error(str(e), 404) conversations.delete(conversation) result = { 'success': True } return self.__wrap_response(result) resp = __shadow() if raw: return resp if resp.status_code != 200: raise Exception('delete conversation failed: ' + resp.json()['detail']) return resp.json()['success'] def gen_conversation_title(self, conversation_id, model, message_id, raw=False, token=None): def __shadow(): conversation = self.__get_conversations(token).get(conversation_id) if not conversation: return self.__out_error('Conversation not found', 404) if 'New chat' != conversation.title: message = { 'message': 'Conversation {} already has title \'{}\''.format(conversation_id, conversation.title) } return self.__wrap_response(message) messages = conversation.get_messages_directly(message_id) messages.append({'role': 'user', 'content': self.TITLE_PROMPT}) status, header, generator = self.api.request(self.get_access_token(token), model, messages, False) last_ok, last = self.__get_completion(status, next(generator)) if not last_ok: return self.__out_error(last['detail'], status) conversation.set_title(last.strip('"')) result = { 'title': conversation.title } return self.__wrap_response(result) resp = __shadow() if raw: return resp if resp.status_code != 200: raise Exception('generate title failed: ' + resp.text) return resp.json()['title'] def set_conversation_title(self, conversation_id, title, raw=False, token=None): def __shadow(): try: conversation = self.__get_conversations(token).guard_get(conversation_id) except Exception as e: return self.__out_error(str(e), 404) conversation.set_title(title) result = { 'success': True } return self.__wrap_response(result) resp = __shadow() if raw: return resp if resp.status_code != 200: raise Exception('update conversation failed: ' + resp.json()['detail']) return resp.json()['success'] def talk(self, content, model, message_id, parent_message_id, conversation_id=None, stream=True, token=None): system_prompt = None if conversation_id: conversation = self.__get_conversations(token).get(conversation_id) if not conversation: return self.__out_error_stream('Conversation not found', 404) parent = conversation.get_prompt(parent_message_id) else: conversation = self.__get_conversations(token).new() parent = conversation.add_prompt(Prompt(parent_message_id)) parent = system_prompt = conversation.add_prompt(SystemPrompt(self.system_prompt, parent)) conversation.add_prompt(UserPrompt(message_id, content, parent)) user_prompt, gpt_prompt, messages = conversation.get_messages(message_id, model) try: status, headers, generator = self.api.request(self.get_access_token(token), model, self.__reduce_messages(messages, model, token), stream) except Exception as e: return self.__out_error_stream(str(e)) def __out_generator(): if 200 == status and system_prompt and stream: yield self.__out_stream(conversation, system_prompt) yield self.__out_stream(conversation, user_prompt) for line in generator: yield self.__map_conversation(status, conversation, gpt_prompt, line) return status, headers, __out_generator() def goon(self, model, parent_message_id, conversation_id, stream=True, token=None): return self.regenerate_reply(None, model, conversation_id, parent_message_id, None, stream, token) def regenerate_reply(self, prompt, model, conversation_id, message_id, parent_message_id, stream=True, token=None): if not conversation_id: return self.__out_error_stream('Miss conversation_id', 400) conversation = 
self.__get_conversations(token).get(conversation_id) if not conversation: return self.__out_error_stream('Conversation not found', 404) user_prompt, gpt_prompt, messages = conversation.get_messages(message_id, model) try: status, headers, generator = self.api.request(self.get_access_token(token), model, self.__reduce_messages(messages, model, token), stream) except Exception as e: return self.__out_error_stream(str(e)) def __out_generator(): for line in generator: yield self.__map_conversation(status, conversation, gpt_prompt, line) return status, headers, __out_generator() def __reduce_messages(self, messages, model, token=None): max_tokens = self.FAKE_TOKENS[model] if self.__is_fake_api(token) else self.MAX_TOKENS[model]
while gpt_num_tokens(messages) > max_tokens - 200:
5
2023-11-12 10:31:05+00:00
8k
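Editor's note on the row above: gpt_num_tokens counts prompt size with a fixed scheme (4 framing tokens per message, the encoded length of every field, minus one when a name key is present, plus 2 at the end), and the row's next line trims the history while that count exceeds max_tokens - 200. Below is a minimal sketch of that budget check using tiktoken; the counting follows the visible snippet, while the 200-token reserve default and the drop-oldest-first policy are illustrative assumptions rather than a verified copy of __reduce_messages.

import tiktoken

def num_tokens(messages, model="gpt-3.5-turbo"):
    enc = tiktoken.encoding_for_model(model)
    total = 0
    for message in messages:
        total += 4                          # per-message framing overhead
        for key, value in message.items():
            total += len(enc.encode(value))
            if key == "name":
                total -= 1                  # role token is dropped when a name is given
    return total + 2                        # reply priming, as in the snippet above

def reduce_messages(messages, max_tokens, model="gpt-3.5-turbo", reserve=200):
    messages = list(messages)
    while num_tokens(messages, model) > max_tokens - reserve and len(messages) > 1:
        # drop the oldest non-system message first (assumed policy)
        drop_at = 1 if messages[0].get("role") == "system" else 0
        messages.pop(drop_at)
    return messages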
dubverse-ai/MahaTTS
maha_tts/inference.py
[ { "identifier": "load_diff_model", "path": "maha_tts/models/diff_model.py", "snippet": "def load_diff_model(checkpoint,device,model_channels=512,ar_active=False,len_code_labels=10004):\n diff_model = DiffModel(input_channels=80,\n output_channels=160,\n model_channels=1024,\n num_heads=16,\n dropout=0.1,\n condition_free_per=0.0,\n multispeaker=True,\n training=False,\n ar_active=ar_active,\n in_latent_channels = len_code_labels)\n\n # diff_model.load_state_dict(torch.load('/content/LibriTTS_fp64_10k/S2A/_latest.pt',map_location=torch.device('cpu')),strict=True)\n diff_model.load_state_dict(torch.load(checkpoint,map_location=torch.device('cpu')),strict=True)\n diff_model=diff_model.eval().to(device)\n return diff_model" }, { "identifier": "load_TS_model", "path": "maha_tts/models/autoregressive.py", "snippet": "def load_TS_model(checkpoint,device,name):\n data = torch.load(checkpoint,map_location=torch.device('cpu'))\n sem_model= TS_model(n_embed = data['n_embed'], n_layer = data['n_layer'], n_head = data['n_head'], n_positions = data['n_positions'],name=name)\n print(name,data['n_embed'],data['n_layer'],data['n_head'],data['n_positions'])\n sem_model.load_state_dict(data['state_dict'],strict=True)\n sem_model.eval().to(device)\n\n return sem_model" }, { "identifier": "load_vocoder_model", "path": "maha_tts/models/vocoder.py", "snippet": "def load_vocoder_model(config_path,checkpoint_path,device):\n # config_file = os.path.join(os.path.split(checkpoint_file)[0], 'config.json')\n with open(config_path) as f:\n data = f.read()\n\n global h\n json_config = json.loads(data)\n h = AttrDict(json_config)\n\n torch.manual_seed(h.seed)\n\n generator = Generator(h).to(device)\n\n state_dict_g = load_checkpoint(checkpoint_path, device)\n generator.load_state_dict(state_dict_g['generator'])\n\n generator.eval()\n generator.remove_weight_norm()\n\n return generator" }, { "identifier": "infer_wav", "path": "maha_tts/models/vocoder.py", "snippet": "def infer_wav(mel,generator):\n MAX_WAV_VALUE =32768.0\n with torch.no_grad():\n y_g_hat = generator(mel)\n audio = y_g_hat.squeeze()\n audio = audio * MAX_WAV_VALUE\n audio = audio.cpu().numpy().astype('int16')\n return audio" }, { "identifier": "denormalize_tacotron_mel", "path": "maha_tts/utils/audio.py", "snippet": "def denormalize_tacotron_mel(norm_mel):\n return ((norm_mel+1)/2)*(TACOTRON_MEL_MAX-TACOTRON_MEL_MIN)+TACOTRON_MEL_MIN" }, { "identifier": "normalize_tacotron_mel", "path": "maha_tts/utils/audio.py", "snippet": "def normalize_tacotron_mel(mel):\n return 2 * ((mel - TACOTRON_MEL_MIN) / (TACOTRON_MEL_MAX - TACOTRON_MEL_MIN)) - 1" }, { "identifier": "load_wav_to_torch", "path": "maha_tts/utils/audio.py", "snippet": "def load_wav_to_torch(full_path):\n sampling_rate, data = read(full_path,)\n return torch.FloatTensor(data), sampling_rate" }, { "identifier": "dynamic_range_compression", "path": "maha_tts/utils/audio.py", "snippet": "def dynamic_range_compression(x, C=1, clip_val=1e-5):\n \"\"\"\n PARAMS\n ------\n C: compression factor\n \"\"\"\n return torch.log(torch.clamp(x, min=clip_val) * C)" }, { "identifier": "STFT", "path": "maha_tts/utils/stft.py", "snippet": "class STFT(torch.nn.Module):\n \"\"\"adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft\"\"\"\n def __init__(self, filter_length=800, hop_length=200, win_length=800,\n window='hann'):\n super(STFT, self).__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = window\n self.forward_transform = 
None\n scale = self.filter_length / self.hop_length\n fourier_basis = np.fft.fft(np.eye(self.filter_length))\n\n cutoff = int((self.filter_length / 2 + 1))\n fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),\n np.imag(fourier_basis[:cutoff, :])])\n\n forward_basis = torch.FloatTensor(fourier_basis[:, None, :])\n inverse_basis = torch.FloatTensor(\n np.linalg.pinv(scale * fourier_basis).T[:, None, :])\n\n if window is not None:\n assert(filter_length >= win_length)\n # get window and zero center pad it to filter_length\n fft_window = get_window(window, win_length, fftbins=True)\n fft_window = pad_center(fft_window, size = filter_length)\n fft_window = torch.from_numpy(fft_window).float()\n\n # window the bases\n forward_basis *= fft_window\n inverse_basis *= fft_window\n\n self.register_buffer('forward_basis', forward_basis.float())\n self.register_buffer('inverse_basis', inverse_basis.float())\n\n def transform(self, input_data):\n num_batches = input_data.size(0)\n num_samples = input_data.size(1)\n\n self.num_samples = num_samples\n\n # similar to librosa, reflect-pad the input\n input_data = input_data.view(num_batches, 1, num_samples)\n input_data = F.pad(\n input_data.unsqueeze(1),\n (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),\n mode='reflect')\n input_data = input_data.squeeze(1)\n\n forward_transform = F.conv1d(\n input_data,\n Variable(self.forward_basis, requires_grad=False),\n stride=self.hop_length,\n padding=0)\n\n cutoff = int((self.filter_length / 2) + 1)\n real_part = forward_transform[:, :cutoff, :]\n imag_part = forward_transform[:, cutoff:, :]\n\n magnitude = torch.sqrt(real_part**2 + imag_part**2)\n phase = torch.autograd.Variable(\n torch.atan2(imag_part.data, real_part.data))\n\n return magnitude, phase\n\n def inverse(self, magnitude, phase):\n recombine_magnitude_phase = torch.cat(\n [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)\n\n inverse_transform = F.conv_transpose1d(\n recombine_magnitude_phase,\n Variable(self.inverse_basis, requires_grad=False),\n stride=self.hop_length,\n padding=0)\n\n if self.window is not None:\n window_sum = window_sumsquare(\n self.window, magnitude.size(-1), hop_length=self.hop_length,\n win_length=self.win_length, n_fft=self.filter_length,\n dtype=np.float32)\n # remove modulation effects\n approx_nonzero_indices = torch.from_numpy(\n np.where(window_sum > tiny(window_sum))[0])\n window_sum = torch.autograd.Variable(\n torch.from_numpy(window_sum), requires_grad=False)\n window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum\n inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]\n\n # scale by hop ratio\n inverse_transform *= float(self.filter_length) / self.hop_length\n\n inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]\n inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]\n\n return inverse_transform\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction" }, { "identifier": "SpacedDiffusion", "path": "maha_tts/utils/diffusion.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def 
__init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def autoregressive_training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().autoregressive_training_losses(self._wrap_model(model, True), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model, autoregressive=False):\n if isinstance(model, _WrappedModel) or isinstance(model, _WrappedAutoregressiveModel):\n return model\n mod = _WrappedAutoregressiveModel if autoregressive else _WrappedModel\n return mod(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "get_named_beta_schedule", "path": "maha_tts/utils/diffusion.py", "snippet": "def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\n \"\"\"\n Get a pre-defined beta schedule for the given name.\n\n The beta schedule library consists of beta schedules which remain similar\n in the limit of num_diffusion_timesteps.\n Beta schedules may be added, but should not be removed or changed once\n they are committed to maintain backwards compatibility.\n \"\"\"\n if schedule_name == \"linear\":\n # Linear schedule from Ho et al, extended to work for any number of\n # diffusion steps.\n scale = 1000 / num_diffusion_timesteps\n beta_start = scale * 0.0001\n beta_end = scale * 0.02\n return np.linspace(\n beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64\n )\n elif schedule_name == \"cosine\":\n return betas_for_alpha_bar(\n num_diffusion_timesteps,\n lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,\n )\n else:\n raise NotImplementedError(f\"unknown beta schedule: {schedule_name}\")" }, { "identifier": "space_timesteps", "path": "maha_tts/utils/diffusion.py", "snippet": "def space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed striding\n from the DDIM paper is used, and only one section is 
allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(\n f\"cannot create exactly {num_timesteps} steps with an integer stride\"\n )\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)" }, { "identifier": "labels", "path": "maha_tts/text/symbols.py", "snippet": "" }, { "identifier": "english_cleaners", "path": "maha_tts/text/cleaners.py", "snippet": "def english_cleaners(text):\n '''Pipeline for English text, including number and abbreviation expansion.'''\n # text = convert_to_ascii(text)\n # text = lowercase(text)\n text = expand_numbers(text)\n text = expand_abbreviations(text)\n text = collapse_whitespace(text)\n return text" }, { "identifier": "config", "path": "maha_tts/config.py", "snippet": "class config:\n \n semantic_model_centroids = 10000 + 1\n seed_value = 3407\n\n # Text to Semantic\n t2s_position = 4096\n langs = ['english','tamil', 'telugu', 'punjabi', 'marathi', 'hindi', 'gujarati', 'bengali', 'assamese']\n lang_index = {i:j for j,i in enumerate(langs)}\n # Semantic to acoustic\n sa_timesteps_max = 1000\n\n #Acoustic Properties\n CLIP_LENGTH = 500\n MAX_WAV_VALUE=32768.0\n filter_length=1024\n hop_length=256 #256\n window = 'hann'\n win_length=1024\n n_mel_channels=80\n sampling_rate=22050\n mel_fmin=0.0\n mel_fmax=8000.0" } ]
import torch,glob,os,requests import numpy as np import torch.nn.functional as F from tqdm import tqdm from librosa.filters import mel as librosa_mel_fn from scipy.io.wavfile import write from scipy.special import softmax from maha_tts.models.diff_model import load_diff_model from maha_tts.models.autoregressive import load_TS_model from maha_tts.models.vocoder import load_vocoder_model,infer_wav from maha_tts.utils.audio import denormalize_tacotron_mel,normalize_tacotron_mel,load_wav_to_torch,dynamic_range_compression from maha_tts.utils.stft import STFT from maha_tts.utils.diffusion import SpacedDiffusion,get_named_beta_schedule,space_timesteps from maha_tts.text.symbols import labels,text_labels,text_labels_en,code_labels,text_enc,text_dec,code_enc,code_dec,text_enc_en,text_dec_en from maha_tts.text.cleaners import english_cleaners from maha_tts.config import config
5,885
download_file(model_dirs[name][0],checkpoint_voco) download_file(model_dirs[name][1],voco_config_path) else: download_file(model_dirs[name][0],checkpoint_diff) download_file(model_dirs[name][1],checkpoint_ts) def load_models(name,device=torch.device('cpu')): ''' Load pre-trained models for different components of a text-to-speech system. Args: device (str): The target device for model loading (e.g., 'cpu' or 'cuda'). checkpoint_diff (str): File path to the pre-trained model checkpoint for the diffusion model. checkpoint_ts (str): File path to the pre-trained model checkpoint for the text-to-semantic model. checkpoint_voco (str): File path to the pre-trained model checkpoint for the vocoder model. voco_config_path (str): File path to the configuration file for the vocoder model. Returns: diff_model (object): Loaded diffusion model for semantic-to-acoustic tokens. ts_model (object): Loaded text-to-semantic model for converting text-to-semantic tokens. vocoder (object): Loaded vocoder model for generating waveform from acoustic tokens. diffuser (object): Configured diffuser object for use in the diffusion model. ''' assert name in model_dirs, "no model name "+name checkpoint_diff = os.path.join(DEFAULT_MODELS_DIR,name,'s2a_latest.pt') checkpoint_ts = os.path.join(DEFAULT_MODELS_DIR,name,'t2s_best.pt') checkpoint_voco = os.path.join(DEFAULT_MODELS_DIR,'hifigan','g_02500000') voco_config_path = os.path.join(DEFAULT_MODELS_DIR,'hifigan','config.json') # for i in [checkpoint_diff,checkpoint_ts,checkpoint_voco,voco_config_path]: if not os.path.exists(checkpoint_diff) or not os.path.exists(checkpoint_ts): download_model(name) if not os.path.exists(checkpoint_voco) or not os.path.exists(voco_config_path): download_model('hifigan') diff_model = load_diff_model(checkpoint_diff,device) ts_model = load_TS_model(checkpoint_ts,device,name) vocoder = load_vocoder_model(voco_config_path,checkpoint_voco,device) diffuser = load_diffuser() return diff_model,ts_model,vocoder,diffuser def infer_mel(model,timeshape,code,ref_mel,diffuser,temperature=1.0): device = next(model.parameters()).device code = code.to(device) ref_mel =ref_mel.to(device) output_shape = (1,80,timeshape) noise = torch.randn(output_shape, device=code.device) * temperature mel = diffuser.p_sample_loop(model, output_shape, noise=noise, model_kwargs={'code_emb': code,'ref_clips':ref_mel}, progress=True) return denormalize_tacotron_mel(mel) def generate_semantic_tokens( text, model, ref_mels, language=None, temp = 0.7, top_p= None, top_k= 1, n_tot_steps = 1000, device = None ): semb = [] with torch.no_grad(): for n in tqdm(range(n_tot_steps)): x = get_inputs(text,semb,ref_mels,device,model.name) _,result = model(**x,language=language) relevant_logits = result[0,:,-1] if top_p is not None: # faster to convert to numpy original_device = relevant_logits.device relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() sorted_indices = np.argsort(relevant_logits)[::-1] sorted_logits = relevant_logits[sorted_indices] cumulative_probs = np.cumsum(softmax(sorted_logits)) sorted_indices_to_remove = cumulative_probs > top_p sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() sorted_indices_to_remove[0] = False relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf relevant_logits = torch.from_numpy(relevant_logits) relevant_logits = relevant_logits.to(original_device) if top_k is not None: v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) relevant_logits[relevant_logits < v[-1]] = 
-float("Inf") probs = F.softmax(relevant_logits / temp, dim=-1) item_next = torch.multinomial(probs, num_samples=1).to(torch.int32) semb.append(str(code_dec[item_next.item()])) if semb[-1] == '<EST>' or semb[-1] == '<PAD>': break del relevant_logits, probs, item_next semb = torch.tensor([int(i) for i in semb[:-1]]) return semb,result def get_inputs(text,semb=[],ref_mels=[],device=torch.device('cpu'),name = 'Smolie-in'): text = text.lower() if name=='Smolie-en': text_ids=[text_enc_en['<S>']]+[text_enc_en[i] for i in text.strip()]+[text_enc_en['<E>']] else: text_ids=[text_enc['<S>']]+[text_enc[i] for i in text.strip()]+[text_enc['<E>']] semb_ids=[code_enc['<SST>']]+[code_enc[i] for i in semb]#+[tok_enc['<EST>']] input_ids = text_ids+semb_ids # pad_length = config.t2s_position-(len(text_ids)+len(semb_ids)) token_type_ids = [0]*len(text_ids)+[1]*len(semb_ids)#+[0]*pad_length positional_ids = [i for i in range(len(text_ids))]+[i for i in range(len(semb_ids))]#+[0]*pad_length # labels = [-100]*len(text_ids)+semb_ids+[-100]*pad_length attention_mask = [1]*len(input_ids)#+[0]*pad_length # input_ids += [tok_enc['<PAD>']]*pad_length
DEFAULT_MODELS_DIR = os.path.join(os.path.expanduser('~'), '.cache', 'maha_tts', 'models') stft_fn = STFT(config.filter_length, config.hop_length, config.win_length) mel_basis = librosa_mel_fn( sr=config.sampling_rate, n_fft=config.filter_length, n_mels=config.n_mel_channels, fmin=config.mel_fmin, fmax=config.mel_fmax) mel_basis = torch.from_numpy(mel_basis).float() model_dirs= { 'Smolie-en':['https://huggingface.co/Dubverse/MahaTTS/resolve/main/maha_tts/pretrained_models/Smolie-en/s2a_latest.pt', 'https://huggingface.co/Dubverse/MahaTTS/resolve/main/maha_tts/pretrained_models/Smolie-en/t2s_best.pt'], 'Smolie-in':['https://huggingface.co/Dubverse/MahaTTS/resolve/main/maha_tts/pretrained_models/Smolie-in/s2a_latest.pt', 'https://huggingface.co/Dubverse/MahaTTS/resolve/main/maha_tts/pretrained_models/Smolie-in/t2s_best.pt'], 'hifigan':['https://huggingface.co/Dubverse/MahaTTS/resolve/main/maha_tts/pretrained_models/hifigan/g_02500000', 'https://huggingface.co/Dubverse/MahaTTS/resolve/main/maha_tts/pretrained_models/hifigan/config.json'] } def download_file(url, filename): response = requests.get(url, stream=True) total_size = int(response.headers.get('content-length', 0)) # Check if the response was successful (status code 200) response.raise_for_status() with open(filename, 'wb') as file, tqdm( desc=filename, total=total_size, unit='B', unit_scale=True, unit_divisor=1024, ) as bar: for data in response.iter_content(chunk_size=1024): # Write data to the file file.write(data) # Update the progress bar bar.update(len(data)) print(f"Download complete: {filename}\n") def download_model(name): print('Downloading ',name," ....") checkpoint_diff = os.path.join(DEFAULT_MODELS_DIR,name,'s2a_latest.pt') checkpoint_ts = os.path.join(DEFAULT_MODELS_DIR,name,'t2s_best.pt') checkpoint_voco = os.path.join(DEFAULT_MODELS_DIR,'hifigan','g_02500000') voco_config_path = os.path.join(DEFAULT_MODELS_DIR,'hifigan','config.json') os.makedirs(os.path.join(DEFAULT_MODELS_DIR,name),exist_ok=True) if name == 'hifigan': download_file(model_dirs[name][0],checkpoint_voco) download_file(model_dirs[name][1],voco_config_path) else: download_file(model_dirs[name][0],checkpoint_diff) download_file(model_dirs[name][1],checkpoint_ts) def load_models(name,device=torch.device('cpu')): ''' Load pre-trained models for different components of a text-to-speech system. Args: device (str): The target device for model loading (e.g., 'cpu' or 'cuda'). checkpoint_diff (str): File path to the pre-trained model checkpoint for the diffusion model. checkpoint_ts (str): File path to the pre-trained model checkpoint for the text-to-semantic model. checkpoint_voco (str): File path to the pre-trained model checkpoint for the vocoder model. voco_config_path (str): File path to the configuration file for the vocoder model. Returns: diff_model (object): Loaded diffusion model for semantic-to-acoustic tokens. ts_model (object): Loaded text-to-semantic model for converting text-to-semantic tokens. vocoder (object): Loaded vocoder model for generating waveform from acoustic tokens. diffuser (object): Configured diffuser object for use in the diffusion model. 
''' assert name in model_dirs, "no model name "+name checkpoint_diff = os.path.join(DEFAULT_MODELS_DIR,name,'s2a_latest.pt') checkpoint_ts = os.path.join(DEFAULT_MODELS_DIR,name,'t2s_best.pt') checkpoint_voco = os.path.join(DEFAULT_MODELS_DIR,'hifigan','g_02500000') voco_config_path = os.path.join(DEFAULT_MODELS_DIR,'hifigan','config.json') # for i in [checkpoint_diff,checkpoint_ts,checkpoint_voco,voco_config_path]: if not os.path.exists(checkpoint_diff) or not os.path.exists(checkpoint_ts): download_model(name) if not os.path.exists(checkpoint_voco) or not os.path.exists(voco_config_path): download_model('hifigan') diff_model = load_diff_model(checkpoint_diff,device) ts_model = load_TS_model(checkpoint_ts,device,name) vocoder = load_vocoder_model(voco_config_path,checkpoint_voco,device) diffuser = load_diffuser() return diff_model,ts_model,vocoder,diffuser def infer_mel(model,timeshape,code,ref_mel,diffuser,temperature=1.0): device = next(model.parameters()).device code = code.to(device) ref_mel =ref_mel.to(device) output_shape = (1,80,timeshape) noise = torch.randn(output_shape, device=code.device) * temperature mel = diffuser.p_sample_loop(model, output_shape, noise=noise, model_kwargs={'code_emb': code,'ref_clips':ref_mel}, progress=True) return denormalize_tacotron_mel(mel) def generate_semantic_tokens( text, model, ref_mels, language=None, temp = 0.7, top_p= None, top_k= 1, n_tot_steps = 1000, device = None ): semb = [] with torch.no_grad(): for n in tqdm(range(n_tot_steps)): x = get_inputs(text,semb,ref_mels,device,model.name) _,result = model(**x,language=language) relevant_logits = result[0,:,-1] if top_p is not None: # faster to convert to numpy original_device = relevant_logits.device relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() sorted_indices = np.argsort(relevant_logits)[::-1] sorted_logits = relevant_logits[sorted_indices] cumulative_probs = np.cumsum(softmax(sorted_logits)) sorted_indices_to_remove = cumulative_probs > top_p sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() sorted_indices_to_remove[0] = False relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf relevant_logits = torch.from_numpy(relevant_logits) relevant_logits = relevant_logits.to(original_device) if top_k is not None: v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) relevant_logits[relevant_logits < v[-1]] = -float("Inf") probs = F.softmax(relevant_logits / temp, dim=-1) item_next = torch.multinomial(probs, num_samples=1).to(torch.int32) semb.append(str(code_dec[item_next.item()])) if semb[-1] == '<EST>' or semb[-1] == '<PAD>': break del relevant_logits, probs, item_next semb = torch.tensor([int(i) for i in semb[:-1]]) return semb,result def get_inputs(text,semb=[],ref_mels=[],device=torch.device('cpu'),name = 'Smolie-in'): text = text.lower() if name=='Smolie-en': text_ids=[text_enc_en['<S>']]+[text_enc_en[i] for i in text.strip()]+[text_enc_en['<E>']] else: text_ids=[text_enc['<S>']]+[text_enc[i] for i in text.strip()]+[text_enc['<E>']] semb_ids=[code_enc['<SST>']]+[code_enc[i] for i in semb]#+[tok_enc['<EST>']] input_ids = text_ids+semb_ids # pad_length = config.t2s_position-(len(text_ids)+len(semb_ids)) token_type_ids = [0]*len(text_ids)+[1]*len(semb_ids)#+[0]*pad_length positional_ids = [i for i in range(len(text_ids))]+[i for i in range(len(semb_ids))]#+[0]*pad_length # labels = [-100]*len(text_ids)+semb_ids+[-100]*pad_length attention_mask = [1]*len(input_ids)#+[0]*pad_length # input_ids += 
[tok_enc['<PAD>']]*pad_length
return {'text_ids':torch.tensor(text_ids).unsqueeze(0).to(device),'codes_ids':torch.tensor(semb_ids).unsqueeze(0).to(device),'ref_clips':normalize_tacotron_mel(ref_mels).to(device)}
5
2023-11-16 09:44:54+00:00
8k
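Taken together, the functions in this record suggest an inference flow along the following lines. This is a hedged sketch only: it assumes load_models and generate_semantic_tokens from the code above are in scope, and the reference-mel preparation, the semantic-token-to-code-embedding step, and the vocoder call are not shown in this excerpt, so they are stubbed with placeholders or left as comments.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
diff_model, ts_model, vocoder, diffuser = load_models("Smolie-en", device=device)

ref_mels = torch.randn(1, 80, 300)   # placeholder reference clips; the real shape/extraction is not shown here
semb, _ = generate_semantic_tokens("hello world.", ts_model, ref_mels,
                                   temp=0.7, top_k=1, device=device)
# Remaining steps (semantic tokens -> acoustic mel via infer_mel/diffuser, then
# mel -> waveform via the HiFi-GAN vocoder) are omitted because the required
# code-embedding preparation is not part of this excerpt.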
wjun0830/CGDETR
run_on_video/CLIP_ckpt/qvhighlights_onlyCLIP/model.py
[ { "identifier": "generalized_temporal_iou", "path": "cg_detr/span_utils.py", "snippet": "def generalized_temporal_iou(spans1, spans2):\n \"\"\"\n Generalized IoU from https://giou.stanford.edu/\n Also reference to DETR implementation of generalized_box_iou\n https://github.com/facebookresearch/detr/blob/master/util/box_ops.py#L40\n\n Args:\n spans1: (N, 2) torch.Tensor, each row defines a span in xx format [st, ed]\n spans2: (M, 2) torch.Tensor, ...\n\n Returns:\n giou: (N, M) torch.Tensor\n\n >>> test_spans1 = torch.Tensor([[0, 0.2], [0.5, 1.0]])\n >>> test_spans2 = torch.Tensor([[0, 0.3], [0., 1.0]])\n >>> generalized_temporal_iou(test_spans1, test_spans2)\n tensor([[ 0.6667, 0.2000],\n [-0.2000, 0.5000]])\n \"\"\"\n spans1 = spans1.float()\n spans2 = spans2.float()\n assert (spans1[:, 1] >= spans1[:, 0]).all()\n assert (spans2[:, 1] >= spans2[:, 0]).all()\n iou, union = temporal_iou(spans1, spans2)\n\n left = torch.min(spans1[:, None, 0], spans2[:, 0]) # (N, M)\n right = torch.max(spans1[:, None, 1], spans2[:, 1]) # (N, M)\n enclosing_area = (right - left).clamp(min=0) # (N, M)\n\n return iou - (enclosing_area - union) / enclosing_area" }, { "identifier": "span_cxw_to_xx", "path": "cg_detr/span_utils.py", "snippet": "def span_cxw_to_xx(cxw_spans):\n \"\"\"\n Args:\n cxw_spans: tensor, (#windows, 2) or (..., 2), the last dim is a row denoting a window of format (center, width)\n\n >>> spans = torch.Tensor([[0.5000, 1.0000], [0.3000, 0.2000]])\n >>> span_cxw_to_xx(spans)\n tensor([[0.0000, 1.0000],\n [0.2000, 0.4000]])\n >>> spans = torch.Tensor([[[0.5000, 1.0000], [0.3000, 0.2000]]])\n >>> span_cxw_to_xx(spans)\n tensor([[[0.0000, 1.0000],\n [0.2000, 0.4000]]])\n \"\"\"\n x1 = cxw_spans[..., 0] - 0.5 * cxw_spans[..., 1]\n x2 = cxw_spans[..., 0] + 0.5 * cxw_spans[..., 1]\n return torch.stack([x1, x2], dim=-1)" }, { "identifier": "build_matcher", "path": "cg_detr/matcher.py", "snippet": "def build_matcher(args):\n return HungarianMatcher(\n cost_span=args.set_cost_span, cost_giou=args.set_cost_giou,\n cost_class=args.set_cost_class, span_loss_type=args.span_loss_type, max_v_l=args.max_v_l\n )" }, { "identifier": "build_transformer", "path": "cg_detr/transformer.py", "snippet": "def build_transformer(args):\n return Transformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n activation='prelu',\n args=args\n )" }, { "identifier": "TransformerEncoderLayer", "path": "cg_detr/transformer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = DropPath(dropout)\n self.dropout2 = DropPath(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self,\n src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: 
Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(src, pos)\n src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def forward_pre(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n pass\n\n def forward(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n if self.normalize_before:\n return self.forward_pre(src, src_mask, src_key_padding_mask, pos)\n return self.forward_post(src, src_mask, src_key_padding_mask, pos)" }, { "identifier": "TransformerEncoder", "path": "cg_detr/transformer.py", "snippet": "class TransformerEncoder(nn.Module):\n\n def __init__(self, encoder_layer, num_layers, norm=None, return_intermediate=False):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n self.return_intermediate = return_intermediate\n\n def forward(self, src,\n mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n **kwargs):\n output = src\n\n intermediate = []\n\n for layer in self.layers:\n output = layer(output, src_mask=mask,\n src_key_padding_mask=src_key_padding_mask, pos=pos, **kwargs)\n if self.return_intermediate:\n intermediate.append(output)\n\n if self.norm is not None:\n output = self.norm(output)\n\n if self.return_intermediate:\n return torch.stack(intermediate)\n\n return output" }, { "identifier": "build_position_encoding", "path": "cg_detr/position_encoding.py", "snippet": "def build_position_encoding(args):\n N_steps = args.hidden_dim\n if args.position_embedding in ('v2', 'sine'):\n # TODO find a better way of exposing other arguments\n position_embedding = PositionEmbeddingSine(N_steps, normalize=True)\n # elif args.position_embedding in ('v3', 'learned'):\n # position_embedding = PositionEmbeddingLearned(N_steps)\n else:\n raise ValueError(f\"not supported {args.position_embedding}\")\n if args.max_q_l == -1:\n args.max_q_l = 100\n txt_pos_embed = TrainablePositionalEncoding(\n max_position_embeddings=args.max_q_l,\n hidden_size=args.hidden_dim, dropout=args.input_dropout)\n return position_embedding, txt_pos_embed" }, { "identifier": "accuracy", "path": "cg_detr/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\n output: (#items, #classes)\n target: int,\n \"\"\"\n maxk = max(topk)\n num_items = output.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target)\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / num_items))\n return res" } ]
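The span utilities quoted in this context list (span_cxw_to_xx and generalized_temporal_iou) are combined later in SetCriterion.loss_spans. A minimal toy-sized sketch of that combination, assuming both functions are importable from cg_detr.span_utils as the context indicates:

import torch
import torch.nn.functional as F
from cg_detr.span_utils import span_cxw_to_xx, generalized_temporal_iou

# predicted and target spans in (center, width) format, normalized to [0, 1]
pred_spans = torch.tensor([[0.50, 0.20], [0.30, 0.10]])
tgt_spans = torch.tensor([[0.55, 0.20], [0.70, 0.10]])

loss_l1 = F.l1_loss(pred_spans, tgt_spans, reduction="none")   # per-span L1 term
giou = torch.diag(generalized_temporal_iou(span_cxw_to_xx(pred_spans),
                                           span_cxw_to_xx(tgt_spans)))
loss_giou = 1 - giou                                           # per-span GIoU term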
import torch import torch.nn.functional as F import numpy as np import copy from torch import nn from cg_detr.span_utils import generalized_temporal_iou, span_cxw_to_xx from cg_detr.matcher import build_matcher from cg_detr.transformer import build_transformer, TransformerEncoderLayer, TransformerEncoder from cg_detr.position_encoding import build_position_encoding from cg_detr.misc import accuracy
5,814
return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx # two 1D tensors of the same length def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, **kwargs): loss_map = { "spans": self.loss_spans, "labels": self.loss_labels, "contrastive_align": self.loss_contrastive_align, "saliency": self.loss_saliency, "ms_align": self.loss_contrastive_moment_sentence, "distill": self.loss_moment2txt_sim_distill, "orthogonal_dummy":self.loss_orthogonal_dummy } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, **kwargs) def forward(self, outputs, targets): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} # Retrieve the matching between the outputs of the last layer and the targets # list(tuples), each tuple is (pred_span_indices, tgt_span_indices) # only for HL, do not use matcher if self.use_matcher: indices = self.matcher(outputs_without_aux, targets) losses_target = self.losses else: indices = None losses_target = ["saliency"] # Compute all the requested losses losses = {} # for loss in self.losses: for loss in losses_target: losses.update(self.get_loss(loss, outputs, targets, indices)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if 'aux_outputs' in outputs: for i, aux_outputs in enumerate(outputs['aux_outputs']): # indices = self.matcher(aux_outputs, targets) if self.use_matcher: indices = self.matcher(aux_outputs, targets) losses_target = self.losses else: indices = None losses_target = ["saliency", "ms_align", "distill", "orthogonal_dummy"] for loss in losses_target: if "saliency" == loss: # skip as it is only in the top layer continue if "ms_align" == loss: continue if "distill" == loss: continue if "orthogonal_dummy" == loss: continue kwargs = {} l_dict = self.get_loss(loss, aux_outputs, targets, indices, **kwargs) l_dict = {k + f'_{i}': v for k, v in l_dict.items()} losses.update(l_dict) return losses class MLP(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class LinearLayer(nn.Module): """linear layer configurable with layer normalization, dropout, ReLU.""" def __init__(self, input_dim, output_dim, layer_norm=True, dropout=0.1, relu=True): super(LinearLayer, self).__init__() self.relu = relu self.layer_norm = layer_norm if layer_norm: self.LayerNorm = nn.LayerNorm(input_dim) layers = [ nn.Dropout(dropout), nn.Linear(input_dim, output_dim) ] self.net = nn.Sequential(*layers) def forward(self, x): """(N, L, D)""" if self.layer_norm: x = self.LayerNorm(x) x = self.net(x) if self.relu: x = F.relu(x, inplace=True) return x # (N, L, D) def build_model(args): device = torch.device(args.device) transformer = build_transformer(args)
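The _get_src_permutation_idx / _get_tgt_permutation_idx helpers above flatten the per-sample matcher output into two aligned index tensors. A small standalone illustration with made-up matcher indices for a batch of two samples:

import torch

indices = [(torch.tensor([0, 2]), torch.tensor([1, 0])),   # sample 0: queries 0 and 2 matched
           (torch.tensor([1]), torch.tensor([0]))]         # sample 1: query 1 matched

batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
print(batch_idx)  # tensor([0, 0, 1]) -> which sample each matched prediction belongs to
print(src_idx)    # tensor([0, 2, 1]) -> which query slot was matched within that sample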
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ CG-DETR model and criterion classes. """ def inverse_sigmoid(x, eps=1e-3): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1/x2) def init_weights(module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def find_nth(vid, underline, n): max_len = len(vid) start = vid.find(underline) while start >= 0 and n > 1: start = vid.find(underline, start+len(underline)) n -= 1 if start == -1: start = max_len return start def element_wise_list_equal(listA, listB): res = [] for a, b in zip(listA, listB): if a==b: res.append(True) else: res.append(False) return res class CGDETR(nn.Module): """ CG DETR. """ def __init__(self, transformer, position_embed, txt_position_embed, txt_dim, vid_dim, num_queries, input_dropout, aux_loss=False, contrastive_align_loss=False, contrastive_hdim=64, max_v_l=75, span_loss_type="l1", use_txt_pos=False, n_input_proj=2, aud_dim=0, args=None): """ Initializes the model. Parameters: transformer: torch module of the transformer architecture. See transformer.py position_embed: torch module of the position_embedding, See position_encoding.py txt_position_embed: position_embedding for text txt_dim: int, text query input dimension vid_dim: int, video feature input dimension num_queries: number of object queries, ie detection slot. This is the maximal number of objects CG-DETR can detect in a single video. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. contrastive_align_loss: If true, perform span - tokens contrastive learning contrastive_hdim: dimension used for projecting the embeddings before computing contrastive loss max_v_l: int, maximum #clips in videos span_loss_type: str, one of [l1, ce] l1: (center-x, width) regression. ce: (st_idx, ed_idx) classification. 
# foreground_thd: float, intersection over prediction >= foreground_thd: labeled as foreground # background_thd: float, intersection over prediction <= background_thd: labeled background """ super().__init__() self.args=args self.num_queries = num_queries self.transformer = transformer self.position_embed = position_embed self.txt_position_embed = txt_position_embed hidden_dim = transformer.d_model self.span_loss_type = span_loss_type self.max_v_l = max_v_l span_pred_dim = 2 if span_loss_type == "l1" else max_v_l * 2 self.span_embed = MLP(hidden_dim, hidden_dim, span_pred_dim, 3) self.class_embed = nn.Linear(hidden_dim, 2) # 0: background, 1: foreground self.token_type_embeddings = nn.Embedding(2, hidden_dim) self.token_type_embeddings.apply(init_weights) self.use_txt_pos = use_txt_pos self.n_input_proj = n_input_proj self.query_embed = nn.Embedding(num_queries, 2) relu_args = [True] * 3 relu_args[n_input_proj-1] = False self.input_txt_proj = nn.Sequential(*[ LinearLayer(txt_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[0]), LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[1]), LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[2]) ][:n_input_proj]) self.input_vid_proj = nn.Sequential(*[ LinearLayer(vid_dim + aud_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[0]), LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[1]), LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[2]) ][:n_input_proj]) self.contrastive_align_loss = contrastive_align_loss if contrastive_align_loss: self.contrastive_align_projection_query = nn.Linear(hidden_dim, contrastive_hdim) self.contrastive_align_projection_txt = nn.Linear(hidden_dim, contrastive_hdim) self.contrastive_align_projection_vid = nn.Linear(hidden_dim, contrastive_hdim) self.saliency_proj1 = nn.Linear(hidden_dim, hidden_dim) self.saliency_proj2 = nn.Linear(hidden_dim, hidden_dim) self.aux_loss = aux_loss self.hidden_dim = hidden_dim self.global_rep_token = torch.nn.Parameter(torch.randn(args.total_prompts, hidden_dim)) self.global_rep_pos = torch.nn.Parameter(torch.randn(1, hidden_dim)) self.moment_rep_token = torch.nn.Parameter(torch.randn(hidden_dim)) self.moment_rep_pos = torch.nn.Parameter(torch.randn(hidden_dim)) self.dummy_rep_token = torch.nn.Parameter(torch.randn(args.num_dummies, hidden_dim)) self.dummy_rep_pos = torch.nn.Parameter(torch.randn(args.num_dummies, hidden_dim)) normalize_before = False self.sent_rep_token = torch.nn.Parameter(torch.randn(hidden_dim)) self.sent_rep_pos = torch.nn.Parameter(torch.randn(hidden_dim)) self.txt_proj_linear = LinearLayer(txt_dim, hidden_dim, layer_norm=True) input_txt_sa_proj = TransformerEncoderLayer(hidden_dim, 8, self.args.dim_feedforward, 0.1, "prelu", normalize_before) txtproj_encoder_norm = nn.LayerNorm(hidden_dim) if normalize_before else None self.txtproj_encoder = TransformerEncoder(input_txt_sa_proj, args.dummy_layers, txtproj_encoder_norm) scls_encoder_layer = TransformerEncoderLayer(hidden_dim, 8, self.args.dim_feedforward, 0.1, "prelu", normalize_before) scls_encoder_norm = nn.LayerNorm(hidden_dim) if normalize_before else None self.scls_encoder = TransformerEncoder(scls_encoder_layer, args.sent_layers, scls_encoder_norm) def forward(self, src_txt, src_txt_mask, src_vid, src_vid_mask, vid, qid, src_aud=None, src_aud_mask=None, targets=None): """The forward expects two tensors: - 
src_txt: [batch_size, L_txt, D_txt] - src_txt_mask: [batch_size, L_txt], containing 0 on padded pixels, will convert to 1 as padding later for transformer - src_vid: [batch_size, L_vid, D_vid] - src_vid_mask: [batch_size, L_vid], containing 0 on padded pixels, will convert to 1 as padding later for transformer It returns a dict with the following elements: - "pred_spans": The normalized boxes coordinates for all queries, represented as (center_x, width). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ _count = [v.count('_') for v in vid] if self.args.dset_name == 'hl': _position_to_cut = [find_nth(v, '_', _count[i]-1) for i, v in enumerate(vid)] ori_vid = [v[:_position_to_cut[i]] for i, v in enumerate(vid)] else: ori_vid = [v for v in vid] if src_aud is not None: src_vid = torch.cat([src_vid, src_aud], dim=2) src_vid = self.input_vid_proj(src_vid) src_txt = self.input_txt_proj(src_txt) src_vid = src_vid + self.token_type_embeddings(torch.full_like(src_vid_mask.long(), 1)) src_txt = src_txt + self.token_type_embeddings(torch.zeros_like(src_txt_mask.long())) pos_vid = self.position_embed(src_vid, src_vid_mask) # (bsz, L_vid, d) pos_txt = self.txt_position_embed(src_txt) if self.use_txt_pos else torch.zeros_like(src_txt) # (bsz, L_txt, d) ### insert dummy token in front of txt txt_dummy = self.dummy_rep_token.reshape([1, self.args.num_dummies, self.hidden_dim]).repeat(src_txt.shape[0], 1, 1) src_txt_dummy = torch.cat([txt_dummy, src_txt], dim=1) mask_txt = torch.tensor([[True] * self.args.num_dummies]).to(src_txt_mask.device).repeat(src_txt_mask.shape[0], 1) src_txt_mask_dummy = torch.cat([mask_txt, src_txt_mask], dim=1) pos_dummy = self.dummy_rep_pos.reshape([1, self.args.num_dummies, self.hidden_dim]).repeat(pos_txt.shape[0], 1, 1) pos_txt_dummy = torch.cat([pos_dummy, pos_txt], dim=1) src_txt_dummy = src_txt_dummy.permute(1, 0, 2) # (L, batch_size, d) pos_txt_dummy = pos_txt_dummy.permute(1, 0, 2) # (L, batch_size, d) memory = self.txtproj_encoder(src_txt_dummy, src_key_padding_mask=~(src_txt_mask_dummy.bool()), pos=pos_txt_dummy) # (L, batch_size, d) dummy_token = memory[:self.args.num_dummies].permute(1, 0, 2) pos_txt_dummy = pos_txt_dummy.permute(1, 0, 2) # (L, batch_size, d) src_txt_dummy = torch.cat([dummy_token, src_txt], dim=1) mask_txt_dummy = torch.tensor([[True]*self.args.num_dummies]).to(src_txt_mask.device).repeat(src_txt_mask.shape[0], 1) src_txt_mask_dummy = torch.cat([mask_txt_dummy, src_txt_mask], dim=1) # Input : Concat video, dummy, txt src = torch.cat([src_vid, src_txt_dummy], dim=1) # (bsz, L_vid+L_txt, d) mask = torch.cat([src_vid_mask, src_txt_mask_dummy], dim=1).bool() # (bsz, L_vid+L_txt) pos = torch.cat([pos_vid, pos_txt_dummy], dim=1) #### for moment token #### moment2txt_similarity = None nmoment2txt_similarity = None moment_mask_ = None ### sentence token smask_ = torch.tensor([[True]]).to(mask.device).repeat(src_txt_mask.shape[0], 1) smask = torch.cat([smask_, src_txt_mask.bool()], dim=1) ssrc_ = self.sent_rep_token.reshape([1, 1, self.hidden_dim]).repeat(src_txt.shape[0], 1, 1) ssrc = torch.cat([ssrc_, src_txt], dim=1) spos_ = self.sent_rep_pos.reshape([1, 1, self.hidden_dim]).repeat(pos_txt.shape[0], 1, 1) spos = torch.cat([spos_, pos_txt], dim=1) 
### dummy sentence token smaskd = torch.cat([smask_, mask_txt_dummy.bool()], dim=1) ssrcd = torch.cat([ssrc_, dummy_token], dim=1) sposd = torch.cat([spos_, pos_dummy], dim=1) if targets is not None: # train mmask_ = torch.tensor([[True]]).to(mask.device).repeat(src_vid_mask.shape[0], 1) mmask = torch.cat([mmask_, src_vid_mask.bool()], dim=1) moment_mask_ = torch.clamp(targets["relevant_clips"], 0, 1).bool() moment_mask = torch.cat([mmask_, moment_mask_], dim=1) mmask = mmask * moment_mask msrc_ = self.moment_rep_token.reshape([1, 1, self.hidden_dim]).repeat(src_vid.shape[0], 1, 1) msrc = torch.cat([msrc_, src_vid], dim=1) mpos_ = self.moment_rep_pos.reshape([1, 1, self.hidden_dim]).repeat(pos_vid.shape[0], 1, 1) mpos = torch.cat([mpos_, pos_vid], dim=1) ### for Not moment token #### nmmask_ = torch.tensor([[True]]).to(mask.device).repeat(src_vid_mask.shape[0], 1) nmmask = torch.cat([nmmask_, src_vid_mask.bool()], dim=1) nmoment_mask_ = ~(torch.clamp(targets["relevant_clips"], 0, 1).bool()) nmoment_mask = torch.cat([nmmask_, nmoment_mask_], dim=1) nmmask = nmmask * nmoment_mask nmsrc_ = self.moment_rep_token.reshape([1, 1, self.hidden_dim]).repeat(src_vid.shape[0], 1, 1) nmsrc = torch.cat([nmsrc_, src_vid], dim=1) nmpos_ = self.moment_rep_pos.reshape([1, 1, self.hidden_dim]).repeat(pos_vid.shape[0], 1, 1) nmpos = torch.cat([nmpos_, pos_vid], dim=1) ########### # for t2vidavg sal token vidsrc_ = torch.zeros((len(src_vid), 1, self.hidden_dim)).cuda() for i in range(len(src_vid)): vidsrc_[i] = src_vid[i][:src_vid_mask.sum(1)[i].long()].mean(0).clone().detach() video_length = src_vid.shape[1] if targets is not None: ## train ssrc = ssrc.permute(1, 0, 2) # (L, batch_size, d) spos = spos.permute(1, 0, 2) # (L, batch_size, d) smemory = self.scls_encoder(ssrc, src_key_padding_mask=~smask, pos=spos) # (L, batch_size, d) # print(smemory[0].shape, smemory[:self.args.num_dummies].shape) # 32 256, 3 32 256 sentence_txt, smemory_words = smemory[0], smemory[1:] ssrcd = ssrcd.permute(1, 0, 2) # (L, batch_size, d) sposd = sposd.permute(1, 0, 2) # (L, batch_size, d) smemoryd = self.scls_encoder(ssrcd, src_key_padding_mask=~smaskd, pos=sposd) # (L, batch_size, d) sentence_dummy, smemory_words_dummy = smemoryd[0], smemoryd[1:] txt_dummy_proj = torch.cat([smemory_words_dummy, smemory_words], dim=0) hs, reference, memory, memory_global, attn_weights, memory_moment, nmmemory_moment, mmemory_frames, nmmemory_frames = self.transformer(src, ~mask, self.query_embed.weight, pos, video_length=video_length, moment_idx=targets["relevant_clips"], msrc=msrc, mpos=mpos, mmask=~mmask, nmsrc=nmsrc, nmpos=nmpos, nmmask=~nmmask, ctxtoken=vidsrc_, gtoken=self.global_rep_token, gpos=self.global_rep_pos, vlen=src_vid_mask.sum(1).long()) moment2txt_similarity = torch.matmul(mmemory_frames.permute(1, 0, 2), txt_dummy_proj.permute(1, 2, 0)) nmoment2txt_similarity = torch.matmul(nmmemory_frames.permute(1, 0, 2), txt_dummy_proj.permute(1, 2, 0)) else: ## inference sentence_dummy, sentence_txt = None, None hs, reference, memory, memory_global, attn_weights, memory_moment, nmmemory_moment, mmemory_frames, nmmemory_frames = self.transformer(src, ~mask, self.query_embed.weight, pos, video_length=video_length, ctxtoken=vidsrc_, gtoken=self.global_rep_token, gpos=self.global_rep_pos, vlen=src_vid_mask.sum(1).long()) outputs_class = self.class_embed(hs) # (#layers, batch_size, #queries, #classes) reference_before_sigmoid = inverse_sigmoid(reference) tmp = self.span_embed(hs) outputs_coord = tmp + reference_before_sigmoid if 
self.span_loss_type == "l1": outputs_coord = outputs_coord.sigmoid() out = {'pred_logits': outputs_class[-1], 'pred_spans': outputs_coord[-1]} txt_mem = memory[:, src_vid.shape[1]:] # (bsz, L_txt, d) vid_mem = memory[:, :src_vid.shape[1]] # (bsz, L_vid, d) if self.contrastive_align_loss: proj_queries = F.normalize(self.contrastive_align_projection_query(hs), p=2, dim=-1) proj_txt_mem = F.normalize(self.contrastive_align_projection_txt(txt_mem), p=2, dim=-1) proj_vid_mem = F.normalize(self.contrastive_align_projection_vid(vid_mem), p=2, dim=-1) out.update(dict( proj_queries=proj_queries[-1], proj_txt_mem=proj_txt_mem, proj_vid_mem=proj_vid_mem )) ### Neg Pairs ### neg_vid = ori_vid[1:] + ori_vid[:1] real_neg_mask = torch.Tensor(element_wise_list_equal(ori_vid, neg_vid)).to(src_txt_dummy.device) real_neg_mask = real_neg_mask == False if real_neg_mask.sum() != 0: src_txt_dummy_neg = torch.cat([src_txt_dummy[1:], src_txt_dummy[0:1]], dim=0) src_txt_mask_dummy_neg = torch.cat([src_txt_mask_dummy[1:], src_txt_mask_dummy[0:1]], dim=0) src_dummy_neg = torch.cat([src_vid, src_txt_dummy_neg], dim=1) mask_dummy_neg = torch.cat([src_vid_mask, src_txt_mask_dummy_neg], dim=1).bool() pos_neg = pos.clone() # since it does not use actual content mask_dummy_neg = mask_dummy_neg[real_neg_mask] src_dummy_neg = src_dummy_neg[real_neg_mask] pos_neg = pos_neg[real_neg_mask] src_txt_mask_dummy_neg = src_txt_mask_dummy_neg[real_neg_mask] _, _, memory_neg, memory_global_neg, attn_weights_neg, _, _, _, _ = self.transformer(src_dummy_neg, ~mask_dummy_neg, self.query_embed.weight, pos_neg, video_length=video_length, ctxtoken=vidsrc_[real_neg_mask], gtoken=self.global_rep_token, gpos=self.global_rep_pos, vlen=src_vid_mask[real_neg_mask].sum(1).long()) vid_mem_neg = memory_neg[:, :src_vid.shape[1]] out["saliency_scores_neg"] = (torch.sum(self.saliency_proj1(vid_mem_neg) * self.saliency_proj2(memory_global_neg).unsqueeze(1), dim=-1) / np.sqrt(self.hidden_dim)) out["src_txt_mask_neg"] = src_txt_mask_dummy_neg out["t2vattnvalues_neg"] = (attn_weights_neg[:, :, self.args.num_dummies:] * (src_txt_mask_dummy_neg[:, self.args.num_dummies:].unsqueeze(1).repeat(1, video_length, 1))).sum(2) out["t2vattnvalues_neg"] = torch.clamp(out["t2vattnvalues_neg"], 0, 1) else: out["saliency_scores_neg"] = None out["t2vattnvalues_neg"] = None out["saliency_scores"] = (torch.sum(self.saliency_proj1(vid_mem) * self.saliency_proj2(memory_global).unsqueeze(1), dim=-1) / np.sqrt(self.hidden_dim)) out["memory_moment"] = memory_moment out["nmmemory_moment"] = nmmemory_moment ## sentence token embeeded with text / dummy out["sentence_txt"] = sentence_txt out["sentence_dummy"] = sentence_dummy out["moment2txt_similarity"] = moment2txt_similarity out["nmoment2txt_similarity"] = nmoment2txt_similarity out["cate_attn_weights"] = attn_weights out["moment_mask"] = moment_mask_ out["txt_mask"] = src_txt_mask_dummy out["real_neg_mask"] = real_neg_mask out["t2vattnvalues"] = (attn_weights[:,:,self.args.num_dummies:] * (src_txt_mask.unsqueeze(1).repeat(1, video_length, 1))).sum(2) # 32 75 (24) / 32 (24) out["t2vattnvalues"] = torch.clamp(out["t2vattnvalues"], 0, 1) out["dummy_tokens"] = dummy_token out["global_rep_tokens"] = self.global_rep_token if targets is not None: out["src_vid"] = mmemory_frames.permute(1, 0, 2) * moment_mask_.unsqueeze(2) + nmmemory_frames.permute(1, 0, 2) * (~(moment_mask_.unsqueeze(2).bool())).float() else: out["src_vid"] = None out["video_mask"] = src_vid_mask if self.aux_loss: # assert proj_queries and proj_txt_mem 
out['aux_outputs'] = [ {'pred_logits': a, 'pred_spans': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] if self.contrastive_align_loss: assert proj_queries is not None for idx, d in enumerate(proj_queries[:-1]): out['aux_outputs'][idx].update(dict(proj_queries=d, proj_txt_mem=proj_txt_mem)) return out class SetCriterion(nn.Module): """ This class computes the loss for DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, matcher, weight_dict, eos_coef, losses, temperature, span_loss_type, max_v_l, saliency_margin=1, use_matcher=True, args=None): """ Create the criterion. Parameters: matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. eos_coef: relative classification weight applied to the no-object category losses: list of all the losses to be applied. See get_loss for list of available losses. temperature: float, temperature for NCE loss span_loss_type: str, [l1, ce] max_v_l: int, saliency_margin: float """ super().__init__() self.args=args self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.temperature = temperature self.span_loss_type = span_loss_type self.max_v_l = max_v_l self.saliency_margin = saliency_margin # foreground and background classification self.foreground_label = 0 self.background_label = 1 self.eos_coef = eos_coef empty_weight = torch.ones(2) empty_weight[-1] = self.eos_coef # lower weight for background (index 1, foreground index 0) self.register_buffer('empty_weight', empty_weight) # for tvsum, self.use_matcher = use_matcher # moment sentence contrastive self.criterion = torch.nn.CrossEntropyLoss().to(self.args.device) self.l2_criterion = torch.nn.MSELoss().to(self.args.device) self.kld_criterion = torch.nn.KLDivLoss(reduction='none').to(self.args.device) self.bce_criterion = nn.BCELoss(reduction='none') def loss_spans(self, outputs, targets, indices): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "spans" containing a tensor of dim [nb_tgt_spans, 2] The target spans are expected in format (center_x, w), normalized by the image size. """ assert 'pred_spans' in outputs targets = targets["span_labels"] idx = self._get_src_permutation_idx(indices) src_spans = outputs['pred_spans'][idx] # (#spans, max_v_l * 2) tgt_spans = torch.cat([t['spans'][i] for t, (_, i) in zip(targets, indices)], dim=0) # (#spans, 2) if self.span_loss_type == "l1": loss_span = F.l1_loss(src_spans, tgt_spans, reduction='none') loss_giou = 1 - torch.diag(generalized_temporal_iou(span_cxw_to_xx(src_spans), span_cxw_to_xx(tgt_spans))) else: # ce n_spans = src_spans.shape[0] src_spans = src_spans.view(n_spans, 2, self.max_v_l).transpose(1, 2) loss_span = F.cross_entropy(src_spans, tgt_spans, reduction='none') loss_giou = loss_span.new_zeros([1]) losses = {} losses['loss_span'] = loss_span.mean() losses['loss_giou'] = loss_giou.mean() return losses def loss_labels(self, outputs, targets, indices, log=True): """Classification loss (NLL) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ # TODO add foreground and background classifier. use all non-matched as background. 
assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] # (batch_size, #queries, #classes=2) # idx is a tuple of two 1D tensors (batch_idx, src_idx), of the same length == #objects in batch idx = self._get_src_permutation_idx(indices) target_classes = torch.full(src_logits.shape[:2], self.background_label, dtype=torch.int64, device=src_logits.device) # (batch_size, #queries) target_classes[idx] = self.foreground_label loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight, reduction="none") losses = {'loss_label': loss_ce.mean()} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], self.foreground_label)[0] return losses def loss_saliency(self, outputs, targets, indices, log=True): """higher scores for positive clips""" if "saliency_pos_labels" not in targets: return {"loss_saliency": 0} # Neg pair loss if outputs["saliency_scores_neg"] is not None: vid_token_mask = outputs["video_mask"] real_neg_mask = outputs["real_neg_mask"] saliency_scores_neg = outputs["saliency_scores_neg"].clone() # (N, L) loss_neg_pair = (- torch.log(1. - torch.sigmoid(saliency_scores_neg)) * (vid_token_mask[real_neg_mask])).sum(dim=1).mean() saliency_scores = outputs["saliency_scores"].clone() # (N, L) saliency_contrast_label = targets["saliency_all_labels"] # real neg / false neg 나눠서 contrastive 진 realneg_saliency_scores = torch.cat([saliency_scores[real_neg_mask], saliency_scores_neg], dim=1) realneg_saliency_contrast_label = torch.cat([saliency_contrast_label[real_neg_mask], torch.zeros_like(saliency_contrast_label)[real_neg_mask]], dim=1) realneg_vid_token_mask = vid_token_mask[real_neg_mask].repeat([1, 2]) realneg_saliency_scores = realneg_vid_token_mask * realneg_saliency_scores + (1. - realneg_vid_token_mask) * -1e+3 tau = 0.5 loss_rank_contrastive = 0. for rand_idx in range(1, 12): drop_mask = ~(realneg_saliency_contrast_label > 100) # no drop pos_mask = (realneg_saliency_contrast_label >= rand_idx) # positive when equal or higher than rand_idx if torch.sum(pos_mask) == 0: # no positive sample continue else: batch_drop_mask = torch.sum(pos_mask, dim=1) > 0 # negative sample indicator # drop higher ranks cur_saliency_scores = realneg_saliency_scores * drop_mask / tau + ~drop_mask * -1e+3 # numerical stability logits = cur_saliency_scores - torch.max(cur_saliency_scores, dim=1, keepdim=True)[0] # softmax exp_logits = torch.exp(logits) log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6) mean_log_prob_pos = (pos_mask * log_prob * realneg_vid_token_mask).sum(1) / (pos_mask.sum(1) + 1e-6) loss = - mean_log_prob_pos * batch_drop_mask loss_rank_contrastive = loss_rank_contrastive + loss.mean() loss_rank_contrastive = loss_rank_contrastive / 12 false_neg_mask = ~(real_neg_mask) if false_neg_mask.sum() != 0: if false_neg_mask.sum() == 1: falseneg_saliency_scores = saliency_scores[false_neg_mask].unsqueeze(0) falseneg_saliency_contrast_label = saliency_contrast_label[false_neg_mask].unsqueeze(0) falseneg_vid_token_mask = vid_token_mask[false_neg_mask].unsqueeze(0) falseneg_saliency_scores = falseneg_vid_token_mask * falseneg_saliency_scores + (1. - falseneg_vid_token_mask) * -1e+3 else: falseneg_saliency_scores = saliency_scores[false_neg_mask] falseneg_saliency_contrast_label = saliency_contrast_label[false_neg_mask] falseneg_vid_token_mask = vid_token_mask[false_neg_mask] falseneg_saliency_scores = falseneg_vid_token_mask * falseneg_saliency_scores + (1. 
- falseneg_vid_token_mask) * -1e+3 tau = 0.5 falseneg_loss_rank_contrastive = 0. for rand_idx in range(1, 12): drop_mask = ~(falseneg_saliency_contrast_label > 100) # no drop pos_mask = (falseneg_saliency_contrast_label >= rand_idx) # positive when equal or higher than rand_idx if torch.sum(pos_mask) == 0: # no positive sample continue else: batch_drop_mask = torch.sum(pos_mask, dim=1) > 0 # negative sample indicator # drop higher ranks cur_saliency_scores = falseneg_saliency_scores * drop_mask / tau + ~drop_mask * -1e+3 # numerical stability logits = cur_saliency_scores - torch.max(cur_saliency_scores, dim=1, keepdim=True)[0] # softmax exp_logits = torch.exp(logits) log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6) mean_log_prob_pos = (pos_mask * log_prob * falseneg_vid_token_mask).sum(1) / (pos_mask.sum(1) + 1e-6) loss = - mean_log_prob_pos * batch_drop_mask falseneg_loss_rank_contrastive = falseneg_loss_rank_contrastive + loss.mean() falseneg_loss_rank_contrastive = falseneg_loss_rank_contrastive / 12 loss_rank_contrastive += falseneg_loss_rank_contrastive saliency_scores = outputs["saliency_scores"] # (N, L) pos_indices = targets["saliency_pos_labels"] # (N, #pairs) neg_indices = targets["saliency_neg_labels"] # (N, #pairs) num_pairs = pos_indices.shape[1] # typically 2 or 4 batch_indices = torch.arange(len(saliency_scores)).to(saliency_scores.device) pos_scores = torch.stack( [saliency_scores[batch_indices, pos_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) neg_scores = torch.stack( [saliency_scores[batch_indices, neg_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) loss_saliency = torch.clamp(self.saliency_margin + neg_scores - pos_scores, min=0).sum() \ / (len(pos_scores) * num_pairs) * 2 # * 2 to keep the loss the same scale if self.args.dset_name in ['youtube_uni']: loss_saliency = loss_saliency + loss_rank_contrastive + loss_neg_pair * 0. else: loss_saliency = loss_saliency + loss_rank_contrastive + loss_neg_pair ########### Saliency loss to t2v attn weights ############## """higher scores for positive clips""" vid_token_mask = outputs["video_mask"] # Neg pair loss if outputs["t2vattnvalues_neg"] is not None: saliency_scores_neg = outputs["t2vattnvalues_neg"].clone() # (N, L) loss_neg_pair_attn = (- torch.log(1. - saliency_scores_neg) * (vid_token_mask[real_neg_mask])).sum(dim=1).mean() saliency_scores = outputs["t2vattnvalues"].clone() # (N, L) saliency_contrast_label = targets["saliency_all_labels"] # real neg / false neg 나눠서 contrastive 진 realneg_saliency_scores = torch.cat([saliency_scores[real_neg_mask], saliency_scores_neg], dim=1) realneg_saliency_contrast_label = torch.cat( [saliency_contrast_label[real_neg_mask], torch.zeros_like(saliency_contrast_label)[real_neg_mask]], dim=1) realneg_vid_token_mask = vid_token_mask[real_neg_mask].repeat([1, 2]) realneg_saliency_scores = realneg_vid_token_mask * realneg_saliency_scores + ( 1. - realneg_vid_token_mask) * -1e+3 tau = 0.5 loss_rank_contrastive_attn = 0. 
for rand_idx in range(1, 12): drop_mask = ~(realneg_saliency_contrast_label > 100) # no drop pos_mask = (realneg_saliency_contrast_label >= rand_idx) # positive when equal or higher than rand_idx if torch.sum(pos_mask) == 0: # no positive sample continue else: batch_drop_mask = torch.sum(pos_mask, dim=1) > 0 # negative sample indicator # drop higher ranks cur_saliency_scores = realneg_saliency_scores * drop_mask / tau + ~drop_mask * -1e+3 # numerical stability logits = cur_saliency_scores - torch.max(cur_saliency_scores, dim=1, keepdim=True)[0] # softmax exp_logits = torch.exp(logits) log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6) mean_log_prob_pos = (pos_mask * log_prob * realneg_vid_token_mask).sum(1) / (pos_mask.sum(1) + 1e-6) loss = - mean_log_prob_pos * batch_drop_mask loss_rank_contrastive_attn = loss_rank_contrastive_attn + loss.mean() loss_rank_contrastive_attn = loss_rank_contrastive_attn / 12 false_neg_mask = ~(real_neg_mask) if false_neg_mask.sum() != 0: if false_neg_mask.sum() == 1: falseneg_saliency_scores = saliency_scores[false_neg_mask].unsqueeze(0) falseneg_saliency_contrast_label = saliency_contrast_label[false_neg_mask].unsqueeze(0) falseneg_vid_token_mask = vid_token_mask[false_neg_mask].unsqueeze(0) falseneg_saliency_scores = falseneg_vid_token_mask * falseneg_saliency_scores + (1. - falseneg_vid_token_mask) * -1e+3 else: falseneg_saliency_scores = saliency_scores[false_neg_mask] falseneg_saliency_contrast_label = saliency_contrast_label[false_neg_mask] falseneg_vid_token_mask = vid_token_mask[false_neg_mask] falseneg_saliency_scores = falseneg_vid_token_mask * falseneg_saliency_scores + (1. - falseneg_vid_token_mask) * -1e+3 tau = 0.5 falseneg_loss_rank_contrastive = 0. for rand_idx in range(1, 12): drop_mask = ~(falseneg_saliency_contrast_label > 100) # no drop pos_mask = (falseneg_saliency_contrast_label >= rand_idx) # positive when equal or higher than rand_idx if torch.sum(pos_mask) == 0: # no positive sample continue else: batch_drop_mask = torch.sum(pos_mask, dim=1) > 0 # negative sample indicator # drop higher ranks cur_saliency_scores = falseneg_saliency_scores * drop_mask / tau + ~drop_mask * -1e+3 # numerical stability logits = cur_saliency_scores - torch.max(cur_saliency_scores, dim=1, keepdim=True)[0] # softmax exp_logits = torch.exp(logits) log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6) mean_log_prob_pos = (pos_mask * log_prob * falseneg_vid_token_mask).sum(1) / (pos_mask.sum(1) + 1e-6) loss = - mean_log_prob_pos * batch_drop_mask falseneg_loss_rank_contrastive = falseneg_loss_rank_contrastive + loss.mean() falseneg_loss_rank_contrastive = falseneg_loss_rank_contrastive / 12 loss_rank_contrastive += falseneg_loss_rank_contrastive saliency_scores = outputs["t2vattnvalues"] # (N, L) pos_indices = targets["saliency_pos_labels"] # (N, #pairs) neg_indices = targets["saliency_neg_labels"] # (N, #pairs) num_pairs = pos_indices.shape[1] # typically 2 or 4 batch_indices = torch.arange(len(saliency_scores)).to(saliency_scores.device) pos_scores = torch.stack( [saliency_scores[batch_indices, pos_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) neg_scores = torch.stack( [saliency_scores[batch_indices, neg_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) loss_saliency_attn = torch.clamp(self.saliency_margin + neg_scores - pos_scores, min=0).sum() \ / (len(pos_scores) * num_pairs) * 2 # * 2 to keep the loss the same scale saliency_binary_label = torch.clamp(targets["saliency_all_labels"], 0, 1) # 
print(saliency_scores.shape, saliency_binary_label.shape) logits = saliency_scores.reshape(-1) labels_x = saliency_binary_label.reshape(-1) BCEcriterion = nn.BCELoss() bceloss = BCEcriterion(logits, labels_x) if self.args.dset_name in ['youtube_uni']: loss_saliency_attn = loss_rank_contrastive_attn + bceloss + loss_neg_pair_attn * 0 + loss_saliency_attn else: loss_saliency_attn = loss_rank_contrastive_attn + bceloss + loss_neg_pair_attn + loss_saliency_attn loss_saliency += (loss_saliency_attn * self.args.lw_wattn) else: ## when batch size == 1 vid_token_mask = outputs["video_mask"] saliency_scores = outputs["saliency_scores"].clone() # (N, L) saliency_contrast_label = targets["saliency_all_labels"] saliency_scores = vid_token_mask * saliency_scores + (1. - vid_token_mask) * -1e+3 tau = 0.5 loss_rank_contrastive = 0. for rand_idx in range(1, 12): drop_mask = ~(saliency_contrast_label > 100) # no drop pos_mask = (saliency_contrast_label >= rand_idx) # positive when equal or higher than rand_idx if torch.sum(pos_mask) == 0: # no positive sample continue else: batch_drop_mask = torch.sum(pos_mask, dim=1) > 0 # negative sample indicator # drop higher ranks cur_saliency_scores = saliency_scores * drop_mask / tau + ~drop_mask * -1e+3 # numerical stability logits = cur_saliency_scores - torch.max(cur_saliency_scores, dim=1, keepdim=True)[0] # softmax exp_logits = torch.exp(logits) log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6) mean_log_prob_pos = (pos_mask * log_prob * vid_token_mask).sum(1) / (pos_mask.sum(1) + 1e-6) loss = - mean_log_prob_pos * batch_drop_mask loss_rank_contrastive = loss_rank_contrastive + loss.mean() loss_rank_contrastive = loss_rank_contrastive / 12 saliency_scores = outputs["saliency_scores"] # (N, L) pos_indices = targets["saliency_pos_labels"] # (N, #pairs) neg_indices = targets["saliency_neg_labels"] # (N, #pairs) num_pairs = pos_indices.shape[1] # typically 2 or 4 batch_indices = torch.arange(len(saliency_scores)).to(saliency_scores.device) pos_scores = torch.stack( [saliency_scores[batch_indices, pos_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) neg_scores = torch.stack( [saliency_scores[batch_indices, neg_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) loss_saliency = torch.clamp(self.saliency_margin + neg_scores - pos_scores, min=0).sum() \ / (len(pos_scores) * num_pairs) * 2 # * 2 to keep the loss the same scale loss_saliency = loss_saliency + loss_rank_contrastive ########### Saliency loss to t2v attn weights ############## """higher scores for positive clips""" vid_token_mask = outputs["video_mask"] saliency_scores = outputs["t2vattnvalues"].clone() # (N, L) saliency_contrast_label = targets["saliency_all_labels"] saliency_scores = vid_token_mask * saliency_scores + (1. - vid_token_mask) * -1e+3 tau = 0.5 loss_rank_contrastive = 0. 
for rand_idx in range(1, 12): drop_mask = ~(saliency_contrast_label > 100) # no drop pos_mask = (saliency_contrast_label >= rand_idx) # positive when equal or higher than rand_idx if torch.sum(pos_mask) == 0: # no positive sample continue else: batch_drop_mask = torch.sum(pos_mask, dim=1) > 0 # negative sample indicator # drop higher ranks cur_saliency_scores = saliency_scores * drop_mask / tau + ~drop_mask * -1e+3 # numerical stability logits = cur_saliency_scores - torch.max(cur_saliency_scores, dim=1, keepdim=True)[0] # softmax exp_logits = torch.exp(logits) log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True) + 1e-6) mean_log_prob_pos = (pos_mask * log_prob * vid_token_mask).sum(1) / (pos_mask.sum(1) + 1e-6) loss = - mean_log_prob_pos * batch_drop_mask loss_rank_contrastive = loss_rank_contrastive + loss.mean() loss_rank_contrastive_attn = loss_rank_contrastive / 12 saliency_scores = outputs["t2vattnvalues"] # (N, L) pos_indices = targets["saliency_pos_labels"] # (N, #pairs) neg_indices = targets["saliency_neg_labels"] # (N, #pairs) num_pairs = pos_indices.shape[1] # typically 2 or 4 batch_indices = torch.arange(len(saliency_scores)).to(saliency_scores.device) pos_scores = torch.stack( [saliency_scores[batch_indices, pos_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) neg_scores = torch.stack( [saliency_scores[batch_indices, neg_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1) loss_saliency_attn = torch.clamp(self.saliency_margin + neg_scores - pos_scores, min=0).sum() \ / (len(pos_scores) * num_pairs) * 2 # * 2 to keep the loss the same scale saliency_binary_label = torch.clamp(targets["saliency_all_labels"], 0, 1) logits = saliency_scores.reshape(-1) labels_x = saliency_binary_label.reshape(-1) BCEcriterion = nn.BCELoss() bceloss = BCEcriterion(logits, labels_x) loss_saliency_attn = loss_rank_contrastive_attn + bceloss + loss_saliency_attn loss_saliency += (loss_saliency_attn * self.args.lw_wattn) return {"loss_saliency": loss_saliency} def loss_contrastive_moment_sentence(self, outputs, targets, indices, log=True): if outputs["memory_moment"] is not None: moment_token = outputs["memory_moment"] nmmemory_moment = outputs["nmmemory_moment"] sentence_token = outputs["sentence_txt"].squeeze(1) sentence_dummy = outputs["sentence_dummy"].squeeze(1) # b, 1, d moment_logits = F.normalize(moment_token, dim=1) nmoment_logits = F.normalize(nmmemory_moment, dim=1) sentence_logits = F.normalize(sentence_token, dim=1) dummy_logits = F.normalize(sentence_dummy, dim=1) similarity_matrix = torch.matmul(moment_logits, sentence_logits.T) # B B nsimilarity_matrix = torch.matmul(nmoment_logits, sentence_logits.T) # B B similarity_matrix = torch.cat([similarity_matrix, nsimilarity_matrix], dim=1) labels = torch.eye(similarity_matrix.shape[0]).to(self.args.device) nlabels = torch.zeros_like(nsimilarity_matrix).to(self.args.device) labels = torch.cat([labels, nlabels], dim=1).max(dim=1)[1] loss_ms_align = self.criterion(similarity_matrix, labels) dummy_similarity_matrix = torch.matmul(moment_logits, dummy_logits.T) dummy_nsimilarity_matrix = torch.matmul(nmoment_logits, dummy_logits.T) dummy_similarity_matrix = torch.cat([dummy_similarity_matrix, dummy_nsimilarity_matrix], dim=1) dummy_labels = (~(torch.eye(similarity_matrix.shape[0]).to(self.args.device).bool())).float() dummy_nlabels = torch.ones_like(nsimilarity_matrix).to(self.args.device) dummy_labels = torch.cat([dummy_labels, dummy_nlabels], dim=1).max(dim=1)[1] dummy_loss_ms_align = 
self.criterion(dummy_similarity_matrix, dummy_labels) loss_ms_align += dummy_loss_ms_align video_mask = outputs['video_mask'] src_vid = outputs['src_vid'] # [batch_size, L_vid, D_vid] moment_mask_ = torch.clamp(targets["relevant_clips"], 0, 1) momtokcls_pred = torch.matmul(moment_token.unsqueeze(1), src_vid.permute(0, 2, 1)) # b 1 L_vid momtokcls_label = moment_mask_ momtokcls_logit = torch.sigmoid(momtokcls_pred) loss_ms_align += (self.bce_criterion(momtokcls_logit.reshape(-1), momtokcls_label.reshape(-1)) * video_mask.reshape(-1)).mean() else: loss_ms_align = 0. return {"loss_ms_align": loss_ms_align} # def loss_moment2txt_sim_distill(self, outputs, targets, indices, log=True): if outputs["moment2txt_similarity"] is not None: moment2txt_similarity = outputs["moment2txt_similarity"] # 32 75 22 moment_mask = outputs["moment_mask"].int() # 32 75 1 txt_mask = outputs["txt_mask"].unsqueeze(1).repeat(1, outputs["cate_attn_weights"].size(1), 1) # b l_t attn_weights = outputs["cate_attn_weights"] # 32 75 22 b, L_vid, L_txt = attn_weights.size() loss_distill = self.kld_criterion( torch.log(attn_weights + 1e-6).reshape(b * L_vid, -1), torch.softmax(moment2txt_similarity, dim=-1).clone().detach().reshape(b * L_vid, -1)).mean(1) * moment_mask.reshape(-1) loss_distill = loss_distill.sum() / moment_mask.sum() else: loss_distill = 0. return {"loss_distill": loss_distill} def loss_orthogonal_dummy(self, outputs, targets, indices, log=True): dummy_tokens = outputs["dummy_tokens"] # (n_dum, dim) if dummy_tokens.size(1) != 1: dummy_tokens_norm = dummy_tokens / dummy_tokens.norm(dim=2)[:, :, None] dummy_tokens_sim = torch.matmul(dummy_tokens_norm, dummy_tokens_norm.permute(0, 2, 1).detach()) for i in range(len(dummy_tokens_sim)): dummy_tokens_sim[i].fill_diagonal_(0) loss_dummy_ortho = dummy_tokens_sim.abs().mean() else: loss_dummy_ortho=0. 
global_tokens = outputs["global_rep_tokens"] global_tokens_norm = global_tokens / global_tokens.norm(dim=1)[:, None] global_tokens_sim = torch.matmul(global_tokens_norm, global_tokens_norm.permute(1, 0).detach()) for i in range(len(global_tokens_sim)): global_tokens_sim.fill_diagonal_(0) loss_dummy_ortho += global_tokens_sim.abs().mean() return {"loss_orthogonal_dummy": loss_dummy_ortho} def loss_contrastive_align(self, outputs, targets, indices, log=True): """encourage higher scores between matched query span and input text""" normalized_text_embed = outputs["proj_txt_mem"] # (bsz, #tokens, d) text tokens normalized_img_embed = outputs["proj_queries"] # (bsz, #queries, d) logits = torch.einsum( "bmd,bnd->bmn", normalized_img_embed, normalized_text_embed) # (bsz, #queries, #tokens) logits = logits.sum(2) / self.temperature # (bsz, #queries) idx = self._get_src_permutation_idx(indices) positive_map = torch.zeros_like(logits, dtype=torch.bool) positive_map[idx] = True positive_logits = logits.masked_fill(~positive_map, 0) pos_term = positive_logits.sum(1) # (bsz, ) num_pos = positive_map.sum(1) # (bsz, ) neg_term = logits.logsumexp(1) # (bsz, ) loss_nce = - pos_term / num_pos + neg_term # (bsz, ) losses = {"loss_contrastive_align": loss_nce.mean()} return losses def loss_contrastive_align_vid_txt(self, outputs, targets, indices, log=True): """encourage higher scores between matched query span and input text""" # TODO (1) align vid_mem and txt_mem; # TODO (2) change L1 loss as CE loss on 75 labels, similar to soft token prediction in MDETR normalized_text_embed = outputs["proj_txt_mem"] # (bsz, #tokens, d) text tokens normalized_img_embed = outputs["proj_queries"] # (bsz, #queries, d) logits = torch.einsum( "bmd,bnd->bmn", normalized_img_embed, normalized_text_embed) # (bsz, #queries, #tokens) logits = logits.sum(2) / self.temperature # (bsz, #queries) idx = self._get_src_permutation_idx(indices) positive_map = torch.zeros_like(logits, dtype=torch.bool) positive_map[idx] = True positive_logits = logits.masked_fill(~positive_map, 0) pos_term = positive_logits.sum(1) # (bsz, ) num_pos = positive_map.sum(1) # (bsz, ) neg_term = logits.logsumexp(1) # (bsz, ) loss_nce = - pos_term / num_pos + neg_term # (bsz, ) losses = {"loss_contrastive_align": loss_nce.mean()} return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx # two 1D tensors of the same length def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, **kwargs): loss_map = { "spans": self.loss_spans, "labels": self.loss_labels, "contrastive_align": self.loss_contrastive_align, "saliency": self.loss_saliency, "ms_align": self.loss_contrastive_moment_sentence, "distill": self.loss_moment2txt_sim_distill, "orthogonal_dummy":self.loss_orthogonal_dummy } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, **kwargs) def forward(self, outputs, targets): """ This performs the loss computation. 
Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} # Retrieve the matching between the outputs of the last layer and the targets # list(tuples), each tuple is (pred_span_indices, tgt_span_indices) # only for HL, do not use matcher if self.use_matcher: indices = self.matcher(outputs_without_aux, targets) losses_target = self.losses else: indices = None losses_target = ["saliency"] # Compute all the requested losses losses = {} # for loss in self.losses: for loss in losses_target: losses.update(self.get_loss(loss, outputs, targets, indices)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if 'aux_outputs' in outputs: for i, aux_outputs in enumerate(outputs['aux_outputs']): # indices = self.matcher(aux_outputs, targets) if self.use_matcher: indices = self.matcher(aux_outputs, targets) losses_target = self.losses else: indices = None losses_target = ["saliency", "ms_align", "distill", "orthogonal_dummy"] for loss in losses_target: if "saliency" == loss: # skip as it is only in the top layer continue if "ms_align" == loss: continue if "distill" == loss: continue if "orthogonal_dummy" == loss: continue kwargs = {} l_dict = self.get_loss(loss, aux_outputs, targets, indices, **kwargs) l_dict = {k + f'_{i}': v for k, v in l_dict.items()} losses.update(l_dict) return losses class MLP(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class LinearLayer(nn.Module): """linear layer configurable with layer normalization, dropout, ReLU.""" def __init__(self, input_dim, output_dim, layer_norm=True, dropout=0.1, relu=True): super(LinearLayer, self).__init__() self.relu = relu self.layer_norm = layer_norm if layer_norm: self.LayerNorm = nn.LayerNorm(input_dim) layers = [ nn.Dropout(dropout), nn.Linear(input_dim, output_dim) ] self.net = nn.Sequential(*layers) def forward(self, x): """(N, L, D)""" if self.layer_norm: x = self.LayerNorm(x) x = self.net(x) if self.relu: x = F.relu(x, inplace=True) return x # (N, L, D) def build_model(args): device = torch.device(args.device) transformer = build_transformer(args)
position_embedding, txt_position_embedding = build_position_encoding(args)
6
2023-11-10 12:45:25+00:00
8k
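The loss code captured in the record above centers on an InfoNCE-style contrastive alignment between matched queries and text tokens (`loss_contrastive_align`). The sketch below reproduces just that computation on toy tensors; the shapes, the temperature value, and the choice of positive query are illustrative assumptions rather than values taken from the model.

```python
import torch

# Toy shapes: batch of 2, 4 decoder queries, 3 text tokens, embedding dim 8 (all assumed).
bsz, n_queries, n_tokens, d = 2, 4, 3, 8
temperature = 0.07  # assumed value; the module reads self.temperature

proj_queries = torch.randn(bsz, n_queries, d)
proj_txt_mem = torch.randn(bsz, n_tokens, d)

# Query-token similarity, summed over tokens and scaled, as in loss_contrastive_align.
logits = torch.einsum("bmd,bnd->bmn", proj_queries, proj_txt_mem).sum(2) / temperature

# Pretend the matcher marked query 0 as the positive match in every sample.
positive_map = torch.zeros_like(logits, dtype=torch.bool)
positive_map[:, 0] = True

pos_term = logits.masked_fill(~positive_map, 0).sum(1)  # sum of positive logits per sample
num_pos = positive_map.sum(1)                           # number of positives per sample
neg_term = logits.logsumexp(1)                          # softmax denominator over all queries

loss_nce = (-pos_term / num_pos + neg_term).mean()
print(loss_nce)
```

The `neg_term` logsumexp over all queries acts as the softmax denominator, so minimizing the loss pushes the matched query's summed similarity above that of every other query.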
kudelskisecurity/fuzzomatic
fuzzomatic/main.py
[ { "identifier": "utils", "path": "fuzzomatic/tools/utils.py", "snippet": "def get_codebase_name(codebase_dir):\ndef autofix_unwrap_calls(target_path):\ndef load_fuzz_target(target_path):\ndef autofix_fuzz_target(target_path):\ndef rustfmt_target(target_path):\ndef build_target(codebase_dir, target_name):\ndef git_clone(url, path):\ndef add_fuzz_dependency(codebase_dir, dependency, features=[]):\ndef remove_fuzz_dependency(codebase_dir, dependency):\ndef write_fuzz_target(code_snippet, codebase_dir, target_name):\ndef build_fuzz_target_path(codebase_dir, target_name):\ndef init_cargo_fuzz(codebase_dir, target_name):\ndef expand_workspace_member(codebase_dir, member):\ndef read_workspace_members(codebase_dir):\ndef expand_members(codebase_dir, members):\ndef check_has_workspace_members(codebase_dir):\ndef check_virtual_manifest(codebase_dir):\ndef detect_git_url(codebase_dir, remote_name=\"origin\"):\ndef detect_crate_name(codebase_dir):\ndef load_toml(file_path):\ndef write_toml(file_path, contents):\ndef add_parent_dependencies(codebase_dir, root_codebase_dir):" }, { "identifier": "discovery", "path": "fuzzomatic/discovery.py", "snippet": "OSS_FUZZ_PROJECTS = []\ndef find_rust_projects_on_github(query, page=1):\ndef clone_projects(projects, dest, maxprojects=0):\ndef is_project_to_be_skipped(codebase_dir, git_url, verbose=False):\ndef is_project_already_fuzzed(codebase_dir):\ndef is_project_covered_by_oss_fuzz(name, git_url):\ndef load_repos(query):\ndef get_parser():\ndef main():" }, { "identifier": "try_benches_approach", "path": "fuzzomatic/approaches/benches.py", "snippet": "def try_benches_approach(codebase_dir, target_name=DEFAULT_TARGET_NAME, **kwargs):\n return try_examples_approach(\n codebase_dir, target_name=target_name, examples_dirname=\"benches\", **kwargs\n )" }, { "identifier": "try_examples_approach", "path": "fuzzomatic/approaches/examples.py", "snippet": "def try_examples_approach(\n codebase_dir,\n target_name=DEFAULT_TARGET_NAME,\n examples_dirname=\"examples\",\n **_kwargs,\n):\n example_paths = detect_example_paths(codebase_dir, examples_dirname)\n\n if example_paths is None:\n print(\"Failed to detect examples\")\n return\n\n if examples_dirname == \"examples\":\n print(\"Examples detected.\")\n elif examples_dirname == \"benches\":\n print(\"Benches detected\")\n\n max_examples = 5\n example_snippets = []\n for example_path in example_paths[:max_examples]:\n print(f\"Using example {example_path}\")\n with open(example_path) as f:\n example_code = f.read()\n example_snippets.append(example_code)\n\n # use shortest examples first\n example_snippets = sorted(example_snippets, key=lambda x: len(x))\n\n for example_code in example_snippets:\n prompt = prompts.example_prompt(example_code)\n success, fuzz_target_path = llm_attempt(\n codebase_dir, prompt, target_name, remaining_attempts=1\n )\n if success:\n yield fuzz_target_path" }, { "identifier": "try_functions_approach", "path": "fuzzomatic/approaches/functions.py", "snippet": "def try_functions_approach(\n codebase_dir,\n target_name=DEFAULT_TARGET_NAME,\n root_codebase_dir=None,\n args=None,\n **_kwargs,\n):\n functions = find_target_functions_via_cargo_doc(\n codebase_dir, root_codebase_dir=root_codebase_dir\n )\n\n if functions is None:\n print(\"Failed to detect functions\")\n return\n\n ordered_functions = score_functions(functions)\n\n print(f\"{len(ordered_functions)} functions detected\")\n print(\"Detected target functions:\")\n for f in ordered_functions:\n print(f)\n\n max_functions = 8 # try max N 
functions\n max_negative_score_functions = 2\n negative_score_functions = 0\n for f in ordered_functions[:max_functions]:\n path = f[0]\n function_name = f[1]\n score = f[3]\n\n # skip functions matching deny list\n if args is not None and args.functions_denylist is not None:\n skip_function = False\n fully_qualified_function_name = \"::\".join(path)\n if len(fully_qualified_function_name) > 0:\n fully_qualified_function_name += \"::\"\n fully_qualified_function_name += function_name\n for word in args.functions_denylist:\n if word in fully_qualified_function_name:\n skip_function = True\n if skip_function:\n print(\n f\"Skipping function {fully_qualified_function_name} \"\n f\"because of deny list: {args.functions_denylist}\"\n )\n continue\n\n print(\"Attempting function:\")\n print(f)\n\n if score <= 0:\n negative_score_functions += 1\n\n success, fuzz_target_path = try_function(f, codebase_dir, target_name)\n\n if success:\n yield fuzz_target_path\n\n if negative_score_functions >= max_negative_score_functions:\n break" }, { "identifier": "try_readme_approach", "path": "fuzzomatic/approaches/readme.py", "snippet": "def try_readme_approach(\n codebase_dir, target_name=DEFAULT_TARGET_NAME, virtual_manifest=False, **_kwargs\n):\n readme_paths = detect_readme_paths(codebase_dir, parent_readme=virtual_manifest)\n if len(readme_paths) == 0:\n print(\"Failed to detect README\")\n return\n\n for readme_path in readme_paths:\n print(f\"README detected: {readme_path}\")\n readme_contents = prompts.load_file_contents(readme_path)\n\n # if the readme does not contain any code snippets, skip it\n if \"```\" not in readme_contents:\n print(\n \"Skipping readme because it does not appear \"\n \"to contain any code snippets\"\n )\n continue\n\n prompt = prompts.readme_prompt(readme_contents)\n\n build_success, fuzz_target_path = llm_attempt(codebase_dir, prompt, target_name)\n\n if build_success:\n yield fuzz_target_path" }, { "identifier": "try_unit_tests_approach", "path": "fuzzomatic/approaches/unit_tests.py", "snippet": "def try_unit_tests_approach(codebase_dir, target_name=DEFAULT_TARGET_NAME, **_kwargs):\n max_unit_tests = 3\n unit_tests = detect_unit_tests(codebase_dir, max_tests=max_unit_tests)\n\n if unit_tests is None:\n print(\"Failed to detect unit tests with function\")\n return\n\n i = 1\n for test_source_code, use_statements in unit_tests[:max_unit_tests]:\n print(f\"USING UNIT TEST {i}/{max_unit_tests}:\")\n i += 1\n print(\"--- (use statements)\")\n for use_stmt in use_statements:\n print(use_stmt)\n print(\"---\")\n print(test_source_code)\n print(\"---\")\n\n prompt = prompts.unit_test_prompt(test_source_code, use_statements)\n success, fuzz_target_path = llm_attempt(\n codebase_dir, prompt, target_name, remaining_attempts=0\n )\n if success:\n yield fuzz_target_path" }, { "identifier": "try_unit_tests_with_function_approach", "path": "fuzzomatic/approaches/unit_tests.py", "snippet": "def try_unit_tests_with_function_approach(\n codebase_dir, target_name=DEFAULT_TARGET_NAME, **_kwargs\n):\n # try unit tests with associated function\n max_unit_tests = 3\n\n unit_tests_with_function = detect_unit_tests_with_function(\n codebase_dir, max_tests=max_unit_tests\n )\n if unit_tests_with_function is None:\n print(\"Failed to detect unit tests with function\")\n return\n\n # sort unit tests by their length (use shortest first)\n augmented_unit_tests = sorted(\n unit_tests_with_function,\n key=lambda x: len(x[0]) + len(x[1]) + len(x[2]) + len(x[3]),\n )\n\n i = 1\n for (\n 
test_function_source_code,\n additional_function_code,\n additional_function_name,\n use_statements,\n ) in augmented_unit_tests:\n print(f\"TRYING UNIT TEST {i}/{max_unit_tests}\")\n print(\"=\" * 40)\n i += 1\n print(\"Using unit test with additional function:\")\n print(\"--- (use statements)\")\n for use_stmt in use_statements:\n print(use_stmt)\n print(\"---\")\n print(test_function_source_code)\n print(\"---\")\n print(additional_function_code)\n print(\"---\")\n\n prompt = prompts.unit_test_prompt_with_additional_function(\n test_function_source_code, additional_function_name, use_statements\n )\n success, fuzz_target_path = llm_attempt(\n codebase_dir,\n prompt,\n target_name,\n remaining_attempts=0,\n additional_code=additional_function_code,\n )\n if success:\n yield fuzz_target_path" }, { "identifier": "DEFAULT_TARGET_NAME", "path": "fuzzomatic/tools/constants.py", "snippet": "DEFAULT_TARGET_NAME = \"auto\"" }, { "identifier": "FUZZOMATIC_RESULTS_FILENAME", "path": "fuzzomatic/tools/constants.py", "snippet": "FUZZOMATIC_RESULTS_FILENAME = \".fuzzomatic_results.json\"" }, { "identifier": "EXIT_NOT_A_CARGO_PROJECT", "path": "fuzzomatic/tools/constants.py", "snippet": "EXIT_NOT_A_CARGO_PROJECT = 100" }, { "identifier": "EXIT_PROJECT_ALREADY_FUZZED", "path": "fuzzomatic/tools/constants.py", "snippet": "EXIT_PROJECT_ALREADY_FUZZED = 101" }, { "identifier": "EXIT_PROJECT_DOES_NOT_BUILD", "path": "fuzzomatic/tools/constants.py", "snippet": "EXIT_PROJECT_DOES_NOT_BUILD = 102" }, { "identifier": "EXIT_OPENAI_API_KEY_ERROR", "path": "fuzzomatic/tools/constants.py", "snippet": "EXIT_OPENAI_API_KEY_ERROR = 103" }, { "identifier": "ask_llm", "path": "fuzzomatic/tools/llm.py", "snippet": "def ask_llm(\n prompt,\n model=\"gpt-3.5-turbo\",\n long_model=\"gpt-3.5-turbo-16k\",\n long_model_retry=True,\n retry=2,\n):\n print(\"Asking LLM...\")\n # Make a request to the API to generate text\n messages = [{\"role\": \"user\", \"content\": prompt}]\n\n try:\n if not hasattr(ask_llm, \"counter\"):\n ask_llm.counter = 0\n ask_llm.counter += 1\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=0,\n timeout=35,\n )\n except openai.error.InvalidRequestError:\n if long_model_retry:\n print(\"LLM call failed\")\n print(f\"Retrying with model {long_model}\")\n return ask_llm(\n prompt, model=long_model, long_model_retry=False, retry=retry\n )\n else:\n return None\n except openai.error.Timeout:\n print(\"LLM call timeout\")\n if retry > 0:\n print(\"Retrying...\")\n return ask_llm(prompt, model=model, long_model=long_model, retry=retry - 1)\n return None\n except openai.error.RateLimitError as e:\n print(\"OpenAI API rate limit reached\")\n print(e)\n sleep_seconds = 60\n print(f\"Sleeping for {sleep_seconds} seconds...\")\n time.sleep(sleep_seconds)\n print(\"Retrying\")\n return ask_llm(\n prompt,\n model=model,\n long_model=long_model,\n long_model_retry=long_model_retry,\n retry=retry,\n )\n except openai.error.ServiceUnavailableError as e:\n print(\"OpenAI service unavailable\")\n print(e)\n sleep_seconds = 60\n print(f\"Sleeping for {sleep_seconds} seconds...\")\n time.sleep(sleep_seconds)\n print(\"Retrying\")\n return ask_llm(\n prompt,\n model=model,\n long_model=long_model,\n long_model_retry=long_model_retry,\n retry=retry,\n )\n except openai.error.APIError as e:\n print(\"OpenAI API Error\")\n print(e)\n sleep_seconds = 60\n print(f\"Sleeping for {sleep_seconds} seconds...\")\n time.sleep(sleep_seconds)\n print(\"Retrying\")\n return ask_llm(\n prompt,\n 
model=model,\n long_model=long_model,\n long_model_retry=long_model_retry,\n retry=retry,\n )\n except openai.error.AuthenticationError as e:\n print(\"OpenAI authentication error. Is the OpenAI API key set and correct?\")\n print(e)\n sys.exit(EXIT_OPENAI_API_KEY_ERROR)\n\n # extract usage information (tokens in/out)\n usage = response.usage\n prompt_tokens = usage.prompt_tokens\n completion_tokens = usage.completion_tokens\n total_tokens = usage.total_tokens\n update_tokens(prompt_tokens, completion_tokens, total_tokens)\n\n # Extract the generated text from the API response\n generated_text = response.choices[0].message.content\n print(\"Got LLM response.\")\n return generated_text" }, { "identifier": "reset_ask_llm_counts", "path": "fuzzomatic/tools/llm.py", "snippet": "def reset_ask_llm_counts():\n ask_llm.counter = 0\n ask_llm.prompt_tokens = 0\n ask_llm.completion_tokens = 0\n ask_llm.total_tokens = 0" }, { "identifier": "load_openai_api_key", "path": "fuzzomatic/tools/llm.py", "snippet": "def load_openai_api_key():\n varname = \"OPENAI_API_KEY\"\n if varname in os.environ:\n # environment variable is set, nothing to do\n print(\"API key is set in env var\")\n else:\n # env var not set, try to load from settings file\n here = os.path.dirname(os.path.realpath(__file__))\n project_root = os.path.join(here, os.pardir, os.pardir)\n settings_filename = \"settings.yml\"\n settings_path = os.path.join(project_root, settings_filename)\n error_message = (\n f\"OpenAI API key not set. \"\n f\"Please set it in {settings_filename} \"\n f\"or set the {varname} environment variable.\"\n )\n if not os.path.exists(settings_path):\n sys.exit(error_message)\n else:\n with open(settings_path) as f:\n blob = yaml.safe_load(f)\n found = False\n if \"settings\" in blob:\n settings = blob[\"settings\"]\n if \"openai_api_key\" in settings:\n found = True\n openai_api_key = settings[\"openai_api_key\"]\n print(\"setting openai.api_key\")\n openai.api_key = openai_api_key\n\n # check that all is good with this key\n _models = get_available_models()\n\n if not found:\n sys.exit(error_message)" }, { "identifier": "evaluate_target", "path": "fuzzomatic/tools/runtime.py", "snippet": "def evaluate_target(\n fuzz_project_dir,\n max_total_time_seconds=DEFAULT_MAX_TOTAL_TIME_SECONDS,\n):\n print(f\"Evaluating target: {fuzz_project_dir}\")\n success, error = run_fuzz_target(\n fuzz_project_dir, max_total_time_seconds=max_total_time_seconds\n )\n cov_changes, first_cov, last_cov = is_cov_changing(error)\n print(f\"Cov changing: {cov_changes}\")\n print(f\"{first_cov=}\")\n print(f\"{last_cov=}\")\n\n # determine panic location\n panic_pattern = \"panicked at \"\n panic_outside_fuzz_target = None\n lines = error.decode(\"utf-8\").split(\"\\n\")\n for line in lines:\n if panic_pattern in line:\n if f\"fuzz_targets/{DEFAULT_TARGET_NAME}.rs\" in line:\n panic_outside_fuzz_target = False\n break\n else:\n panic_outside_fuzz_target = True\n\n # useful:\n # * cov changes or panicks outside fuzz target\n is_useful = cov_changes or (\n panic_outside_fuzz_target is not None and panic_outside_fuzz_target\n )\n\n # found bug:\n # * crashes and thread panicked not inside fuzz target (not in auto.rs)\n bug_found = not success and (\n panic_outside_fuzz_target is not None and panic_outside_fuzz_target\n )\n\n return is_useful, bug_found, error" }, { "identifier": "cleanup_corpus", "path": "fuzzomatic/tools/runtime.py", "snippet": "def cleanup_corpus(t):\n corpus_dir = os.path.join(t, \"corpus\")\n if os.path.exists(corpus_dir):\n 
print(\"Cleaning up corpus dir\")\n print(corpus_dir)\n shutil.rmtree(corpus_dir, ignore_errors=True)" }, { "identifier": "get_codebase_name", "path": "fuzzomatic/tools/utils.py", "snippet": "def get_codebase_name(codebase_dir):\n if codebase_dir.endswith(\"/\"):\n return os.path.basename(os.path.dirname(codebase_dir))\n else:\n return os.path.basename(codebase_dir)" }, { "identifier": "git_clone", "path": "fuzzomatic/tools/utils.py", "snippet": "def git_clone(url, path):\n os.makedirs(path, exist_ok=True)\n\n cmd = [\"git\", \"clone\", \"--recurse-submodules\", url]\n try:\n env_copy = os.environ.copy()\n env_copy[\"GIT_TERMINAL_PROMPT\"] = \"0\"\n subprocess.check_output(cmd, cwd=path, stderr=subprocess.STDOUT, env=env_copy)\n except subprocess.CalledProcessError as e:\n cmd_str = \" \".join(cmd)\n print(f\"Failed to run command: {cmd_str}\")\n print(e.output.decode(\"utf-8\"))\n print(\"Failed to run git clone\")\n splits = url.split(\"/\")\n for s in splits[::-1]: # walk in reverse\n piece = s.strip()\n if len(piece) > 0:\n codebase_name = piece\n break\n if codebase_name.endswith(\".git\"):\n codebase_name = codebase_name.replace(\".git\", \"\")\n\n print(f\"codebase_name: {codebase_name}\")\n codebase_dir = os.path.join(path, codebase_name)\n print(f\"codebase dir: {codebase_dir}\")\n return codebase_dir" }, { "identifier": "init_cargo_fuzz", "path": "fuzzomatic/tools/utils.py", "snippet": "def init_cargo_fuzz(codebase_dir, target_name):\n cmd_init = [\"cargo\", \"fuzz\", \"init\"]\n try:\n subprocess.check_output(cmd_init, cwd=codebase_dir, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n cmd_str = \" \".join(cmd_init)\n print(f\"Warning: failed to run {cmd_str}\")\n error = e.output.decode(\"utf-8\")\n if \"could not read the manifest file\" in error:\n print(error)\n return False\n if \"is malformed\" in error:\n print(error)\n return False\n if \"could not find a cargo project\" in error:\n print(error)\n return False\n\n # try to create target with cargo fuzz, in case it's the first time\n cmd_add = [\"cargo\", \"fuzz\", \"add\", target_name]\n try:\n subprocess.check_output(cmd_add, cwd=codebase_dir, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n pass\n\n return True" }, { "identifier": "check_virtual_manifest", "path": "fuzzomatic/tools/utils.py", "snippet": "def check_virtual_manifest(codebase_dir):\n cargo_file = os.path.join(codebase_dir, \"Cargo.toml\")\n if os.path.exists(cargo_file):\n with open(cargo_file) as f:\n workspace_found = False\n package_found = False\n for line in f:\n line = line.strip()\n if line == \"[workspace]\":\n workspace_found = True\n if line == \"[package]\":\n package_found = True\n virtual_manifest = workspace_found and not package_found\n return virtual_manifest\n\n return False" }, { "identifier": "check_has_workspace_members", "path": "fuzzomatic/tools/utils.py", "snippet": "def check_has_workspace_members(codebase_dir):\n cargo_file = os.path.join(codebase_dir, \"Cargo.toml\")\n if os.path.exists(cargo_file):\n with open(cargo_file) as f:\n workspace_found = False\n for line in f:\n line = line.strip()\n if line == \"[workspace]\":\n workspace_found = True\n has_members = workspace_found\n return has_members\n\n return False" }, { "identifier": "detect_git_url", "path": "fuzzomatic/tools/utils.py", "snippet": "def detect_git_url(codebase_dir, remote_name=\"origin\"):\n cmd = [\"git\", \"remote\", \"get-url\", remote_name]\n try:\n output = subprocess.check_output(cmd, cwd=codebase_dir)\n git_url = 
output.decode(\"utf-8\").strip()\n return git_url\n except subprocess.CalledProcessError:\n print(\"Failed to detect git URL\")\n return None" } ]
import argparse import datetime import json import os.path import subprocess import sys import fuzzomatic.tools.utils from fuzzomatic.tools import utils from fuzzomatic import discovery from fuzzomatic.approaches import ( try_functions_approach, try_examples_approach, try_readme_approach, try_benches_approach, try_unit_tests_approach, try_unit_tests_with_function_approach, ) from fuzzomatic.tools.constants import ( DEFAULT_TARGET_NAME, FUZZOMATIC_RESULTS_FILENAME, EXIT_NOT_A_CARGO_PROJECT, EXIT_PROJECT_ALREADY_FUZZED, EXIT_PROJECT_DOES_NOT_BUILD, EXIT_OPENAI_API_KEY_ERROR, ) from fuzzomatic.tools.llm import ask_llm, reset_ask_llm_counts, load_openai_api_key from fuzzomatic.tools.runtime import evaluate_target, cleanup_corpus from fuzzomatic.tools.utils import ( get_codebase_name, git_clone, init_cargo_fuzz, check_virtual_manifest, check_has_workspace_members, detect_git_url, )
6,128
nargs="+", dest="functions_denylist", default=None, help="List of words that should not appear in target function names. " "Such functions will be skipped.", ) return parser def save_results( args, git_url, generated_fuzz_targets, start_time, end_time, duration, outcome_reason, ): name = get_codebase_name(args.codebase_dir) # runtime duration_seconds = duration.total_seconds() # create results results_path = os.path.join(args.codebase_dir, FUZZOMATIC_RESULTS_FILENAME) results = { "codebase_dir": args.codebase_dir, "name": name, "git_url": git_url, "generated_fuzz_targets": generated_fuzz_targets, "start_time": start_time.isoformat(), "end_time": end_time.isoformat(), "duration_seconds": duration_seconds, "outcome_reason": outcome_reason, } # save results to file with open(results_path, "w+") as fout: fout.write(json.dumps(results)) print(f"Saved fuzzomatic results to: {results_path}") def get_approaches(requested_approaches): approaches = [] if requested_approaches is not None: for name, func in ENABLED_APPROACHES: if name in requested_approaches: approaches.append((name, func)) else: approaches = ENABLED_APPROACHES return approaches def generate_building_fuzz_targets( args, codebase_dir, git_url, approaches, force=False ): codebase_name = get_codebase_name(codebase_dir) if not force: print(f"Checking if {codebase_name} is not already in oss-fuzz") if discovery.is_project_to_be_skipped(codebase_dir, git_url): yield "message", EXIT_PROJECT_ALREADY_FUZZED autofuzz_generator = autofuzz_codebase(args, codebase_dir, approaches=approaches) for result in autofuzz_generator: yield result def ensure_dependencies_available(): required_external_commands = [ ("semgrep", ["semgrep"]), ("cargo fuzz", ["cargo", "fuzz", "help"]), ] print("Checking external dependencies...") for cmd_name, cmd in required_external_commands: try: subprocess.check_call( cmd, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL ) print(f"[SUCCESS] {cmd_name}") except (subprocess.CalledProcessError, FileNotFoundError): print( f"[FAILURE] {cmd_name} is a required dependency. " f"Fuzzomatic won't run without it." ) print( "Make sure that the dependency is installed " "as explained in the README instructions" ) print("Aborting...") sys.exit(-1) def main(args=None): # reset LLM counter ask_llm.counter = 0 if args is None: parser = get_parser() args = parser.parse_args() # load openai api key load_openai_api_key() # check required dependencies are available ensure_dependencies_available() very_start = datetime.datetime.utcnow() # if git URL, clone the repository git_url = None if args.codebase_dir.startswith("https://"): git_url = args.codebase_dir path = os.path.abspath(os.path.join(".", "git")) print("Code base appears to be a git URL. Trying to git clone...") codebase_dir = git_clone(args.codebase_dir, path) args.codebase_dir = codebase_dir else:
#!/usr/bin/env python3 def get_parser(): prog_name = "fuzzomatic" parser = argparse.ArgumentParser( prog=prog_name, description="Automatically generate Rust fuzz targets from scratch", ) parser.add_argument( "codebase_dir", help="Path to the codebase to generate a fuzz target for" ) parser = add_parser_shared_arguments(parser) return parser def add_parser_shared_arguments(parser): parser.add_argument( "--force", action="store_true", dest="force", help="Run Fuzzomatic anyway. Even if project is already covered by oss-fuzz", ) parser.add_argument( "--stop-on", dest="stop_on", default="bug", help="Stop on can be one of `building`, `useful` or `bug`. " "`building` means stop when a building fuzz target was generated." "`useful` means stop when a useful fuzz target was generated." "`bug` means stop when a bug is found. ", ) parser.add_argument( "--max-fuzz-targets", dest="max_fuzz_targets", type=int, default=1, help="Stop if `max_fuzz_targets` fuzz targets match the " "`stop_on` condition for this code base." "For example, if max_fuzz_targets is 2 and stop_on is bug, " "we will stop as soon as 2 bugs are found.", ) parser.add_argument( "--approaches", nargs="+", dest="approaches", default=None, help="List of approaches to use", ) parser.add_argument( "--functions-denylist", nargs="+", dest="functions_denylist", default=None, help="List of words that should not appear in target function names. " "Such functions will be skipped.", ) return parser def save_results( args, git_url, generated_fuzz_targets, start_time, end_time, duration, outcome_reason, ): name = get_codebase_name(args.codebase_dir) # runtime duration_seconds = duration.total_seconds() # create results results_path = os.path.join(args.codebase_dir, FUZZOMATIC_RESULTS_FILENAME) results = { "codebase_dir": args.codebase_dir, "name": name, "git_url": git_url, "generated_fuzz_targets": generated_fuzz_targets, "start_time": start_time.isoformat(), "end_time": end_time.isoformat(), "duration_seconds": duration_seconds, "outcome_reason": outcome_reason, } # save results to file with open(results_path, "w+") as fout: fout.write(json.dumps(results)) print(f"Saved fuzzomatic results to: {results_path}") def get_approaches(requested_approaches): approaches = [] if requested_approaches is not None: for name, func in ENABLED_APPROACHES: if name in requested_approaches: approaches.append((name, func)) else: approaches = ENABLED_APPROACHES return approaches def generate_building_fuzz_targets( args, codebase_dir, git_url, approaches, force=False ): codebase_name = get_codebase_name(codebase_dir) if not force: print(f"Checking if {codebase_name} is not already in oss-fuzz") if discovery.is_project_to_be_skipped(codebase_dir, git_url): yield "message", EXIT_PROJECT_ALREADY_FUZZED autofuzz_generator = autofuzz_codebase(args, codebase_dir, approaches=approaches) for result in autofuzz_generator: yield result def ensure_dependencies_available(): required_external_commands = [ ("semgrep", ["semgrep"]), ("cargo fuzz", ["cargo", "fuzz", "help"]), ] print("Checking external dependencies...") for cmd_name, cmd in required_external_commands: try: subprocess.check_call( cmd, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL ) print(f"[SUCCESS] {cmd_name}") except (subprocess.CalledProcessError, FileNotFoundError): print( f"[FAILURE] {cmd_name} is a required dependency. " f"Fuzzomatic won't run without it." 
) print( "Make sure that the dependency is installed " "as explained in the README instructions" ) print("Aborting...") sys.exit(-1) def main(args=None): # reset LLM counter ask_llm.counter = 0 if args is None: parser = get_parser() args = parser.parse_args() # load openai api key load_openai_api_key() # check required dependencies are available ensure_dependencies_available() very_start = datetime.datetime.utcnow() # if git URL, clone the repository git_url = None if args.codebase_dir.startswith("https://"): git_url = args.codebase_dir path = os.path.abspath(os.path.join(".", "git")) print("Code base appears to be a git URL. Trying to git clone...") codebase_dir = git_clone(args.codebase_dir, path) args.codebase_dir = codebase_dir else:
git_url = detect_git_url(args.codebase_dir)
24
2023-11-14 09:52:59+00:00
8k
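Each approach module in this record (`try_readme_approach`, `try_functions_approach`, and so on) is a generator that yields the path of a fuzz target that compiles. A minimal sketch of how such generators might be driven and evaluated is shown below; the driver function and its stop condition are illustrative assumptions and are not the repository's actual `autofuzz_codebase` implementation.

```python
from fuzzomatic.tools.runtime import evaluate_target

def drive_approaches(codebase_dir, approaches, max_targets=1):
    """Illustrative driver: try each (name, generator) approach until enough targets are found."""
    found = []
    for name, approach_fn in approaches:
        print(f"Trying approach: {name}")
        # Each approach yields paths of fuzz targets that already build.
        for fuzz_target_path in approach_fn(codebase_dir):
            # evaluate_target runs the target and reports usefulness / bugs;
            # passing the codebase dir as the fuzz project dir is an assumption here.
            is_useful, bug_found, _error = evaluate_target(codebase_dir)
            found.append((name, fuzz_target_path, is_useful, bug_found))
            if len(found) >= max_targets:
                return found
    return found
```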
AdmTal/music-graphs
src/graph_stuff.py
[ { "identifier": "Theme", "path": "src/theme_stuff.py", "snippet": "class Theme:\n def __init__(\n self,\n theme_file,\n defaults_file,\n ):\n with open(theme_file, \"r\") as stream:\n self._theme = AttributeDict(**yaml.safe_load(stream))\n\n with open(defaults_file, \"r\") as stream:\n try:\n self._defaults = AttributeDict(**yaml.safe_load(stream))\n except:\n self._defaults = AttributeDict(**{})\n\n def _get_value(self, path, default_path=\"\"):\n value = self._theme.get_path(path)\n if value is not None:\n return value\n if default_path:\n theme_default = self._theme.get_path(default_path)\n if theme_default:\n return theme_default\n return self._defaults.get_path(default_path)\n\n @property\n def debug_show_base_image(self):\n path = \"debug.show_base_image\"\n return self._get_value(path, path)\n\n @property\n def debug_max_frames(self):\n path = \"debug.max_frames\"\n return self._get_value(path, path)\n\n @property\n def frame_rate(self):\n path = \"frame_rate\"\n return self._get_value(path, path)\n\n @property\n def graphviz_engine(self):\n path = \"graphviz_engine\"\n return self._get_value(path, path)\n\n @property\n def squash_tracks(self):\n path = \"squash_tracks\"\n return self._get_value(path, path)\n\n def skip_track(self, track):\n if track == \"track_1\":\n return True\n return self._get_value(\n f\"tracks.{track}.skip\",\n default_path=f\"tracks.default.skip\",\n )\n\n def pulses_only(self, track):\n return self._get_value(\n f\"tracks.{track}.pulses_only\",\n default_path=f\"tracks.default.pulses_only\",\n )\n\n def allow_self_notes(self, track):\n return self._get_value(\n f\"tracks.{track}.allow_self_notes\",\n default_path=f\"tracks.default.skip\",\n )\n\n @property\n def graphviz_edge_attrs(self):\n path = \"graphviz_edge_attrs\"\n return self._get_value(path, path)\n\n @property\n def graphviz_node_attrs(self):\n path = \"graphviz_node_attrs\"\n return self._get_value(path, path)\n\n @property\n def graphviz_graph_attrs(self):\n path = \"graphviz_graph_attrs\"\n return self._get_value(path, path)\n\n @property\n def nodes_sorted(self):\n path = \"nodes_sorted\"\n return self._get_value(path, path)\n\n @property\n def background_image(self):\n path = \"background_image\"\n return self._get_value(path, path)\n\n @property\n def background_color(self):\n path = \"background_color\"\n return self._get_value(path, path)\n\n @property\n def font(self):\n path = \"font\"\n return self._get_value(path, path)\n\n @property\n def hide_letters(self):\n path = \"hide_letters\"\n return self._get_value(path, path)\n\n @property\n def group_notes_by_track(self):\n path = \"group_notes_by_track\"\n return self._get_value(path, path)\n\n @property\n def width(self):\n path = \"width\"\n return self._get_value(path, path)\n\n @property\n def height(self):\n path = \"height\"\n return self._get_value(path, path)\n\n @property\n def show_lines(self):\n path = \"show_graph_lines\"\n return self._get_value(path, path)\n\n @property\n def graph_line_width(self):\n path = \"graph_line_width\"\n return self._get_value(path, path)\n\n @property\n def graph_line_blur(self):\n path = \"graph_line_blur\"\n return self._get_value(path, path)\n\n @property\n def graph_line_color(self):\n path = \"graph_line_color\"\n return self._get_value(path, path)\n\n @property\n def font_size(self):\n path = \"font_size\"\n return self._get_value(path, path)\n\n @property\n def node_outline_color(self):\n path = \"node.outline_color\"\n return self._get_value(path, path)\n\n @property\n def 
node_fill_color(self):\n path = \"node.fill_color\"\n return self._get_value(path, path)\n\n @property\n def node_text_color(self):\n path = \"node.text.color\"\n return self._get_value(path, path)\n\n @property\n def node_text_outline_color(self):\n path = \"node.text.stroke_color\"\n return self._get_value(path, path)\n\n @property\n def node_text_stroke_width(self):\n path = \"node.text.stroke_width\"\n return self._get_value(path, path)\n\n @property\n def dpi(self):\n path = \"dpi\"\n return self._get_value(path, path)\n\n @property\n def text_location_offsets(self):\n path = \"text_location_offsets\"\n return self._get_value(path, path)\n\n @property\n def node_shadow_color(self):\n path = \"node.shadow_color\"\n return self._get_value(path, path)\n\n @property\n def node_shadow_size(self):\n path = \"node.shadow_size\"\n return self._get_value(path, path) / 100\n\n @property\n def tracks(self):\n return list(self._theme.tracks.keys())\n\n def note_num_frames(self, track):\n a = self._get_value(\n f\"tracks.{track}.note.num_frames\",\n default_path=f\"tracks.default.note.num_frames\",\n )\n return a\n\n def note_color(self, track):\n return self._get_value(\n f\"tracks.{track}.note.color\",\n default_path=f\"tracks.default.note.color\",\n )\n\n def note_stroke_width(self, track):\n return self._get_value(\n f\"tracks.{track}.note.stroke_width\",\n default_path=f\"tracks.default.note.stroke_width\",\n )\n\n def note_increase_size(self, track):\n return (\n self._get_value(\n f\"tracks.{track}.note.increase_size\",\n default_path=f\"tracks.default.note.increase_size\",\n )\n / 100\n )\n\n def chord_line_width(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.width\",\n default_path=f\"tracks.default.chord_line.width\",\n )\n\n def chord_line_border_color(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.border_color\",\n default_path=f\"tracks.default.chord_line.border_color\",\n )\n\n def chord_line_color(self, track):\n return self._get_value(\n f\"tracks.{track}.chord_line.color\",\n default_path=f\"tracks.default.chord_line.color\",\n )\n\n def ball_radius(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.radius\",\n default_path=f\"tracks.default.ball.radius\",\n )\n\n def ball_g_blur_max(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.g_blur_max\",\n default_path=f\"tracks.default.ball.g_blur_max\",\n )\n\n def ball_color(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.color\",\n default_path=f\"tracks.default.ball.color\",\n )\n\n def ball_stroke_color(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.stroke_color\",\n default_path=f\"tracks.default.ball.stroke_color\",\n )\n\n def ball_stroke_width(self, track):\n return self._get_value(\n f\"tracks.{track}.ball.stroke_width\",\n default_path=f\"tracks.default.ball.stroke_width\",\n )" }, { "identifier": "get_cache_dir", "path": "src/cache_stuff.py", "snippet": "def get_cache_dir():\n global _cache_dir_created, _cache_dir\n if not _cache_dir_created:\n _cache_dir = f\".cache/{uuid4()}\"\n os.makedirs(_cache_dir, exist_ok=True)\n _cache_dir_created = True\n return _cache_dir" } ]
import re import math from collections import defaultdict, namedtuple from PIL import Image, ImageDraw, ImageFilter, ImageFont from src.theme_stuff import Theme from src.cache_stuff import get_cache_dir
4,297
(point_center[0] + i, point_center[1] + i), ], fill=theme.ball_color(track), outline=hex_to_rgb(theme.ball_stroke_color(track)), width=theme.ball_stroke_width(track), ) blur_max = theme.ball_g_blur_max(track) if blur_max: blur_radius = min( animation_length_in_frames - frame_number, theme.ball_g_blur_max(track) / (frame_number + 1), ) overlay_image = overlay_image.filter( ImageFilter.GaussianBlur(radius=blur_radius) ) # Composite the transparent overlay onto the base image return Image.alpha_composite( base_image.convert("RGBA"), overlay_image, ) def animate_ellipsis_blur( base_image, points, frame_number, offsets, theme, track, animation_len, velocity, ): image = base_image.copy() draw = ImageDraw.Draw(image) x_offset, y_offset = offsets x0, y0, w, h = points x0 += x_offset y0 += y_offset # Calculate the increase in size w_increase = w * theme.note_increase_size(track) * (velocity / 127) h_increase = h * theme.note_increase_size(track) * (velocity / 127) # Define the bounding box with the increased size bounding_box = [ x0 - w - w_increase / 2, y0 - h - h_increase / 2, x0 + w + w_increase / 2, y0 + h + h_increase / 2, ] # Draw the initial ellipse draw.ellipse( bounding_box, outline=hex_to_rgb(theme.note_color(track)), width=theme.note_stroke_width(track), ) # Determine the blur radius for this frame blur_strength = (frame_number / animation_len) * velocity blur_radius = max(1, blur_strength) # Create a mask for the ellipse to constrain the blur effect mask = Image.new("L", image.size, 0) mask_draw = ImageDraw.Draw(mask) mask_draw.ellipse(bounding_box, fill=255) # Apply the blur effect on the mask mask_blurred = mask.filter(ImageFilter.GaussianBlur(blur_radius)) # Create a solid image for the blur color ellipse = Image.new("RGBA", image.size, hex_to_rgb(theme.note_color(track))) # Composite the blurred mask with the ellipse onto the base image image.paste(ellipse, mask=mask_blurred) return image def draw_centered_text( offsets, image, text, x, y, font_path, font_size, color, outline_color, stroke_width, ): font = ImageFont.truetype(font_path, font_size) draw = ImageDraw.Draw(image) x += offsets[0] y += offsets[1] draw.text( (x, y), text, font=font, fill=hex_to_rgb(color), stroke_width=stroke_width, stroke_fill=hex_to_rgb(outline_color), ) return image def paste_center(host_image, image): host_width, host_height = host_image.size width, height = image.size x = (host_width - width) // 2 y = (host_height - height) // 2 host_image.paste(image, (x, y), image) def get_node_positions(graph): """Draw a graph to a file, load it, then parse it's `node[pos] values, and return them"""
LINE_WIDTH = 3 Draw = namedtuple( "Draw", "pen_color fill_color p_points b_points e_points", ) LDraw = namedtuple( "LDraw", "font font_size pen_color text_x text_y text_w text_j text", ) def points_to_pixels(points, dpi): pixels = points * (dpi / 72) # 72 points per inch return pixels class Graph: def __init__(self, pen_color, fill_color, polygon_points): self._pen_color = pen_color self._fill_color = fill_color self._polygon_points = polygon_points def split_attributes(attr_string): attributes_dict = {} attr_lines = re.split(r",(?!\d)", attr_string) for attr_line in attr_lines: attrs = attr_line.replace('"', "") key, value = attrs.split("=") attributes_dict[key] = value return attributes_dict def compact_dot_format(file_content): file_content = file_content.replace("\n\t", "") file_content = file_content.replace("{", "{\n") file_content = file_content.replace(";", ";\n") file_content = file_content.replace(" ", "") file_content = file_content.replace("\n\n", "") file_content = file_content.replace("\\\n", "") file_content = file_content.replace("}", "") return file_content def split_numbers(sequence, n): # Create a regex pattern for n numbers pattern = r"((?:\b\d+\b\s*){" + str(n) + r"})(.*)" match = re.match(pattern, sequence) return match.groups() if match else (sequence, "") def array_chunk(lst, chunk_size): return [lst[i : i + chunk_size] for i in range(0, len(lst), chunk_size)] def parse_draw(draw_string, dpi): rest = draw_string.strip() pen_color = None fill_color = None p_points = None b_points = None e_points = None while rest.strip(): command, rest = rest[0], rest[2:] if command == "c": num, rest = int(rest[0]), rest[3:] pen_color, rest = rest[:num], rest[num + 1 :] continue if command == "C": num, rest = int(rest[0]), rest[3:] fill_color, rest = rest[:num], rest[num + 1 :] continue if command == "P": num, rest = int(rest[0]), rest[2:] p_points, rest = split_numbers(rest, num * 2) p_points = array_chunk([float(i) for i in p_points.split()], 2) continue if command == "e": e_points, rest = split_numbers(rest, 4) e_points = [float(i) for i in e_points.split()] continue if command == "B": num, rest = int(rest[0]), rest[2:] b_points, rest = split_numbers(rest, num) b_points = [float(i) for i in b_points.split()] continue raise Exception(rest) return Draw( fill_color=fill_color, pen_color=pen_color, p_points=[ [points_to_pixels(i[0], dpi), points_to_pixels(i[1], dpi)] for i in p_points ] if p_points else None, b_points=[points_to_pixels(i, dpi) for i in b_points] if b_points else None, e_points=[points_to_pixels(i, dpi) for i in e_points] if e_points else None, ) def parse_ldraw(ldraw_string, dpi): rest = ldraw_string.strip() font = None font_size = None pen_color = None text_x = None text_y = None text_w = None text_j = None text = None while rest.strip(): command, rest = rest[0], rest[2:] if command == "F": first_space = rest.find(" ") font_size, rest = int(rest[:first_space]), rest[first_space + 1 :] first_space = rest.find(" ") num, rest = int(rest[:first_space]), rest[first_space:] font, rest = ( rest[first_space : first_space + num], rest[first_space + num + 1 :], ) continue if command == "c": num, rest = int(rest[0]), rest[3:] pen_color, rest = rest[:num], rest[num + 1 :] continue if command == "T": nums, text = rest.split("-") text_x, text_y, text_j, text_w, text_len = [float(i) for i in nums.split()] rest = "" continue raise Exception(f"command = {command}; rest = {rest}") return LDraw( font=font, font_size=font_size, pen_color=pen_color, text_x=points_to_pixels(text_x, dpi), 
text_y=points_to_pixels(text_y, dpi), text_w=points_to_pixels(text_w, dpi), text_j=text_j, text=text, ) def get_dimensions(points): x_values = [point[0] for point in points] y_values = [point[1] for point in points] width = max(x_values) - min(x_values) height = max(y_values) - min(y_values) return int(width), int(height) def hex_to_rgb(hex_color): if not hex_color: return None if not isinstance(hex_color, str): return hex_color hex_color = hex_color.lstrip("#") return tuple(int(hex_color[i : i + 2], 16) for i in (0, 2, 4)) def draw_ellipse( offsets, image, e_points, node_outline_color=None, node_fill_color=None, node_shadow_color=None, node_shadow_size=None, line_width=None, ): x0, y0, w, h = e_points x0 += offsets[0] y0 += offsets[1] bounding_box = [ x0 - w, y0 - h, x0 + w, y0 + h, ] # Draw shadow if bg_fade and bg_fade_size are provided if node_shadow_color and node_shadow_size: # Calculate the size increase based on the percentage increase_w = w * node_shadow_size increase_h = h * node_shadow_size shadow_size = [ x0 - w - increase_w, y0 - h - increase_h, x0 + w + increase_w, y0 + h + increase_h, ] # Create a temporary image for the shadow temp_image = Image.new("RGBA", image.size, (0, 0, 0, 0)) temp_draw = ImageDraw.Draw(temp_image) # Draw and blur the shadow ellipse temp_draw.ellipse(shadow_size, fill=node_shadow_color) blur_radius = int(max(increase_w, increase_h)) / 2 # Gaussian blur radius temp_image = temp_image.filter(ImageFilter.GaussianBlur(radius=blur_radius)) # Merge shadow with the main image image.paste(temp_image, (0, 0), temp_image) draw = ImageDraw.Draw(image) # Draw the main ellipse draw.ellipse( bounding_box, outline=hex_to_rgb(node_outline_color) if node_outline_color else None, fill=hex_to_rgb(node_fill_color) if node_fill_color else None, width=line_width, ) def bezier_point(t, points): while len(points) > 1: points = [ tuple((1 - t) * p0 + t * p1 for p0, p1 in zip(points[i], points[i + 1])) for i in range(len(points) - 1) ] return points[0] def draw_bezier_curve(offsets, image, points, pen_color, line_width, blur_radius): # Create a transparent image to draw the curve curve_image = Image.new("RGBA", image.size, (0, 0, 0, 0)) draw = ImageDraw.Draw(curve_image) # Adjust points with offsets points = [points[i : i + 2] for i in range(0, len(points), 2)] points = [(c[0] + offsets[0], c[1] + offsets[1]) for c in points] # Define the bezier curve function def bezier_point(t, points): n = len(points) - 1 return tuple( sum( c * (t**i) * ((1 - t) ** (n - i)) * math.comb(n, i) for i, c in enumerate(coords) ) for coords in zip(*points) ) # Split the curve into segments and draw segments = 500 curve = [bezier_point(t / segments, points) for t in range(segments + 1)] for i in range(segments): draw.line((curve[i], curve[i + 1]), fill=pen_color, width=line_width) # Apply a blur filter to the curve image curve_image = curve_image.filter(ImageFilter.GaussianBlur(blur_radius)) # Composite the blurred curve onto the original image image.paste(curve_image, (0, 0), curve_image) return image def hex_to_rgba(color, alpha): return ( *int(color[1:], 16).to_bytes(3, "big"), alpha, ) def calculate_alpha(frame_number, total_frames): fade_in_end = total_frames * 0.1 fade_out_start = total_frames * 0.5 if frame_number < fade_in_end: # Fade in phase return int(255 * (frame_number / fade_in_end)) elif frame_number <= fade_out_start: # Full opacity phase return 255 else: # Fade out phase return int( 255 * ((total_frames - frame_number) / (total_frames - fade_out_start)) ) def 
draw_fading_bezier_curve( base_image, offsets, theme, points, frame_number, track, animation_len, ): # Create a new transparent image to draw the Bézier curve overlay_image = Image.new("RGBA", base_image.size, (255, 255, 255, 0)) draw = ImageDraw.Draw(overlay_image) # Calculate alpha value for current frame alpha = calculate_alpha(frame_number, animation_len) # Split the points into pairs and apply offsets points = [points[i : i + 2] for i in range(0, len(points), 2)] points = [(c[0] + offsets[0], c[1] + offsets[1]) for c in points] # Split the curve into segments segments = 300 * len(points) curve = [bezier_point(t / segments, points) for t in range(segments + 1)] # Draw the border/shadow border_width = theme.chord_line_width(track) * 2 border_rgba_color = hex_to_rgba(theme.chord_line_border_color(track), alpha) for i in range(segments): draw.line( ( curve[i], curve[i + 1], ), fill=border_rgba_color, width=border_width, ) overlay_image = overlay_image.filter(ImageFilter.GaussianBlur(radius=5)) draw = ImageDraw.Draw(overlay_image) # Convert hex color to RGBA with alpha for main line rgba_color = hex_to_rgba(theme.chord_line_color(track), alpha) # Draw the main line with fading effect for i in range(segments): draw.line( ( curve[i], curve[i + 1], ), fill=rgba_color, width=theme.chord_line_width(track), ) # Composite the transparent overlay onto the base image return Image.alpha_composite( base_image.convert("RGBA"), overlay_image, ) def animate_bezier_point( base_image, offsets, theme, track, points, frame_number, animation_length_in_frames, ): overlay_image = Image.new( "RGBA", base_image.size, color=None, ) draw = ImageDraw.Draw(overlay_image) x_offset, y_offset = offsets t = frame_number / animation_length_in_frames point = bezier_point(t, [points[i : i + 2] for i in range(0, len(points), 2)]) point_center = (x_offset + point[0], y_offset + point[1]) # Draw the 3D-looking circle for i in range(theme.ball_radius(track) // 2): # Calculate the color gradient based on the specified ball color draw.ellipse( [ (point_center[0] - i, point_center[1] - i), (point_center[0] + i, point_center[1] + i), ], fill=theme.ball_color(track), outline=hex_to_rgb(theme.ball_stroke_color(track)), width=theme.ball_stroke_width(track), ) blur_max = theme.ball_g_blur_max(track) if blur_max: blur_radius = min( animation_length_in_frames - frame_number, theme.ball_g_blur_max(track) / (frame_number + 1), ) overlay_image = overlay_image.filter( ImageFilter.GaussianBlur(radius=blur_radius) ) # Composite the transparent overlay onto the base image return Image.alpha_composite( base_image.convert("RGBA"), overlay_image, ) def animate_ellipsis_blur( base_image, points, frame_number, offsets, theme, track, animation_len, velocity, ): image = base_image.copy() draw = ImageDraw.Draw(image) x_offset, y_offset = offsets x0, y0, w, h = points x0 += x_offset y0 += y_offset # Calculate the increase in size w_increase = w * theme.note_increase_size(track) * (velocity / 127) h_increase = h * theme.note_increase_size(track) * (velocity / 127) # Define the bounding box with the increased size bounding_box = [ x0 - w - w_increase / 2, y0 - h - h_increase / 2, x0 + w + w_increase / 2, y0 + h + h_increase / 2, ] # Draw the initial ellipse draw.ellipse( bounding_box, outline=hex_to_rgb(theme.note_color(track)), width=theme.note_stroke_width(track), ) # Determine the blur radius for this frame blur_strength = (frame_number / animation_len) * velocity blur_radius = max(1, blur_strength) # Create a mask for the ellipse to constrain the 
blur effect mask = Image.new("L", image.size, 0) mask_draw = ImageDraw.Draw(mask) mask_draw.ellipse(bounding_box, fill=255) # Apply the blur effect on the mask mask_blurred = mask.filter(ImageFilter.GaussianBlur(blur_radius)) # Create a solid image for the blur color ellipse = Image.new("RGBA", image.size, hex_to_rgb(theme.note_color(track))) # Composite the blurred mask with the ellipse onto the base image image.paste(ellipse, mask=mask_blurred) return image def draw_centered_text( offsets, image, text, x, y, font_path, font_size, color, outline_color, stroke_width, ): font = ImageFont.truetype(font_path, font_size) draw = ImageDraw.Draw(image) x += offsets[0] y += offsets[1] draw.text( (x, y), text, font=font, fill=hex_to_rgb(color), stroke_width=stroke_width, stroke_fill=hex_to_rgb(outline_color), ) return image def paste_center(host_image, image): host_width, host_height = host_image.size width, height = image.size x = (host_width - width) // 2 y = (host_height - height) // 2 host_image.paste(image, (x, y), image) def get_node_positions(graph): """Draw a graph to a file, load it, then parse it's `node[pos] values, and return them"""
temp_filename = f"{get_cache_dir()}/graph_order"
1
2023-11-17 17:56:04+00:00
8k
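The `graph_stuff.py` record parses Graphviz xdot draw commands and animates notes along Bézier edge splines via `bezier_point`. Below is a small, self-contained check of that de Casteljau evaluation; the control points are made up for illustration, whereas in the tool they come from the parsed "B" draw operations.

```python
# Minimal de Casteljau evaluation, mirroring the first bezier_point helper in graph_stuff.py.
def bezier_point(t, points):
    while len(points) > 1:
        points = [
            tuple((1 - t) * p0 + t * p1 for p0, p1 in zip(points[i], points[i + 1]))
            for i in range(len(points) - 1)
        ]
    return points[0]

control = [(0.0, 0.0), (50.0, 100.0), (100.0, 0.0)]  # an assumed quadratic curve
print(bezier_point(0.0, control))   # (0.0, 0.0)   -> start point
print(bezier_point(0.5, control))   # (50.0, 50.0) -> midpoint of the arch
print(bezier_point(1.0, control))   # (100.0, 0.0) -> end point
```

Because the loop collapses the control polygon one level per iteration, the same helper handles cubic Graphviz splines (four or more control points) without change.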