Dataset schema (one row per example):

Column               Type     Range / values
repo_name            string   length 7-71
file_path            string   length 5-118
context              list
import_statement     string   length 45-12.5k
token_num            int64    641-99.4k
cropped_code         string   length 44-17k
all_code             string   length 43-754k
next_line            string   length 2-330
gold_snippet_index   int64    0-68
created_at           string   length 25 (fixed)
level                string   9 classes
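Taken together, these columns describe a repository-level next-line completion benchmark: each row pairs a cropped source file and retrieved cross-file context snippets with the single gold line that should come next. As a minimal sketch (assuming the dataset is hosted on the Hugging Face Hub; the identifier below is a placeholder, not its real name), it could be loaded like this:

```python
from datasets import load_dataset

# "user/repo-level-next-line" is a hypothetical identifier used only for
# illustration; substitute the dataset's actual Hub name.
ds = load_dataset("user/repo-level-next-line", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])
print(row["next_line"])  # the gold next line a model must predict
```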

repo_name: YaoFANGUK/video-subtitle-remover
file_path: backend/inpaint/video/raft/corr.py
context:
[ { "identifier": "bilinear_sampler", "path": "backend/inpaint/video/raft/utils/utils.py", "snippet": "def bilinear_sampler(img, coords, mode='bilinear', mask=False):\n \"\"\" Wrapper for grid_sample, uses pixel coordinates \"\"\"\n H, W = img.shape[-2:]\n xgrid, ygrid = coords.split([1,1], dim=-1)\n xgrid = 2*xgrid/(W-1) - 1\n ygrid = 2*ygrid/(H-1) - 1\n\n grid = torch.cat([xgrid, ygrid], dim=-1)\n img = F.grid_sample(img, grid, align_corners=True)\n\n if mask:\n mask = (xgrid > -1) & (ygrid > -1) & (xgrid < 1) & (ygrid < 1)\n return img, mask.float()\n\n return img" }, { "identifier": "coords_grid", "path": "backend/inpaint/video/raft/utils/utils.py", "snippet": "def coords_grid(batch, ht, wd):\n coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))\n coords = torch.stack(coords[::-1], dim=0).float()\n return coords[None].repeat(batch, 1, 1, 1)" } ]
import torch import torch.nn.functional as F import alt_cuda_corr from .utils.utils import bilinear_sampler, coords_grid
token_num: 673
cropped_code:
try: except: # alt_cuda_corr is not compiled pass class CorrBlock: def __init__(self, fmap1, fmap2, num_levels=4, radius=4): self.num_levels = num_levels self.radius = radius self.corr_pyramid = [] # all pairs correlation corr = CorrBlock.corr(fmap1, fmap2) batch, h1, w1, dim, h2, w2 = corr.shape corr = corr.reshape(batch*h1*w1, dim, h2, w2) self.corr_pyramid.append(corr) for i in range(self.num_levels-1): corr = F.avg_pool2d(corr, 2, stride=2) self.corr_pyramid.append(corr) def __call__(self, coords): r = self.radius coords = coords.permute(0, 2, 3, 1) batch, h1, w1, _ = coords.shape out_pyramid = [] for i in range(self.num_levels): corr = self.corr_pyramid[i] dx = torch.linspace(-r, r, 2*r+1) dy = torch.linspace(-r, r, 2*r+1) delta = torch.stack(torch.meshgrid(dy, dx), axis=-1).to(coords.device) centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) coords_lvl = centroid_lvl + delta_lvl
next_line: corr = bilinear_sampler(corr, coords_lvl)
gold_snippet_index: 0
created_at: 2023-10-25 02:50:01+00:00
level: 2k
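The row above shows how the fields fit together: `context` holds retrieved cross-file snippets (here, `bilinear_sampler` and `coords_grid` from `utils.py`), `cropped_code` is the in-file prefix, and `next_line` is the gold continuation. Below is a sketch of one way to assemble these into a (prompt, target) pair; the concatenation order is an assumption, not a format the dataset prescribes.

```python
def build_prompt(row: dict) -> tuple[str, str]:
    # Retrieved cross-file snippets first, then the file's imports and
    # its cropped prefix; the model is asked to produce `next_line`.
    snippets = "\n".join(c["snippet"] for c in row["context"])
    prompt = f"{snippets}\n{row['import_statement']}\n{row['cropped_code']}"
    return prompt, row["next_line"]
```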

repo_name: Genesis-Embodied-AI/RoboGen
file_path: objaverse_utils/find_uid_utils.py
context:
[ { "identifier": "text_to_uid_dict", "path": "objaverse_utils/utils.py", "snippet": "" }, { "identifier": "check_text_similarity", "path": "gpt_4/verification.py", "snippet": "def check_text_similarity(text, check_list=None, check_embeddings=None):\n global sentence_bert_model\n if sentence_bert_model is None:\n sentence_bert_model = SentenceTransformer('all-mpnet-base-v2')\n\n #Sentences are encoded by calling model.encode()\n with torch.no_grad():\n emb1 = sentence_bert_model.encode(text)\n if check_embeddings is None:\n emb_to_check = sentence_bert_model.encode(check_list)\n else:\n emb_to_check = check_embeddings\n cos_sim = util.cos_sim(emb1, emb_to_check)\n\n return cos_sim.cpu().numpy()" }, { "identifier": "verify_objaverse_object", "path": "gpt_4/bard_verify.py", "snippet": "def verify_objaverse_object(object_name, uid, task_name=None, task_description=None, use_bard=False, use_blip2=True):\n annotations = objaverse.load_annotations([uid])[uid]\n thumbnail_urls = annotations['thumbnails'][\"images\"]\n\n max_size = -1000\n max_url = -1\n for dict in thumbnail_urls:\n width = dict[\"width\"]\n if width > max_size:\n max_size = width\n max_url = dict[\"url\"]\n if max_url == -1: # TODO: in this case, we should render the object using blender to get the image.\n return False\n \n # download the image from the url\n try: \n raw_image = Image.open(requests.get(max_url, stream=True).raw).convert('RGB')\n except:\n return False\n \n if not os.path.exists('objaverse_utils/data/images'):\n os.makedirs('objaverse_utils/data/images')\n \n raw_image.save(\"objaverse_utils/data/images/{}.jpeg\".format(uid))\n bard_image = open(\"objaverse_utils/data/images/{}.jpeg\".format(uid), \"rb\").read()\n\n descriptions = []\n if use_bard:\n bard_description = bard_verify(bard_image)\n descriptions.append(bard_description)\n if use_blip2:\n blip2_description = blip2_caption(raw_image)\n descriptions.append(blip2_description)\n\n gpt_results = []\n\n for description in descriptions:\n if description:\n system = \"You are a helpful assistant.\"\n query_string = \"\"\"\n A robotic arm is trying to solve a task to learn a manipulation skill in a simulator.\n We are trying to find the best objects to load into the simulator to build this task for the robot to learn the skill.\n The task the robot is trying to learn is: {}. \n A more detailed description of the task is: {}.\n As noted, to build the task in the simulator, we need to find this object: {}.\n We are retrieving the object from an existing database, which provides some language annotations for the object.\n With the given lanugage annotation, please think if the object can be used in the simulator as {} for learning the task {}.\n\n This is the language annotation:\n {}\n\n Please reply first with your reasoning, and then a single line with \"**yes**\" or \"**no**\" to indicate whether this object can be used.\n \"\"\".format(task_name, task_description, object_name, object_name, task_name, description)\n \n if not os.path.exists('data/debug'):\n os.makedirs('data/debug')\n res = query(system, [query_string], [], save_path='data/debug/verify.json', temperature=0)\n \n responses = res.split(\"\\n\")\n\n useable = False\n for l_idx, line in enumerate(responses):\n if \"yes\" in line.lower():\n useable = True\n break\n\n gpt_results.append(useable)\n\n return np.alltrue(gpt_results)" } ]
import pandas as pd import torch import numpy as np import json from objaverse_utils.utils import text_to_uid_dict from gpt_4.verification import check_text_similarity from gpt_4.bard_verify import verify_objaverse_object
token_num: 1,182
cropped_code:
objaverse_csv = pd.read_csv('objaverse_utils/Cap3D_automated_Objaverse.csv') objaverse_csv = objaverse_csv.dropna() objaverse_csv_uids = list(objaverse_csv.iloc[:, 0].values) objaverse_csv_annotations = list(objaverse_csv.iloc[:, 1].values) objaverse_csv_annotations_embeddings = torch.load("objaverse_utils/data/cap3d_sentence_bert_embeddings.pt") tag_uids = [] tag_embeddings = [] tag_descriptions = [] num_chunks = 31 for idx in range(num_chunks): uids = torch.load("objaverse_utils/data/default_tag_uids_{}.pt".format(idx)) embeddings = torch.load("objaverse_utils/data/default_tag_embeddings_{}.pt".format(idx)) descriptions = torch.load("objaverse_utils/data/default_tag_names_{}.pt".format(idx)) tag_uids = tag_uids + uids tag_descriptions = tag_descriptions + descriptions tag_embeddings.append(embeddings) def find_uid(obj_descrption, candidate_num=10, debug=False, task_name=None, task_description=None):
next_line: uids = text_to_uid_dict.get(obj_descrption, None)
gold_snippet_index: 0
created_at: 2023-10-31 19:44:09+00:00
level: 2k

repo_name: junhoyeo/BetterOCR
file_path: betterocr/detect.py
context:
[ { "identifier": "extract_json", "path": "betterocr/parsers.py", "snippet": "def extract_json(input_string):\n # Find the JSON in the string\n matches = re.findall(r'{\\s*\"data\"\\s*:\\s*\"(.*?)\"\\s*}', input_string, re.DOTALL)\n if matches:\n # Correctly escape special characters\n matches = [m.replace(\"\\n\", \"\\\\n\").replace('\"', '\\\\\"') for m in matches]\n for match in matches:\n # Construct JSON string\n json_string = f'{{\"data\": \"{match}\"}}'\n try:\n # Load the JSON and return the data\n json_obj = json.loads(json_string)\n return json_obj\n except json.decoder.JSONDecodeError:\n continue\n\n # If no JSON found, return None\n return None" }, { "identifier": "extract_list", "path": "betterocr/parsers.py", "snippet": "def extract_list(s):\n stack = []\n start_position = None\n\n # Iterate through each character in the string\n for i, c in enumerate(s):\n if c == \"[\":\n if start_position is None: # First '[' found\n start_position = i\n stack.append(c)\n\n elif c == \"]\":\n if stack:\n stack.pop()\n\n # If stack is empty and start was marked\n if not stack and start_position is not None:\n substring = s[start_position : i + 1]\n try:\n list_obj = json.loads(substring)\n for item in list_obj:\n if \"box\" in item and \"text\" in item:\n return list_obj\n except json.decoder.JSONDecodeError:\n # Reset the stack and start position as this isn't a valid JSON\n stack = []\n start_position = None\n continue\n\n # If no valid list found, return None\n return None" }, { "identifier": "rectangle_corners", "path": "betterocr/parsers.py", "snippet": "def rectangle_corners(rect):\n x, y, w, h = rect\n return [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]" }, { "identifier": "job_easy_ocr", "path": "betterocr/wrappers/easy_ocr.py", "snippet": "def job_easy_ocr(_options):\n reader = easyocr.Reader(_options[\"lang\"])\n text = reader.readtext(_options[\"path\"], detail=0)\n text = \"\".join(text)\n print(\"[*] job_easy_ocr\", text)\n return text" }, { "identifier": "job_easy_ocr_boxes", "path": "betterocr/wrappers/easy_ocr.py", "snippet": "def job_easy_ocr_boxes(_options):\n reader = easyocr.Reader(_options[\"lang\"])\n boxes = reader.readtext(_options[\"path\"], output_format=\"dict\")\n for box in boxes:\n box[\"box\"] = box.pop(\"boxes\")\n return boxes" }, { "identifier": "job_tesseract", "path": "betterocr/wrappers/tesseract/job.py", "snippet": "def job_tesseract(_options):\n lang = convert_to_tesseract_lang_code(_options[\"lang\"])\n text = pytesseract.image_to_string(\n _options[\"path\"],\n lang=lang,\n **_options[\"tesseract\"]\n # pass rest of tesseract options here.\n )\n text = text.replace(\"\\n\", \"\\\\n\")\n print(\"[*] job_tesseract_ocr\", text)\n return text" }, { "identifier": "job_tesseract_boxes", "path": "betterocr/wrappers/tesseract/job.py", "snippet": "def job_tesseract_boxes(_options):\n lang = convert_to_tesseract_lang_code(_options[\"lang\"])\n df = pytesseract.image_to_data(\n _options[\"path\"],\n lang=lang,\n **_options[\"tesseract\"],\n output_type=pytesseract.Output.DATAFRAME\n # pass rest of tesseract options here.\n )\n\n # https://stackoverflow.com/questions/74221064/draw-a-rectangle-around-a-string-of-words-using-pytesseract\n boxes = []\n for line_num, words_per_line in df.groupby(\"line_num\"):\n words_per_line = words_per_line[words_per_line[\"conf\"] >= 5]\n if len(words_per_line) == 0:\n continue\n\n words = words_per_line[\"text\"].values\n line = \" \".join(words)\n\n word_boxes = []\n for left, top, width, height in words_per_line[\n 
[\"left\", \"top\", \"width\", \"height\"]\n ].values:\n word_boxes.append((left, top))\n word_boxes.append((left + width, top + height))\n\n x, y, w, h = cv2.boundingRect(np.array(word_boxes))\n boxes.append(\n {\n \"box\": [[x, y], [x + w, y], [x + w, y + h], [x, y + h]],\n \"text\": line,\n }\n )\n\n return boxes" } ]
from threading import Thread from queue import Queue from openai import OpenAI from .parsers import extract_json, extract_list, rectangle_corners from .wrappers import ( job_easy_ocr, job_easy_ocr_boxes, job_tesseract, job_tesseract_boxes, ) from .wrappers.easy_pororo_ocr import job_easy_pororo_ocr from .wrappers.easy_pororo_ocr import job_easy_pororo_ocr_boxes import json import os
token_num: 1,298
cropped_code:
def wrapper(func, args, queue): queue.put(func(args)) # custom error class NoTextDetectedError(Exception): pass def detect(): """Unimplemented""" raise NotImplementedError def detect_async(): """Unimplemented""" raise NotImplementedError def get_jobs(languages: list[str], boxes=False): jobs = [
next_line: job_easy_ocr if not boxes else job_easy_ocr_boxes,
gold_snippet_index: 3
created_at: 2023-10-26 11:26:25+00:00
level: 2k
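In this row, `gold_snippet_index` is 3 and the context entry at index 3 is `job_easy_ocr`, the symbol that appears on `next_line`; the same pattern holds for the other rows shown here. Below is a small sketch of that inferred semantics (an assumption based on these rows, not documented behavior).

```python
def gold_context_snippet(row: dict) -> dict:
    # Assumed semantics: gold_snippet_index selects the context entry
    # whose identifier is used on the gold next line (e.g. index 3 ->
    # job_easy_ocr in the BetterOCR row above).
    gold = row["context"][row["gold_snippet_index"]]
    assert gold["identifier"] in row["next_line"]
    return gold
```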

repo_name: KoeAI/LLVC
file_path: infer.py
context:
[ { "identifier": "Net", "path": "model.py", "snippet": "class Net(nn.Module):\n def __init__(self, label_len, L=8,\n enc_dim=512, num_enc_layers=10,\n dec_dim=256, dec_buf_len=100, num_dec_layers=2,\n dec_chunk_size=72, out_buf_len=2,\n use_pos_enc=True, skip_connection=True, proj=True, lookahead=True, decoder_dropout=0.0, convnet_config=None):\n super(Net, self).__init__()\n self.L = L\n self.dec_chunk_size = dec_chunk_size\n self.out_buf_len = out_buf_len\n self.enc_dim = enc_dim\n self.lookahead = lookahead\n\n self.convnet_config = convnet_config\n if convnet_config['convnet_prenet']:\n self.convnet_pre = CachedConvNet(\n 1, convnet_config['kernel_sizes'], convnet_config['dilations'],\n convnet_config['dropout'], convnet_config['combine_residuals'],\n convnet_config['use_residual_blocks'], convnet_config['out_channels'],\n use_2d=False)\n\n # Input conv to convert input audio to a latent representation\n kernel_size = 3 * L if lookahead else L\n self.in_conv = nn.Sequential(\n nn.Conv1d(in_channels=1,\n out_channels=enc_dim, kernel_size=kernel_size, stride=L,\n padding=0, bias=False),\n nn.ReLU())\n\n # Label embedding layer\n label_len = 1\n self.label_embedding = nn.Sequential(\n nn.Linear(label_len, 512),\n nn.LayerNorm(512),\n nn.ReLU(),\n nn.Linear(512, enc_dim),\n nn.LayerNorm(enc_dim),\n nn.ReLU())\n\n # Mask generator\n self.mask_gen = MaskNet(\n enc_dim=enc_dim, num_enc_layers=num_enc_layers,\n dec_dim=dec_dim, dec_buf_len=dec_buf_len,\n dec_chunk_size=dec_chunk_size, num_dec_layers=num_dec_layers,\n use_pos_enc=use_pos_enc, skip_connection=skip_connection, proj=proj, decoder_dropout=decoder_dropout)\n\n # Output conv layer\n self.out_conv = nn.Sequential(\n nn.ConvTranspose1d(\n in_channels=enc_dim, out_channels=1,\n kernel_size=(out_buf_len + 1) * L,\n stride=L,\n padding=out_buf_len * L, bias=False),\n nn.Tanh())\n\n def init_buffers(self, batch_size, device):\n enc_buf = self.mask_gen.encoder.init_ctx_buf(batch_size, device)\n dec_buf = self.mask_gen.decoder.init_ctx_buf(batch_size, device)\n out_buf = torch.zeros(batch_size, self.enc_dim, self.out_buf_len,\n device=device)\n return enc_buf, dec_buf, out_buf\n\n def forward(self, x, init_enc_buf=None, init_dec_buf=None,\n init_out_buf=None, convnet_pre_ctx=None, pad=True):\n \"\"\"\n Extracts the audio corresponding to the `label` in the given\n `mixture`. 
Generates `chunk_size` samples per iteration.\n\n Args:\n mixed: [B, n_mics, T]\n input audio mixture\n label: [B, num_labels]\n one hot label\n Returns:\n out: [B, n_spk, T]\n extracted audio with sounds corresponding to the `label`\n \"\"\"\n label = torch.zeros(x.shape[0], 1, device=x.device)\n mod = 0\n if pad:\n pad_size = (self.L, self.L) if self.lookahead else (0, 0)\n x, mod = mod_pad(x, chunk_size=self.L, pad=pad_size)\n\n if hasattr(self, 'convnet_pre'):\n if convnet_pre_ctx is None:\n convnet_pre_ctx = self.convnet_pre.init_ctx_buf(\n x.shape[0], x.device)\n\n convnet_out, convnet_pre_ctx = self.convnet_pre(x, convnet_pre_ctx)\n\n if self.convnet_config['skip_connection'] == 'add':\n x = x + convnet_out\n elif self.convnet_config['skip_connection'] == 'multiply':\n x = x * convnet_out\n else:\n x = convnet_out\n\n if init_enc_buf is None or init_dec_buf is None or init_out_buf is None:\n assert init_enc_buf is None and \\\n init_dec_buf is None and \\\n init_out_buf is None, \\\n \"Both buffers have to initialized, or \" \\\n \"both of them have to be None.\"\n enc_buf, dec_buf, out_buf = self.init_buffers(\n x.shape[0], x.device)\n else:\n enc_buf, dec_buf, out_buf, = \\\n init_enc_buf, init_dec_buf, init_out_buf\n\n # Generate latent space representation of the input\n x = self.in_conv(x)\n\n # Generate label embedding\n l = self.label_embedding(label) # [B, label_len] --> [B, channels]\n\n # Generate mask corresponding to the label\n m, enc_buf, dec_buf = self.mask_gen(x, l, enc_buf, dec_buf)\n\n # Apply mask and decode\n x = x * m\n x = torch.cat((out_buf, x), dim=-1)\n out_buf = x[..., -self.out_buf_len:]\n x = self.out_conv(x)\n\n # Remove mod padding, if present.\n if mod != 0:\n x = x[:, :, :-mod]\n\n if init_enc_buf is None:\n return x\n else:\n return x, enc_buf, dec_buf, out_buf, convnet_pre_ctx" }, { "identifier": "glob_audio_files", "path": "utils.py", "snippet": "def glob_audio_files(dir):\n ext_list = [\"wav\", \"mp3\", \"flac\"]\n audio_files = []\n for ext in ext_list:\n audio_files.extend(glob.glob(\n os.path.join(dir, f\"**/*.{ext}\"), recursive=True))\n return audio_files" } ]
import_statement:
from model import Net from utils import glob_audio_files from tqdm import tqdm import torch import torchaudio import time import numpy as np import argparse import json import os
token_num: 1,507
cropped_code:
def load_model(checkpoint_path, config_path): with open(config_path) as f: config = json.load(f)
next_line: model = Net(**config['model_params'])
gold_snippet_index: 0
created_at: 2023-10-28 01:58:49+00:00
level: 2k

repo_name: aurelio-labs/semantic-router
file_path: semantic_router/llms/llamacpp.py
context:
[ { "identifier": "BaseLLM", "path": "semantic_router/llms/base.py", "snippet": "class BaseLLM(BaseModel):\n name: str\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, name: str, **kwargs):\n super().__init__(name=name, **kwargs)\n\n def __call__(self, messages: List[Message]) -> Optional[str]:\n raise NotImplementedError(\"Subclasses must implement this method\")\n\n def _is_valid_inputs(\n self, inputs: dict[str, Any], function_schema: dict[str, Any]\n ) -> bool:\n \"\"\"Validate the extracted inputs against the function schema\"\"\"\n try:\n # Extract parameter names and types from the signature string\n signature = function_schema[\"signature\"]\n param_info = [param.strip() for param in signature[1:-1].split(\",\")]\n param_names = [info.split(\":\")[0].strip() for info in param_info]\n param_types = [\n info.split(\":\")[1].strip().split(\"=\")[0].strip() for info in param_info\n ]\n\n for name, type_str in zip(param_names, param_types):\n if name not in inputs:\n logger.error(f\"Input {name} missing from query\")\n return False\n return True\n except Exception as e:\n logger.error(f\"Input validation error: {str(e)}\")\n return False\n\n def extract_function_inputs(\n self, query: str, function_schema: dict[str, Any]\n ) -> dict:\n logger.info(\"Extracting function input...\")\n\n prompt = f\"\"\"\n You are a helpful assistant designed to output JSON.\n Given the following function schema\n << {function_schema} >>\n and query\n << {query} >>\n extract the parameters values from the query, in a valid JSON format.\n Example:\n Input:\n query: \"How is the weather in Hawaii right now in International units?\"\n schema:\n {{\n \"name\": \"get_weather\",\n \"description\": \"Useful to get the weather in a specific location\",\n \"signature\": \"(location: str, degree: str) -> str\",\n \"output\": \"<class 'str'>\",\n }}\n\n Result: {{\n \"location\": \"London\",\n \"degree\": \"Celsius\",\n }}\n\n Input:\n query: {query}\n schema: {function_schema}\n Result:\n \"\"\"\n llm_input = [Message(role=\"user\", content=prompt)]\n output = self(llm_input)\n if not output:\n raise Exception(\"No output generated for extract function input\")\n\n output = output.replace(\"'\", '\"').strip().rstrip(\",\")\n\n function_inputs = json.loads(output)\n if not self._is_valid_inputs(function_inputs, function_schema):\n raise ValueError(\"Invalid inputs\")\n return function_inputs" }, { "identifier": "Message", "path": "semantic_router/schema.py", "snippet": "class Message(BaseModel):\n role: str\n content: str\n\n def to_openai(self):\n if self.role.lower() not in [\"user\", \"assistant\", \"system\"]:\n raise ValueError(\"Role must be either 'user', 'assistant' or 'system'\")\n return {\"role\": self.role, \"content\": self.content}\n\n def to_cohere(self):\n return {\"role\": self.role, \"message\": self.content}\n\n def to_llamacpp(self):\n return {\"role\": self.role, \"content\": self.content}" }, { "identifier": "logger", "path": "semantic_router/utils/logger.py", "snippet": "class CustomFormatter(colorlog.ColoredFormatter):\n def __init__(self):\ndef add_coloured_handler(logger):\ndef setup_custom_logger(name):" } ]
from contextlib import contextmanager from pathlib import Path from typing import Any, Optional from llama_cpp import Llama, LlamaGrammar from semantic_router.llms.base import BaseLLM from semantic_router.schema import Message from semantic_router.utils.logger import logger
token_num: 1,212
cropped_code:
class LlamaCppLLM(BaseLLM): llm: Llama temperature: float max_tokens: Optional[int] = 200 grammar: Optional[LlamaGrammar] = None def __init__( self, llm: Llama, name: str = "llama.cpp", temperature: float = 0.2, max_tokens: Optional[int] = 200, grammar: Optional[LlamaGrammar] = None, ): super().__init__( name=name, llm=llm, temperature=temperature, max_tokens=max_tokens, grammar=grammar, ) self.llm = llm self.temperature = temperature self.max_tokens = max_tokens self.grammar = grammar def __call__( self, messages: list[Message], ) -> str: try: completion = self.llm.create_chat_completion( messages=[m.to_llamacpp() for m in messages], temperature=self.temperature, max_tokens=self.max_tokens, grammar=self.grammar, stream=False, ) assert isinstance(completion, dict) # keep mypy happy output = completion["choices"][0]["message"]["content"] if not output: raise Exception("No output generated") return output except Exception as e:
logger.error(f"LLM error: {e}")
2
2023-10-30 12:12:45+00:00
2k

repo_name: baaivision/JudgeLM
file_path: judgelm/llm_judge/gen_model_judgement_mmvet.py
context:
[ { "identifier": "load_questions", "path": "judgelm/llm_judge/common.py", "snippet": "def parse_score(review):\ndef translate_score_to_win_list(score_list, T=0.0):\ndef generate_question_template(domain, question1, question2):\ndef reorg_answer_file(answer_file):\n def __init__(self, keywords, tokenizer, input_ids):\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n def get_prompt(self):\n def append_message(self, role, message):\n def to_gradio_chatbot(self):\n def copy(self, answer_num):\n def dict(self):\ndef load_questions(question_file: str, begin: Optional[int], end: Optional[int]):\n Q = (\"Human\", \"Provide a question in [\" + domain + \"] domain just like {\" + question1 + \"}, your provided question must be different from the questions that we have mentioned in this conversation.\")\n A = (\"Assistant\", \"Certainly! Here's another question in a [\" + domain + \"] domain: {\" + question2 + \"}\")\n SINGLE = auto()\n TWO = auto()\nclass SeparatorStyle(Enum):\nclass KeywordsStoppingCriteria(StoppingCriteria):\nclass Conversation:" }, { "identifier": "load_model", "path": "judgelm/model/model_adapter.py", "snippet": "def load_model(self, model_path: str, from_pretrained_kwargs: dict):\n revision = from_pretrained_kwargs.get(\"revision\", \"main\")\n try:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n use_fast=self.use_fast_tokenizer,\n revision=revision,\n )\n except TypeError:\n tokenizer = AutoTokenizer.from_pretrained(\n model_path,\n use_fast=False,\n revision=revision,\n )\n try:\n model = AutoModelForCausalLM.from_pretrained(\n model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs\n )\n except NameError:\n model = AutoModel.from_pretrained(\n model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs\n )\n return model, tokenizer" } ]
import argparse import json import os import time import shortuuid import torch import sys import random import ray from tqdm import tqdm from pathlib import Path # if you haven't already done so from judgelm.llm_judge.common import load_questions, reorg_answer_file, conv_judge_vqa_single_answer, KeywordsStoppingCriteria from judgelm.model import load_model
token_num: 937
cropped_code:
"""Generate answers with local models. """ file = Path(__file__).resolve() root = file.parents[2] sys.path.append(str(root)) print(sys.path) def run_eval( model_path, model_id, question_file, question_begin, question_end, answer_file, max_new_token, num_gpus_per_model, num_gpus_total, max_gpu_memory, temperature, if_fast_eval ): questions = load_questions(question_file, question_begin, question_end) # Split the question file into `num_gpus` files assert num_gpus_total % num_gpus_per_model == 0 use_ray = num_gpus_total // num_gpus_per_model > 1 if use_ray: get_answers_func = ray.remote(num_gpus=num_gpus_per_model)( get_model_answers ).remote else: get_answers_func = get_model_answers chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model) # // 2 ans_handles = [] for i in range(0, len(questions), chunk_size): ans_handles.append( get_answers_func( model_path, model_id, questions[i : i + chunk_size], answer_file, max_new_token, num_gpus_per_model, max_gpu_memory, temperature, if_fast_eval, ) ) if use_ray: ray.get(ans_handles) @torch.inference_mode() def get_model_answers( model_path, model_id, questions, answer_file, max_new_token, num_gpus_per_model, max_gpu_memory, temperature, if_fast_eval, ):
"""Generate answers with local models. """ file = Path(__file__).resolve() root = file.parents[2] sys.path.append(str(root)) print(sys.path) def run_eval( model_path, model_id, question_file, question_begin, question_end, answer_file, max_new_token, num_gpus_per_model, num_gpus_total, max_gpu_memory, temperature, if_fast_eval ): questions = load_questions(question_file, question_begin, question_end) # Split the question file into `num_gpus` files assert num_gpus_total % num_gpus_per_model == 0 use_ray = num_gpus_total // num_gpus_per_model > 1 if use_ray: get_answers_func = ray.remote(num_gpus=num_gpus_per_model)( get_model_answers ).remote else: get_answers_func = get_model_answers chunk_size = len(questions) // (num_gpus_total // num_gpus_per_model) # // 2 ans_handles = [] for i in range(0, len(questions), chunk_size): ans_handles.append( get_answers_func( model_path, model_id, questions[i : i + chunk_size], answer_file, max_new_token, num_gpus_per_model, max_gpu_memory, temperature, if_fast_eval, ) ) if use_ray: ray.get(ans_handles) @torch.inference_mode() def get_model_answers( model_path, model_id, questions, answer_file, max_new_token, num_gpus_per_model, max_gpu_memory, temperature, if_fast_eval, ):
next_line: model, tokenizer = load_model(
gold_snippet_index: 1
created_at: 2023-10-26 19:41:07+00:00
level: 2k

repo_name: EulerSearch/embedding_studio
file_path: embedding_studio/api/api_v1/endpoints/fine_tuning.py
context:
[ { "identifier": "FineTuningTaskCreate", "path": "embedding_studio/api/api_v1/schemas/fine_tuning.py", "snippet": "class FineTuningTaskCreate(BaseModel):\n fine_tuning_method: str\n batch_id: Optional[str] = None\n metadata: Optional[Dict] = None\n idempotency_key: Optional[uuid.UUID] = None" }, { "identifier": "FineTuningTaskResponse", "path": "embedding_studio/api/api_v1/schemas/fine_tuning.py", "snippet": "class FineTuningTaskResponse(FineTuningTask):\n id: Optional[PyObjectId] = Field(default=ObjectId, alias=\"_id\")" }, { "identifier": "context", "path": "embedding_studio/context/app_context.py", "snippet": "class AppContext:" }, { "identifier": "FineTuningStatus", "path": "embedding_studio/models/fine_tuning.py", "snippet": "class FineTuningStatus(str, enum.Enum):\n pending = \"pending\"\n processing = \"processing\"\n done = \"done\"\n canceled = \"canceled\"\n error = \"error\"" }, { "identifier": "fine_tuning_worker", "path": "embedding_studio/workers/fine_tuning/worker.py", "snippet": "@dramatiq.actor(\n queue_name=\"fine_tuning_worker\",\n max_retries=settings.FINE_TUNING_WORKER_MAX_RETRIES,\n time_limit=settings.FINE_TUNING_WORKER_TIME_LIMIT,\n)\ndef fine_tuning_worker(task_id: str):\n \"\"\"Dramatiq task for fine-tuning a model.\n\n :param task_id: The ID of the fine-tuning task.\n \"\"\"\n logger.info(f\"Start fine-tuning worker with task ID `{task_id}`\")\n\n task = context.fine_tuning_task.get(id=task_id)\n if not task:\n raise FineTuningWorkerException(f\"Task with ID `{task_id}` not found\")\n\n try:\n task.status = FineTuningStatus.processing\n context.fine_tuning_task.update(obj=task)\n\n if not task.batch_id:\n release_id = task.idempotency_key or task.id or uuid.uuid4()\n logger.info(f\"Release batch with ID `{release_id}`\")\n session_batch = context.clickstream_dao.release_batch(\n release_id=str(release_id)\n )\n if session_batch is None:\n raise FineTuningWorkerException(\n f\"Cannot release batch with ID `{release_id}`\"\n )\n task.batch_id = session_batch.batch_id\n context.fine_tuning_task.update(obj=task)\n\n # TODO: add config with parameters\n clickstream = context.clickstream_dao.get_batch_sessions(task.batch_id)\n if not clickstream:\n raise FineTuningWorkerException(\n f\"Clickstream batch with ID `{task.batch_id}` not found\"\n )\n\n fine_tuning_plugin = plugin_manager.get_plugin(task.fine_tuning_method)\n if not fine_tuning_plugin:\n raise FineTuningWorkerException(\n f\"Fine tuning plugin with name `{task.fine_tuning_method}` \"\n f\"not found\"\n )\n logger.info(f\"Upload initial model...\")\n fine_tuning_plugin.upload_initial_model()\n logger.info(f\"Upload initial model... OK\")\n\n logger.info(\"Create fine-tuning builder...\")\n builder = fine_tuning_plugin.get_fine_tuning_builder(\n clickstream=clickstream\n )\n logger.info(\"Create fine-tuning builder... 
OK\")\n\n iteration = FineTuningIteration(\n batch_id=task.batch_id,\n plugin_name=task.fine_tuning_method,\n )\n logger.info(\"Start fine-tuning the embedding model...\")\n finetune_embedding_model(\n iteration=iteration,\n settings=builder.fine_tuning_settings,\n ranking_data=builder.ranking_data,\n query_retriever=builder.query_retriever,\n tracker=builder.experiments_manager,\n initial_params=builder.initial_params,\n initial_max_evals=builder.initial_max_evals,\n )\n logger.info(\n \"Fine tuning of the embedding model was completed successfully!\"\n )\n builder.experiments_manager.set_iteration(iteration)\n best_model_url = builder.experiments_manager.get_current_model_url()\n logger.info(\n f\"You can download best model using this url: {best_model_url}\"\n )\n task.best_model_url = best_model_url\n builder.experiments_manager.finish_iteration()\n\n except Exception:\n try:\n task.status = FineTuningStatus.error\n context.fine_tuning_task.update(obj=task)\n except Exception as exc:\n logger.exception(f\"Failed to update task status: {exc}\")\n raise\n\n task.status = FineTuningStatus.done\n context.fine_tuning_task.update(obj=task)" } ]
import logging from typing import Any, List from dramatiq_abort import abort as dramatiq_abort from fastapi import APIRouter, HTTPException, status from embedding_studio.api.api_v1.schemas.fine_tuning import ( FineTuningTaskCreate, FineTuningTaskResponse, ) from embedding_studio.context.app_context import context from embedding_studio.models.fine_tuning import FineTuningStatus from embedding_studio.workers.fine_tuning.worker import fine_tuning_worker
token_num: 1,238
cropped_code:
logger = logging.getLogger(__name__) router = APIRouter() @router.post( "/task", response_model=FineTuningTaskResponse, response_model_by_alias=False, response_model_exclude_none=True, ) def create_fine_tuning_task(
next_line: body: FineTuningTaskCreate,
gold_snippet_index: 0
created_at: 2023-10-31 00:33:13+00:00
level: 2k

repo_name: reworkd/bananalyzer
file_path: tests/test_examples.py
context:
[ { "identifier": "download_examples", "path": "bananalyzer/data/examples.py", "snippet": "def are_examples_available(path: Path) -> bool:\ndef get_examples_path() -> Path:\ndef convert_to_crlf(file_path: Path) -> None:\ndef download_examples() -> None:\ndef load_examples_at_path(path: Path, examples_json_file_name: str) -> List[Example]:\ndef get_training_examples() -> List[Example]:\ndef get_test_examples() -> List[Example]:\ndef get_all_examples() -> List[Example]:\ndef get_all_example_urls() -> List[str]:\ndef get_example_by_url(url: str) -> Example:" }, { "identifier": "Example", "path": "bananalyzer/data/schemas.py", "snippet": "class Example(BaseModel):\n id: str\n url: str\n source: Literal[\"mhtml\", \"hosted\", \"url\"] = Field(\n description=\"Source of the website\"\n )\n category: str = Field(description=\"Category of the website\")\n subcategory: str = Field(description=\"Subcategory of the website\")\n type: GoalType = Field(\n description=\"The high level goal intent the agent is aiming to do\"\n )\n goal: Optional[Union[str, Dict[str, Any]]] = Field(\n description=\"The goal of the agent for this specific example\"\n )\n fetch_id: Optional[FetchId] = Field(\n default=None,\n description=\"If it is a fetch type, we can infer the goal based on this id to avoid large schemas in json\",\n )\n evals: List[Eval] = Field(\n description=\"Various evaluations to test for within the example\"\n )\n\n def get_static_url(self) -> str:\n from bananalyzer.runner.website_responder import get_website_responder\n\n return get_website_responder(self).get_url(self)\n\n @model_validator(mode=\"before\")\n def set_goal_if_fetch_id_provided(cls, values: Dict[str, Any]) -> Dict[str, Any]:\n from bananalyzer.data.fetch_schemas import get_fetch_schema\n\n goal_type = values.get(\"type\")\n if goal_type != \"fetch\":\n return values\n\n fetch_id: Optional[FetchId] = values.get(\"fetch_id\")\n goal = values.get(\"goal\")\n\n if fetch_id is not None and goal is not None:\n raise ValueError(\"fetch_id and goal cannot both be provided\")\n\n if fetch_id is None and goal is not None:\n return values\n\n if fetch_id is None:\n raise ValueError(\"fetch_id must be provided if goal is not provided\")\n\n values[\"goal\"] = get_fetch_schema(fetch_id).model_fields\n return values" } ]
import json import os import shutil import pytest from pathlib import Path from typing import List from unittest.mock import mock_open from pytest_mock import MockFixture from bananalyzer.data.examples import ( download_examples, downloaded_examples_path, get_all_examples, get_example_by_url, get_examples_path, get_test_examples, get_training_examples, load_examples_at_path, local_examples_path, ) from bananalyzer.data.schemas import Example
token_num: 932
cropped_code:
def test_load_examples_at_path_success(mocker: MockFixture) -> None: data: List[Example] = [] mocker.patch("builtins.open", mock_open(read_data=json.dumps(data))) loaded_examples = load_examples_at_path(Path("/fake/path"), "fake.json") assert len(loaded_examples) == len(data) assert all(isinstance(example, Example) for example in loaded_examples) def test_load_examples_at_path_json_error(mocker: MockFixture) -> None: mocker.patch("builtins.open", mock_open(read_data="invalid json")) with pytest.raises(json.JSONDecodeError): load_examples_at_path(Path("/fake/path"), "fake.json") def test_load_examples_at_path_file_not_found(mocker: MockFixture) -> None: mocker.patch("builtins.open", side_effect=FileNotFoundError) with pytest.raises(FileNotFoundError): load_examples_at_path(Path("/fake/path"), "fake.json") def test_get_local_examples_path() -> None: # Running test in repo will default to local path
next_line: assert get_examples_path() == local_examples_path
gold_snippet_index: 0
created_at: 2023-10-30 16:40:57+00:00
level: 2k

repo_name: OpenMask3D/openmask3d
file_path: openmask3d/mask_features_computation/features_extractor.py
context:
[ { "identifier": "Camera", "path": "openmask3d/data/load.py", "snippet": "class Camera:\n def __init__(self, \n intrinsic_path, \n intrinsic_resolution, \n poses_path, \n depths_path, \n extension_depth, \n depth_scale):\n self.intrinsic = np.loadtxt(intrinsic_path)[:3, :3]\n self.intrinsic_original_resolution = intrinsic_resolution\n self.poses_path = poses_path\n self.depths_path = depths_path\n self.extension_depth = extension_depth\n self.depth_scale = depth_scale\n \n def get_adapted_intrinsic(self, desired_resolution):\n '''Get adjusted camera intrinsics.'''\n if self.intrinsic_original_resolution == desired_resolution:\n return self.intrinsic\n \n resize_width = int(math.floor(desired_resolution[1] * float(\n self.intrinsic_original_resolution[0]) / float(self.intrinsic_original_resolution[1])))\n \n adapted_intrinsic = self.intrinsic.copy()\n adapted_intrinsic[0, 0] *= float(resize_width) / float(self.intrinsic_original_resolution[0])\n adapted_intrinsic[1, 1] *= float(desired_resolution[1]) / float(self.intrinsic_original_resolution[1])\n adapted_intrinsic[0, 2] *= float(desired_resolution[0] - 1) / float(self.intrinsic_original_resolution[0] - 1)\n adapted_intrinsic[1, 2] *= float(desired_resolution[1] - 1) / float(self.intrinsic_original_resolution[1] - 1)\n return adapted_intrinsic\n \n def load_poses(self, indices):\n path = os.path.join(self.poses_path, str(0) + '.txt')\n shape = np.linalg.inv(np.loadtxt(path))[:3, :].shape\n poses = np.zeros((len(indices), shape[0], shape[1]))\n for i, idx in enumerate(indices):\n path = os.path.join(self.poses_path, str(idx) + '.txt')\n poses[i] = np.linalg.inv(np.loadtxt(path))[:3, :]\n return poses\n \n def load_depth(self, idx, depth_scale):\n depth_path = os.path.join(self.depths_path, str(idx) + self.extension_depth)\n sensor_depth = imageio.v2.imread(depth_path) / depth_scale\n return sensor_depth" }, { "identifier": "InstanceMasks3D", "path": "openmask3d/data/load.py", "snippet": "class InstanceMasks3D:\n def __init__(self, masks_path):\n self.masks = torch.load(masks_path)\n self.num_masks = self.masks.shape[1]" }, { "identifier": "Images", "path": "openmask3d/data/load.py", "snippet": "class Images:\n def __init__(self, \n images_path, \n extension, \n indices):\n self.images_path = images_path\n self.extension = extension\n self.indices = indices\n self.images = self.load_images(indices)\n \n def load_images(self, indices):\n images = []\n for idx in indices:\n img_path = os.path.join(self.images_path, str(idx) + self.extension)\n images.append(Image.open(img_path).convert(\"RGB\"))\n return images\n def get_as_np_list(self):\n images = []\n for i in range(len(self.images)):\n images.append(np.asarray(self.images[i]))\n return images" }, { "identifier": "PointCloud", "path": "openmask3d/data/load.py", "snippet": "class PointCloud:\n def __init__(self, \n point_cloud_path):\n pcd = o3d.io.read_point_cloud(point_cloud_path)\n self.points = np.asarray(pcd.points)\n self.num_points = self.points.shape[0]\n \n def get_homogeneous_coordinates(self):\n return np.append(self.points, np.ones((self.num_points,1)), axis = -1)" }, { "identifier": "get_number_of_images", "path": "openmask3d/data/load.py", "snippet": "def get_number_of_images(poses_path):\n i = 0\n while(os.path.isfile(os.path.join(poses_path, str(i) + '.txt'))): i += 1\n return i" }, { "identifier": "initialize_sam_model", "path": "openmask3d/mask_features_computation/utils.py", "snippet": "def initialize_sam_model(device, sam_model_type, sam_checkpoint):\n sam = 
sam_model_registry[sam_model_type](checkpoint=sam_checkpoint)\n sam.to(device)\n predictor_sam = SamPredictor(sam) \n return predictor_sam" }, { "identifier": "mask2box_multi_level", "path": "openmask3d/mask_features_computation/utils.py", "snippet": "def mask2box_multi_level(mask: torch.Tensor, level, expansion_ratio):\n x1, y1, x2 , y2 = mask2box(mask)\n if level == 0:\n return x1, y1, x2 , y2\n shape = mask.shape\n x_exp = int(abs(x2- x1)*expansion_ratio) * level\n y_exp = int(abs(y2-y1)*expansion_ratio) * level\n return max(0, x1 - x_exp), max(0, y1 - y_exp), min(shape[1], x2 + x_exp), min(shape[0], y2 + y_exp)" }, { "identifier": "run_sam", "path": "openmask3d/mask_features_computation/utils.py", "snippet": "def run_sam(image_size, num_random_rounds, num_selected_points, point_coords, predictor_sam):\n best_score = 0\n best_mask = np.zeros_like(image_size, dtype=bool)\n \n point_coords_new = np.zeros_like(point_coords)\n point_coords_new[:,0] = point_coords[:,1]\n point_coords_new[:,1] = point_coords[:,0]\n \n # Get only a random subsample of them for num_random_rounds times and choose the mask with highest confidence score\n for i in range(num_random_rounds):\n np.random.shuffle(point_coords_new)\n masks, scores, logits = predictor_sam.predict(\n point_coords=point_coords_new[:num_selected_points],\n point_labels=np.ones(point_coords_new[:num_selected_points].shape[0]),\n multimask_output=False,\n ) \n if scores[0] > best_score:\n best_score = scores[0]\n best_mask = masks[0]\n \n return best_mask" } ]
import_statement:
import clip import numpy as np import imageio import torch import os from tqdm import tqdm from openmask3d.data.load import Camera, InstanceMasks3D, Images, PointCloud, get_number_of_images from openmask3d.mask_features_computation.utils import initialize_sam_model, mask2box_multi_level, run_sam
token_num: 1,519
cropped_code:
class PointProjector: def __init__(self, camera: Camera, point_cloud: PointCloud,
next_line: masks: InstanceMasks3D,
gold_snippet_index: 1
created_at: 2023-10-31 14:58:50+00:00
level: 2k

repo_name: nv-tlabs/vid2player3d
file_path: embodied_pose/models/im_network_builder.py
context:
[ { "identifier": "RunningNorm", "path": "embodied_pose/models/running_norm.py", "snippet": "class RunningNorm(nn.Module):\n \"\"\"\n y = (x-mean)/std\n using running estimates of mean,std\n \"\"\"\n\n def __init__(self, dim, demean=True, destd=True, clip=5.0):\n super().__init__()\n self.dim = dim\n self.demean = demean\n self.destd = destd\n self.clip = clip\n self.register_buffer('n', torch.tensor(0, dtype=torch.long))\n self.register_buffer('mean', torch.zeros(dim))\n self.register_buffer('var', torch.zeros(dim))\n self.register_buffer('std', torch.zeros(dim))\n\n def update(self, x):\n var_x, mean_x = torch.var_mean(x, dim=0, unbiased=False)\n m = x.shape[0]\n w = self.n.to(x.dtype) / (m + self.n).to(x.dtype)\n self.var[:] = w * self.var + (1 - w) * var_x + w * (1 - w) * (mean_x - self.mean).pow(2)\n self.mean[:] = w * self.mean + (1 - w) * mean_x\n self.std[:] = torch.sqrt(self.var)\n self.n += m\n \n def forward(self, x):\n if self.training:\n with torch.no_grad():\n self.update(x)\n if self.n > 0:\n if self.demean:\n x = x - self.mean\n if self.destd:\n x = x / (self.std + 1e-8)\n if self.clip:\n x = torch.clamp(x, -self.clip, self.clip)\n return x" }, { "identifier": "SMPL_BONE_ORDER_NAMES", "path": "uhc/smpllib/smpl_parser.py", "snippet": "SMPL_BONE_ORDER_NAMES = [\n \"Pelvis\",\n \"L_Hip\",\n \"R_Hip\",\n \"Torso\",\n \"L_Knee\",\n \"R_Knee\",\n \"Spine\",\n \"L_Ankle\",\n \"R_Ankle\",\n \"Chest\",\n \"L_Toe\",\n \"R_Toe\",\n \"Neck\",\n \"L_Thorax\",\n \"R_Thorax\",\n \"Head\",\n \"L_Shoulder\",\n \"R_Shoulder\",\n \"L_Elbow\",\n \"R_Elbow\",\n \"L_Wrist\",\n \"R_Wrist\",\n \"L_Hand\",\n \"R_Hand\",\n]" } ]
from rl_games.algos_torch import network_builder from rl_games.algos_torch.running_mean_std import RunningMeanStd from isaacgym.torch_utils import * from .running_norm import RunningNorm from utils import torch_utils from utils.torch_transform import heading_to_vec, rotation_matrix_to_angle_axis, rotation_matrix_to_quaternion, rot6d_to_rotmat from utils.hybrik import batch_inverse_kinematics_transform_naive, batch_inverse_kinematics_transform from uhc.smpllib.smpl_parser import SMPL_BONE_ORDER_NAMES as smpl_joint_names import torch import torch.nn as nn import numpy as np
token_num: 1,231
cropped_code:
DISC_LOGIT_INIT_SCALE = 1.0 mujoco_joint_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] smpl_2_mujoco = [smpl_joint_names.index(q) for q in mujoco_joint_names] mujoco_2_smpl = [mujoco_joint_names.index(q) for q in smpl_joint_names] class ImitatorBuilder(network_builder.A2CBuilder): def __init__(self, **kwargs): super().__init__(**kwargs) return class Network(network_builder.A2CBuilder.Network): def __init__(self, params, **kwargs): self.context_padding = params.get('context_padding', 8) self.humanoid_obs_dim = params.get('humanoid_obs_dim', 734) self.residual_action = params.get('residual_action', True) self.use_running_obs = params.get('use_running_obs', False) self.running_obs_type = params.get('running_obs_type', 'rl_game') self.use_ik = params.get('use_ik', False) self.ik_type = params.get('ik_type', 'optimized') self.ik_ignore_outlier = params.get('ik_ignore_outlier', False) self.kinematic_pretrained = params.get('kinematic_pretrained', False) self.smpl_rest_joints = kwargs['smpl_rest_joints'] self.smpl_parents = kwargs['smpl_parents'] self.smpl_children = kwargs['smpl_children'] kwargs['input_shape'] = (self.humanoid_obs_dim,) super().__init__(params, **kwargs) if self.use_running_obs: if self.running_obs_type == 'rl_game': self.running_obs = RunningMeanStd((self.humanoid_obs_dim,)) else:
next_line: self.running_obs = RunningNorm(self.humanoid_obs_dim)
gold_snippet_index: 0
created_at: 2023-10-30 20:43:43+00:00
level: 2k

repo_name: vLAR-group/RayDF
file_path: net_multiview/network.py
context:
[ { "identifier": "DualVisClassifier", "path": "net_classifier/network.py", "snippet": "class DualVisClassifier(nn.Module):\n def __init__(self, D=8, W=512, ext_layer=1, input_ch=11, w0_init=30.):\n super(DualVisClassifier, self).__init__()\n\n self.layer_ray = nn.ModuleList(\n [Siren(input_ch, W, w0=w0_init, is_first=True)] + [Siren(W, W) for i in range(ext_layer - 1)])\n self.layer_pts = nn.ModuleList(\n [Siren(3, W, w0=w0_init, is_first=True)] + [Siren(W, W) for i in range(ext_layer - 1)])\n self.lf_encoder = nn.ModuleList([Siren(W * 2, W)] + [Siren(W, W) for i in range(ext_layer + 1, D - 1)])\n self.cls_dense = Siren(W, 1, activation=nn.Identity())\n\n def forward(self, x):\n x_ray0, x_ray1, x_pts = x\n\n for i in range(len(self.layer_ray)):\n x_ray0 = self.layer_ray[i](x_ray0)\n x_ray1 = self.layer_ray[i](x_ray1)\n x_pts = self.layer_pts[i](x_pts)\n x_ray0_pts = torch.cat([x_ray0, x_pts], dim=-1)\n x_ray1_pts = torch.cat([x_ray1, x_pts], dim=-1)\n h = torch.stack([x_ray0_pts, x_ray1_pts], dim=-1).mean(-1)\n\n for i, l in enumerate(self.lf_encoder):\n h = self.lf_encoder[i](h)\n\n o = self.cls_dense(h)\n return o" }, { "identifier": "Siren", "path": "utils/layer.py", "snippet": "class Siren(nn.Module):\n def __init__(self, dim_in, dim_out, w0=30., c=6., is_first=False, activation=None, droprate=0.):\n super().__init__()\n self.dim_in = dim_in\n self.is_first = is_first\n\n weight = torch.zeros(dim_out, dim_in)\n bias = torch.zeros(dim_out)\n self.init_(weight, bias, c=c, w0=w0)\n\n self.weight = nn.Parameter(weight)\n self.bias = nn.Parameter(bias)\n self.activation = Sine(w0) if activation is None else activation\n self.dropout = None\n if droprate > 0.:\n self.dropout = nn.Dropout(p=droprate)\n\n def init_(self, weight, bias, c, w0):\n dim = self.dim_in\n w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)\n weight.uniform_(-w_std, w_std)\n bias.uniform_(-w_std, w_std)\n\n def forward(self, x):\n out = F.linear(x, self.weight, self.bias)\n out = self.activation(out)\n if self.dropout is not None:\n out = self.dropout(out)\n return out" }, { "identifier": "get_rayparam_func", "path": "utils/ray.py", "snippet": "def get_rayparam_func(scene_info):\n ray_param = TwoSphere(scene_info)\n ray_embed = lambda x, rp=ray_param: rp.ray2param(x)\n return ray_embed, ray_param.out_dim" } ]
import os import torch import torch.nn as nn import sys from net_classifier.network import DualVisClassifier from utils.layer import Siren from utils.ray import get_rayparam_func
token_num: 1,173
cropped_code:
sys.path.append('../') EPS = 1e-8 class RaySurfDNet(nn.Module): def __init__(self, D=8, W=256, input_ch=4, rgb_layer=0, w0_init=30.): super(RaySurfDNet, self).__init__() self.predict_rgb = True if rgb_layer > 0 else False n_ext = max(rgb_layer, 1) self.lf_encoder = nn.ModuleList([Siren(input_ch, W, w0=w0_init, is_first=True)] + [Siren(W, W) for i in range(1, D-n_ext)]) self.dist_dense = nn.ModuleList([Siren(W, W) for i in range(n_ext-1)] + [Siren(W, 1, activation=nn.Identity())]) if self.predict_rgb: self.color_dense = nn.ModuleList([Siren(W, W) for i in range(rgb_layer-1)] + [Siren(W, 3, activation=nn.Identity())]) @staticmethod def get_features(layers, x): h = x for i, l in enumerate(layers): h = layers[i](h) return h def forward(self, x): outputs = {} feats = self.get_features(self.lf_encoder, x) outputs['dist'] = self.get_features(self.dist_dense, feats) if self.predict_rgb: outputs['rgb'] = self.get_features(self.color_dense, feats) return outputs def create_net(args, scene_info, device):
next_line: ray_fn, input_ch = get_rayparam_func(scene_info)
gold_snippet_index: 2
created_at: 2023-10-30 14:05:51+00:00
level: 2k

repo_name: francescofugazzi/3dgsconverter
file_path: gsconverter/utils/utility.py
context:
[ { "identifier": "debug_print", "path": "gsconverter/utils/utility_functions.py", "snippet": "def debug_print(message):\n if config.DEBUG:\n print(message)" }, { "identifier": "init_worker", "path": "gsconverter/utils/utility_functions.py", "snippet": "def init_worker():\n signal.signal(signal.SIGINT, signal.SIG_IGN)" } ]
import numpy as np import multiprocessing from multiprocessing import Pool, cpu_count from .utility_functions import debug_print, init_worker
token_num: 913
cropped_code:
""" 3D Gaussian Splatting Converter Copyright (c) 2023 Francesco Fugazzi This software is released under the MIT License. For more information about the license, please see the LICENSE file. """ class Utility: @staticmethod def text_based_detect_format(file_path): debug_print("[DEBUG] Executing 'text_based_detect_format' function...") """Detect if the given file is in '3dgs' or 'cc' format.""" with open(file_path, 'rb') as file: header_bytes = file.read(2048) # Read the beginning to detect the format header = header_bytes.decode('utf-8', errors='ignore') if "property float f_dc_0" in header: debug_print("[DEBUG] Detected format: 3dgs") return "3dgs" elif "property float scal_f_dc_0" in header or "property float scalar_scal_f_dc_0" in header or "property float scalar_f_dc_0" in header: debug_print("[DEBUG] Detected format: cc") return "cc" else: return None @staticmethod def copy_data_with_prefix_check(source, target, possible_prefixes): debug_print("[DEBUG] Executing 'copy_data_with_prefix_check' function...") """ Given two structured numpy arrays (source and target), copy the data from source to target. If a field exists in source but not in target, this function will attempt to find the field in target by adding any of the possible prefixes to the field name. """ for name in source.dtype.names: if name in target.dtype.names: target[name] = source[name] else: copied = False for prefix in possible_prefixes: # If the field starts with the prefix, try the field name without the prefix if name.startswith(prefix): stripped_name = name[len(prefix):] if stripped_name in target.dtype.names: target[stripped_name] = source[name] copied = True break # If the field doesn't start with any prefix, try adding the prefix else: prefixed_name = prefix + name if prefixed_name in target.dtype.names: debug_print(f"[DEBUG] Copying data from '{name}' to '{prefixed_name}'") target[prefixed_name] = source[name] copied = True break ##if not copied: ## print(f"Warning: Field {name} not found in target.") @staticmethod def compute_rgb_from_vertex(vertices): debug_print("[DEBUG] Executing 'compute_rgb_from_vertex' function...") # Depending on the available field names, choose the appropriate ones if 'f_dc_0' in vertices.dtype.names: f_dc = np.column_stack((vertices['f_dc_0'], vertices['f_dc_1'], vertices['f_dc_2'])) else: f_dc = np.column_stack((vertices['scalar_scal_f_dc_0'], vertices['scalar_scal_f_dc_1'], vertices['scalar_scal_f_dc_2'])) colors = (f_dc + 1) * 127.5 colors = np.clip(colors, 0, 255).astype(np.uint8) debug_print("[DEBUG] RGB colors computed.") return colors @staticmethod def parallel_voxel_counting(vertices, voxel_size=1.0): debug_print("[DEBUG] Executing 'parallel_voxel_counting' function...") """Counts the number of points in each voxel in a parallelized manner.""" num_processes = cpu_count() chunk_size = len(vertices) // num_processes chunks = [vertices[i:i + chunk_size] for i in range(0, len(vertices), chunk_size)] num_cores = max(1, multiprocessing.cpu_count() - 1)
""" 3D Gaussian Splatting Converter Copyright (c) 2023 Francesco Fugazzi This software is released under the MIT License. For more information about the license, please see the LICENSE file. """ class Utility: @staticmethod def text_based_detect_format(file_path): debug_print("[DEBUG] Executing 'text_based_detect_format' function...") """Detect if the given file is in '3dgs' or 'cc' format.""" with open(file_path, 'rb') as file: header_bytes = file.read(2048) # Read the beginning to detect the format header = header_bytes.decode('utf-8', errors='ignore') if "property float f_dc_0" in header: debug_print("[DEBUG] Detected format: 3dgs") return "3dgs" elif "property float scal_f_dc_0" in header or "property float scalar_scal_f_dc_0" in header or "property float scalar_f_dc_0" in header: debug_print("[DEBUG] Detected format: cc") return "cc" else: return None @staticmethod def copy_data_with_prefix_check(source, target, possible_prefixes): debug_print("[DEBUG] Executing 'copy_data_with_prefix_check' function...") """ Given two structured numpy arrays (source and target), copy the data from source to target. If a field exists in source but not in target, this function will attempt to find the field in target by adding any of the possible prefixes to the field name. """ for name in source.dtype.names: if name in target.dtype.names: target[name] = source[name] else: copied = False for prefix in possible_prefixes: # If the field starts with the prefix, try the field name without the prefix if name.startswith(prefix): stripped_name = name[len(prefix):] if stripped_name in target.dtype.names: target[stripped_name] = source[name] copied = True break # If the field doesn't start with any prefix, try adding the prefix else: prefixed_name = prefix + name if prefixed_name in target.dtype.names: debug_print(f"[DEBUG] Copying data from '{name}' to '{prefixed_name}'") target[prefixed_name] = source[name] copied = True break ##if not copied: ## print(f"Warning: Field {name} not found in target.") @staticmethod def compute_rgb_from_vertex(vertices): debug_print("[DEBUG] Executing 'compute_rgb_from_vertex' function...") # Depending on the available field names, choose the appropriate ones if 'f_dc_0' in vertices.dtype.names: f_dc = np.column_stack((vertices['f_dc_0'], vertices['f_dc_1'], vertices['f_dc_2'])) else: f_dc = np.column_stack((vertices['scalar_scal_f_dc_0'], vertices['scalar_scal_f_dc_1'], vertices['scalar_scal_f_dc_2'])) colors = (f_dc + 1) * 127.5 colors = np.clip(colors, 0, 255).astype(np.uint8) debug_print("[DEBUG] RGB colors computed.") return colors @staticmethod def parallel_voxel_counting(vertices, voxel_size=1.0): debug_print("[DEBUG] Executing 'parallel_voxel_counting' function...") """Counts the number of points in each voxel in a parallelized manner.""" num_processes = cpu_count() chunk_size = len(vertices) // num_processes chunks = [vertices[i:i + chunk_size] for i in range(0, len(vertices), chunk_size)] num_cores = max(1, multiprocessing.cpu_count() - 1)
with Pool(processes=num_cores, initializer=init_worker) as pool:
1
2023-10-28 15:09:50+00:00
2k
solangii/MICS
models/network/resnet20.py
[ { "identifier": "to_one_hot", "path": "utils/mixup_utils.py", "snippet": "def to_one_hot(inp, num_classes):\n y_onehot = torch.FloatTensor(inp.size(0), num_classes)\n y_onehot.zero_()\n\n y_onehot.scatter_(1, inp.unsqueeze(1).data.cpu(), 1)\n\n return Variable(y_onehot.cuda(), requires_grad=False)" }, { "identifier": "middle_mixup_process", "path": "utils/mixup_utils.py", "snippet": "def middle_mixup_process(out, target_reweighted, num_base_classes, lam, use_hard_positive_aug,\n add_noise_level=0., mult_noise_level=0., hpa_type=\"none\",\n label_sharpening=True, label_mix=\"vanilla\", label_mix_threshold=0.2, exp_coef=1.,\n predefined_indices=None, gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True):\n indices = np.random.permutation(out.size(0))\n out = out * lam + out[indices] * (1 - lam)\n target_reweighted, mix_label_mask = middle_label_mix_process(target_reweighted, target_reweighted[indices],\n num_base_classes, lam, label_mix,\n label_mix_threshold, exp_coef, gaussian_h1,\n piecewise_linear_h1, piecewise_linear_h2, use_softlabel)\n return out, target_reweighted, mix_label_mask" }, { "identifier": "get_lambda", "path": "utils/mixup_utils.py", "snippet": "def get_lambda(alpha=1.0):\n '''Return lambda'''\n if alpha > 0.:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1.\n return lam" } ]
import torch
import torch.nn as nn
import numpy as np
import random
from utils.mixup_utils import to_one_hot, middle_mixup_process, get_lambda
from torch.autograd import Variable
1436
def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, last=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.last = last def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=10): self.inplanes = 16 self.num_classes = num_classes super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(16) self.relu = nn.ReLU(inplace=True) self.layer1 = self._make_layer(block, 16, layers[0]) self.layer2 = self._make_layer(block, 32, layers[1], stride=2) self.layer3 = self._make_layer(block, 64, layers[2], stride=2, last_phase=True) # self.avgpool = nn.AvgPool2d(8, stride=1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1, last_phase=False): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion if last_phase: for i in range(1, blocks - 1): layers.append(block(self.inplanes, planes)) layers.append(block(self.inplanes, planes, last=True)) else: for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x, target=None, mix_type="vanilla", mixup_alpha=None, num_base_classes=-1, use_hard_positive_aug=False, add_noise_level=0., mult_noise_level=0., minimum_lambda=0.5, hpa_type="none", label_sharpening=True, label_mix="vanilla", label_mix_threshold=0.2, exp_coef=1., cutmix_prob=1., num_similar_class=3, classifiers=None, gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True): if "mixup_hidden" in mix_type: layer_mix = random.randint(0, 2) else: layer_mix = None out = x if mixup_alpha is not None: lam = get_lambda(mixup_alpha) # https://github.com/YU1ut/MixMatch-pytorch/blob/master/train.py#L243 if use_hard_positive_aug: lam = max(lam, 1 - lam) lam = max(lam, minimum_lambda) lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda() lam = Variable(lam) if target is not None: target_reweighted = to_one_hot(target, self.num_classes) if layer_mix == 0:
def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, last=False): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride self.last = last def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=10): self.inplanes = 16 self.num_classes = num_classes super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(16) self.relu = nn.ReLU(inplace=True) self.layer1 = self._make_layer(block, 16, layers[0]) self.layer2 = self._make_layer(block, 32, layers[1], stride=2) self.layer3 = self._make_layer(block, 64, layers[2], stride=2, last_phase=True) # self.avgpool = nn.AvgPool2d(8, stride=1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1, last_phase=False): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion if last_phase: for i in range(1, blocks - 1): layers.append(block(self.inplanes, planes)) layers.append(block(self.inplanes, planes, last=True)) else: for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x, target=None, mix_type="vanilla", mixup_alpha=None, num_base_classes=-1, use_hard_positive_aug=False, add_noise_level=0., mult_noise_level=0., minimum_lambda=0.5, hpa_type="none", label_sharpening=True, label_mix="vanilla", label_mix_threshold=0.2, exp_coef=1., cutmix_prob=1., num_similar_class=3, classifiers=None, gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True): if "mixup_hidden" in mix_type: layer_mix = random.randint(0, 2) else: layer_mix = None out = x if mixup_alpha is not None: lam = get_lambda(mixup_alpha) # https://github.com/YU1ut/MixMatch-pytorch/blob/master/train.py#L243 if use_hard_positive_aug: lam = max(lam, 1 - lam) lam = max(lam, minimum_lambda) lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda() lam = Variable(lam) if target is not None: target_reweighted = to_one_hot(target, self.num_classes) if layer_mix == 0:
out, target_reweighted, mix_label_mask = middle_mixup_process(out, target_reweighted, num_base_classes,
1
2023-10-25 16:50:51+00:00
2k
megvii-research/WACV2024-SAFA
model/flownet.py
[ { "identifier": "warp", "path": "model/warplayer.py", "snippet": "def warp(tenInput, tenFlow, mode='bilinear'):\n k = (str(tenFlow.device), str(tenFlow.size()))\n if k not in backwarp_tenGrid:\n tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3]).view(1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)\n tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2]).view(1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])\n backwarp_tenGrid[k] = torch.cat([ tenHorizontal, tenVertical ], 1).to(device)\n\n tenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)\n\n g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)\n return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode=mode, padding_mode='border', align_corners=True)" }, { "identifier": "Head", "path": "model/head.py", "snippet": "class Head(nn.Module):\n def __init__(self, c):\n super(Head, self).__init__()\n model = models.resnet18(pretrained=False)\n self.cnn0 = nn.Sequential(*nn.ModuleList(model.children())[:3])\n self.cnn1 = nn.Sequential(\n *list(model.children())[3:5],\n )\n self.cnn2 = nn.Sequential(\n *list(model.children())[5:6],\n )\n self.out0 = nn.Conv2d(64, c, 1, 1, 0)\n self.out1 = nn.Conv2d(64, c, 1, 1, 0)\n self.out2 = nn.Conv2d(128, c, 1, 1, 0)\n self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).to(device) \n def forward(self, x):\n x = self.normalize(x)\n f0 = self.cnn0(x)\n f1 = self.cnn1(f0)\n f2 = self.cnn2(f1)\n f0 = self.out0(f0)\n f1 = F.interpolate(self.out1(f1), scale_factor=2.0, mode=\"bilinear\")\n f2 = F.interpolate(self.out2(f2), scale_factor=4.0, mode=\"bilinear\")\n return f0 + f1 + f2" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from model.warplayer import warp
from model.head import Head
1255
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True, groups=groups), nn.PReLU(out_planes) ) def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_planes), nn.PReLU(out_planes) ) class Resblock(nn.Module): def __init__(self, c, dilation=1): super(Resblock, self).__init__() self.conv = nn.Sequential( nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1), nn.PReLU(c), nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1), ) self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True) self.prelu = nn.PReLU(c) def forward(self, x): y = self.conv(x) return self.prelu(y * self.beta + x) class RoundSTE(torch.autograd.Function): @staticmethod def forward(ctx, x): y = torch.bernoulli(x) return y @staticmethod def backward(ctx, grad): return grad, None class RecurrentBlock(nn.Module): def __init__(self, c, dilation=1, depth=6): super(RecurrentBlock, self).__init__() self.conv_stem = conv(3*c+6+1, c, 3, 1, 1, groups=1) self.conv_backbone = torch.nn.ModuleList([]) self.depth = depth for i in range(depth): self.conv_backbone.append(Resblock(c, dilation)) def forward(self, x, i0, i1, flow, timestep, convflow, getscale): flow_down = F.interpolate(flow, scale_factor=0.5, mode="bilinear")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True, groups=groups), nn.PReLU(out_planes) ) def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): return nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_planes), nn.PReLU(out_planes) ) class Resblock(nn.Module): def __init__(self, c, dilation=1): super(Resblock, self).__init__() self.conv = nn.Sequential( nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1), nn.PReLU(c), nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1), ) self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True) self.prelu = nn.PReLU(c) def forward(self, x): y = self.conv(x) return self.prelu(y * self.beta + x) class RoundSTE(torch.autograd.Function): @staticmethod def forward(ctx, x): y = torch.bernoulli(x) return y @staticmethod def backward(ctx, grad): return grad, None class RecurrentBlock(nn.Module): def __init__(self, c, dilation=1, depth=6): super(RecurrentBlock, self).__init__() self.conv_stem = conv(3*c+6+1, c, 3, 1, 1, groups=1) self.conv_backbone = torch.nn.ModuleList([]) self.depth = depth for i in range(depth): self.conv_backbone.append(Resblock(c, dilation)) def forward(self, x, i0, i1, flow, timestep, convflow, getscale): flow_down = F.interpolate(flow, scale_factor=0.5, mode="bilinear")
i0 = warp(i0, flow_down[:, :2] * 0.5)
0
2023-10-26 09:24:29+00:00
2k
Z4kSec/IoctlHunter
ioctl_hunter/ui/keys_reader.py
[ { "identifier": "State", "path": "ioctl_hunter/lib/state.py", "snippet": "class State:\n results = Results()\n\n script = None\n cur_proc = None\n\n quiet = False\n running = True\n hook_enabled = False\n debug_enabled = False\n hex_out_enabled = False\n\n included_drivers = []\n only_driver_handles = True" }, { "identifier": "print_enable_debugger", "path": "ioctl_hunter/ui/display.py", "snippet": "def print_enable_debugger():\n State.debug_enabled = True\n logger.handlers[0].flush()\n sys.stdout.flush()\n\n logger.info(\"\")\n logger.info(\"\")\n logger.info(\"-\" * 20 + \" IoctlHunter state \" + \"-\" * 20)\n logger.info(\"\")\n\n logger.info(f\"* Hook state:\\t\\t{State.hook_enabled}\")\n logger.info(\"\")\n logger.info(f\"* Filters:\")\n logger.info(\n f\"\\t- Included IOCTLs (decimal):\\t{format_filter_list(State.results.included_ioctls)}\"\n )\n logger.info(\n f\"\\t- Excluded IOCTLs (decimal):\\t{format_filter_list(State.results.excluded_ioctls)}\"\n )\n logger.info(\n f\"\\t- Included drivers:\\t\\t{format_filter_list(State.results.included_drivers)}\"\n )\n logger.info(\n f\"\\t- Excluded drivers:\\t\\t{format_filter_list(State.results.excluded_drivers)}\"\n )\n\n if len(State.results.loaded_drivers):\n logger.info(\"\")\n logger.info(f\"* Dynamically loaded drivers:\")\n for driver, data in State.results.loaded_drivers.items():\n logger.info(f\"\\t- {driver}\\t({data['image_path']})\")\n\n if len(State.results.count_ioctls):\n logger.info(\"\")\n logger.info(f\"* IOCTLs hooked list:\")\n for ioctl, count in State.results.count_ioctls.items():\n handle_path = State.results.ioctls[ioctl][0][\"handle_path\"]\n ioctl_hex = \"{0:#010x}\".format(int(ioctl))\n logger.info(f\"\\t- {ioctl}\\t{ioctl_hex}\\t{count}\\t{handle_path}\")\n\n logger.info(\"\")\n logger.info(\"-\" * 59)\n\n return True" }, { "identifier": "print_disable_debugger", "path": "ioctl_hunter/ui/display.py", "snippet": "def print_disable_debugger():\n logger.info(\"Leaving the debugging mode...\")\n logger.info(\"\")\n logger.info(\"\")\n State.debug_enabled = False\n return True" }, { "identifier": "print_dynamic_helper", "path": "ioctl_hunter/ui/display.py", "snippet": "def print_dynamic_helper():\n State.debug_enabled = True\n logger.handlers[0].flush()\n sys.stdout.flush()\n\n logger.info(\"\")\n logger.info(\"\")\n logger.info(\"-\" * 20 + \" IoctlHunter helper \" + \"-\" * 20)\n logger.info(\"\")\n logger.info(\"> Press [SPACE] to enable or disable the hooking engine\")\n logger.info(\n \"> Press [ENTER] to get all information related to the current Ioctl hunt\"\n )\n logger.info(\"> Press [a/A] to append elements to an inclusion / exclusion list\")\n logger.info(\"> Press [r/R] to remove elements to an inclusion / exclusion list\")\n logger.info(\"> Press [h/H] to display this message\")\n logger.info(\"> Press [CTRL] + [c/C] to gracefully exit IoctlHunter\")\n logger.info(\"\")\n logger.info(\"-\" * 59)\n logger.info(\"\")\n logger.info(\"\")\n State.debug_enabled = False\n\n return True" } ]
import sys
import threading
import time
import logging
import msvcrt
from colorama import init, Fore, Style
from ..lib.state import State
from ..ui.display import (
    print_enable_debugger,
    print_disable_debugger,
    print_dynamic_helper,
)
1022
logger = logging.getLogger("ioctl-hunter") class KeysListenner(threading.Thread): is_debugger_enabled = False def __init__(self): super(KeysListenner, self).__init__(daemon=True) init(convert=True) self.start() def run(self): while not msvcrt.kbhit(): time.sleep(0.1) try: while True: result = None if not msvcrt.kbhit(): time.sleep(0.1) continue result = msvcrt.getch().decode("utf-8") if result and result in ["\n", "\r"] and not self.is_debugger_enabled:
logger = logging.getLogger("ioctl-hunter") class KeysListenner(threading.Thread): is_debugger_enabled = False def __init__(self): super(KeysListenner, self).__init__(daemon=True) init(convert=True) self.start() def run(self): while not msvcrt.kbhit(): time.sleep(0.1) try: while True: result = None if not msvcrt.kbhit(): time.sleep(0.1) continue result = msvcrt.getch().decode("utf-8") if result and result in ["\n", "\r"] and not self.is_debugger_enabled:
print_enable_debugger()
1
2023-10-31 22:38:36+00:00
2k
masked-spacetime-hashing/msth
nerfstudio/process_data/process_data_utils.py
[ { "identifier": "status", "path": "nerfstudio/utils/rich_utils.py", "snippet": "def status(msg: str, spinner: str = \"bouncingBall\", verbose: bool = False):\n \"\"\"A context manager that does nothing is verbose is True. Otherwise it hides logs under a message.\n\n Args:\n msg: The message to log.\n spinner: The spinner to use.\n verbose: If True, print all logs, else hide them.\n \"\"\"\n if verbose:\n return nullcontext()\n return CONSOLE.status(msg, spinner=spinner)" }, { "identifier": "run_command", "path": "nerfstudio/utils/scripts.py", "snippet": "def run_command(cmd: str, verbose=False) -> Optional[str]:\n \"\"\"Runs a command and returns the output.\n\n Args:\n cmd: Command to run.\n verbose: If True, logs the output of the command.\n Returns:\n The output of the command if return_output is True, otherwise None.\n \"\"\"\n out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)\n if out.returncode != 0:\n CONSOLE.rule(\"[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: \", style=\"red\")\n CONSOLE.print(f\"[bold red]Error running command: {cmd}\")\n CONSOLE.rule(style=\"red\")\n CONSOLE.print(out.stderr.decode(\"utf-8\"))\n sys.exit(1)\n if out.stdout is not None:\n return out.stdout.decode(\"utf-8\")\n return out" } ]
import os
import shutil
import sys
import cv2
import numpy as np
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from typing_extensions import Literal, OrderedDict
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
1336
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper utils for processing data into the nerfstudio format.""" CONSOLE = Console(width=120) POLYCAM_UPSCALING_TIMES = 2 class CameraModel(Enum): """Enum for camera types.""" OPENCV = "OPENCV" OPENCV_FISHEYE = "OPENCV_FISHEYE" CAMERA_MODELS = { "perspective": CameraModel.OPENCV, "fisheye": CameraModel.OPENCV_FISHEYE, "equirectangular": CameraModel.OPENCV, } def list_images(data: Path) -> List[Path]: """Lists all supported images in a directory Args: data: Path to the directory of images. Returns: Paths to images contained in the directory """ allowed_exts = [".jpg", ".jpeg", ".png", ".tif", ".tiff"] image_paths = sorted([p for p in data.glob("[!.]*") if p.suffix.lower() in allowed_exts]) return image_paths def get_image_filenames(directory: Path, max_num_images: int = -1) -> Tuple[List[Path], int]: """Returns a list of image filenames in a directory. Args: dir: Path to the directory. max_num_images: The maximum number of images to return. -1 means no limit. Returns: A tuple of A list of image filenames, number of original image paths. """ image_paths = list_images(directory) num_orig_images = len(image_paths) if max_num_images != -1 and num_orig_images > max_num_images: idx = np.round(np.linspace(0, num_orig_images - 1, max_num_images)).astype(int) else: idx = np.arange(num_orig_images) image_filenames = list(np.array(image_paths)[idx]) return image_filenames, num_orig_images def get_num_frames_in_video(video: Path) -> int: """Returns the number of frames in a video. Args: video: Path to a video. Returns: The number of frames in a video. """ cmd = f'ffprobe -v error -select_streams v:0 -count_packets \ -show_entries stream=nb_read_packets -of csv=p=0 "{video}"' output = run_command(cmd) assert output is not None output = output.strip(" ,\t\n\r") return int(output) def convert_video_to_images( video_path: Path, image_dir: Path, num_frames_target: int, percent_crop: Tuple[float, float, float, float] = (0.0, 0.0, 0.0, 0.0), verbose: bool = False, ) -> Tuple[List[str], int]: """Converts a video into a sequence of images. Args: video_path: Path to the video. output_dir: Path to the output directory. num_frames_target: Number of frames to extract. percent_crop: Percent of the image to crop. (top, bottom, left, right) verbose: If True, logs the output of the command. Returns: A tuple containing summary of the conversion and the number of extracted frames. """ if video_path.is_dir(): CONSOLE.print(f"[bold red]Error: Video path is a directory, not a path: {video_path}") sys.exit(1) if video_path.exists() is False: CONSOLE.print(f"[bold red]Error: Video does not exist: {video_path}") sys.exit(1)
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper utils for processing data into the nerfstudio format.""" CONSOLE = Console(width=120) POLYCAM_UPSCALING_TIMES = 2 class CameraModel(Enum): """Enum for camera types.""" OPENCV = "OPENCV" OPENCV_FISHEYE = "OPENCV_FISHEYE" CAMERA_MODELS = { "perspective": CameraModel.OPENCV, "fisheye": CameraModel.OPENCV_FISHEYE, "equirectangular": CameraModel.OPENCV, } def list_images(data: Path) -> List[Path]: """Lists all supported images in a directory Args: data: Path to the directory of images. Returns: Paths to images contained in the directory """ allowed_exts = [".jpg", ".jpeg", ".png", ".tif", ".tiff"] image_paths = sorted([p for p in data.glob("[!.]*") if p.suffix.lower() in allowed_exts]) return image_paths def get_image_filenames(directory: Path, max_num_images: int = -1) -> Tuple[List[Path], int]: """Returns a list of image filenames in a directory. Args: dir: Path to the directory. max_num_images: The maximum number of images to return. -1 means no limit. Returns: A tuple of A list of image filenames, number of original image paths. """ image_paths = list_images(directory) num_orig_images = len(image_paths) if max_num_images != -1 and num_orig_images > max_num_images: idx = np.round(np.linspace(0, num_orig_images - 1, max_num_images)).astype(int) else: idx = np.arange(num_orig_images) image_filenames = list(np.array(image_paths)[idx]) return image_filenames, num_orig_images def get_num_frames_in_video(video: Path) -> int: """Returns the number of frames in a video. Args: video: Path to a video. Returns: The number of frames in a video. """ cmd = f'ffprobe -v error -select_streams v:0 -count_packets \ -show_entries stream=nb_read_packets -of csv=p=0 "{video}"' output = run_command(cmd) assert output is not None output = output.strip(" ,\t\n\r") return int(output) def convert_video_to_images( video_path: Path, image_dir: Path, num_frames_target: int, percent_crop: Tuple[float, float, float, float] = (0.0, 0.0, 0.0, 0.0), verbose: bool = False, ) -> Tuple[List[str], int]: """Converts a video into a sequence of images. Args: video_path: Path to the video. output_dir: Path to the output directory. num_frames_target: Number of frames to extract. percent_crop: Percent of the image to crop. (top, bottom, left, right) verbose: If True, logs the output of the command. Returns: A tuple containing summary of the conversion and the number of extracted frames. """ if video_path.is_dir(): CONSOLE.print(f"[bold red]Error: Video path is a directory, not a path: {video_path}") sys.exit(1) if video_path.exists() is False: CONSOLE.print(f"[bold red]Error: Video does not exist: {video_path}") sys.exit(1)
with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose):
0
2023-10-26 04:39:15+00:00
2k
sehyunkwon/ICTC
step2b.py
[ { "identifier": "args", "path": "utils/argument.py", "snippet": "def str2bool(v):" }, { "identifier": "get_gpt_response", "path": "utils/llm_utils.py", "snippet": "def get_gpt_response(system_prompt, user_prompt, api_key, user, model):\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"api-key\": api_key,\n }\n message = [{\"role\": \"system\", \"content\": system_prompt}, {\"role\": \"user\", \"content\": user_prompt}]\n data = setup_gpt(message)\n \n # repeat upto 5 times\n for num_iter in range(5):\n try:\n url = f\"https://chatgptapi.krafton-ai.com/{user}/openai/deployments/\"\n url += f\"{model}/chat/completions?api-version=2023-03-15-preview\"\n response = requests.post(\n url,\n headers=headers,\n data=json.dumps(data),\n )\n response = response.json()\n if \"choices\" in response and response[\"choices\"][0][\"finish_reason\"] == \"content_filter\":\n return \"ERROR_FILTER\"\n elif \"error\" in response and response['error']['code'] == 'context_length_exceeded':\n return \"ERROR_CONTEXT_LENGTH\"\n result = response[\"choices\"][0][\"message\"][\"content\"].strip()\n return result\n except Exception as e:\n print(e)\n print(f\"Invalid GPT result. Trying again: {num_iter}\")\n time.sleep(5 * (2 ** num_iter))\n \n return \"ERROR_TIMEOUT\"" }, { "identifier": "get_llama_response", "path": "utils/llm_utils.py", "snippet": "def get_llama_response(system_prompt, user_prompt, url):\n headers = {\n \"Content-Type\": \"application/json\",\n }\n data = setup_llama(user_prompt, system_prompt)\n\n # repeat upto 5 times\n for num_iter in range(5):\n try:\n url = f\"https://{url}/generate\"\n response = requests.post(\n url,\n headers=headers,\n data=json.dumps(data),\n verify=False,\n )\n response = response.json()\n result = str(response[\"text\"]).split(\"[/INST]\")[1].strip(\" \").strip(\"]\")\n result = result.replace(\"'\", \"\")\n return result\n except Exception as e:\n print(e)\n print(f\"Invalid LLama result. Trying again: {num_iter}\")\n time.sleep(5 * (2 ** num_iter))\n \n return \"ERROR_TIMEOUT\"" } ]
import os
from dotenv import load_dotenv
from utils.argument import args
from utils.llm_utils import get_gpt_response, get_llama_response
1139
### Requires the file to be in the following format: "Image-file ... ; Answer: {label}" def post_process(): answer_list = {} # read line by line with open(args.step2a_result_path, 'r') as answers: answers = answers.readlines() for answer in answers: if "Image file-" in answer: answer = answer.split(";")[1] label = answer.split(" ")[1:] real_label = "" for lab in label: real_label += lab + " " real_label = real_label[:-1] real_label = real_label.lower().strip().strip(".") if 'answer: ' not in real_label: real_label = 'answer: ' + real_label if real_label not in answer_list: answer_list[real_label] = 1 else: answer_list[real_label] += 1 print('Full dictionary: ',answer_list) print("Number of distinct labels: ", len(answer_list)) return answer_list if __name__ == "__main__": load_dotenv() api_key = os.getenv("API_KEY") user = os.getenv("USER") model = os.getenv("MODEL") url = "" if args.llama_ver == "llama_70b": url = os.getenv("LLAMA_70b_URL") elif args.llama_ver == "llama_13b": url = os.getenv("LLAMA_13b_URL") elif args.llama_ver == "llama_7b": url = os.getenv("LLAMA_7b_URL") # post process gpt_labels.txt answer_list = post_process() answer_list = {k: v for k, v in answer_list.items() if v > 5} print("Post-processed dictionary: ",answer_list) # read system prompt with open(args.step2b_prompt_path, 'r') as file: system_prompt = file.read() system_prompt = system_prompt.replace("[__NUM_CLASSES_CLUSTER__]", str(args.num_classes)) system_prompt = system_prompt.replace("[__LEN__]", str(len(answer_list))) # print(system_prompt) # feed into gpt. user_prompt = f"list of labels: {answer_list}\n" user_prompt += f"num_classes: {args.num_classes}" if args.llama: response = get_llama_response(system_prompt, user_prompt, url) print(response) else:
### Requires the file to be in the following format: "Image-file ... ; Answer: {label}" def post_process(): answer_list = {} # read line by line with open(args.step2a_result_path, 'r') as answers: answers = answers.readlines() for answer in answers: if "Image file-" in answer: answer = answer.split(";")[1] label = answer.split(" ")[1:] real_label = "" for lab in label: real_label += lab + " " real_label = real_label[:-1] real_label = real_label.lower().strip().strip(".") if 'answer: ' not in real_label: real_label = 'answer: ' + real_label if real_label not in answer_list: answer_list[real_label] = 1 else: answer_list[real_label] += 1 print('Full dictionary: ',answer_list) print("Number of distinct labels: ", len(answer_list)) return answer_list if __name__ == "__main__": load_dotenv() api_key = os.getenv("API_KEY") user = os.getenv("USER") model = os.getenv("MODEL") url = "" if args.llama_ver == "llama_70b": url = os.getenv("LLAMA_70b_URL") elif args.llama_ver == "llama_13b": url = os.getenv("LLAMA_13b_URL") elif args.llama_ver == "llama_7b": url = os.getenv("LLAMA_7b_URL") # post process gpt_labels.txt answer_list = post_process() answer_list = {k: v for k, v in answer_list.items() if v > 5} print("Post-processed dictionary: ",answer_list) # read system prompt with open(args.step2b_prompt_path, 'r') as file: system_prompt = file.read() system_prompt = system_prompt.replace("[__NUM_CLASSES_CLUSTER__]", str(args.num_classes)) system_prompt = system_prompt.replace("[__LEN__]", str(len(answer_list))) # print(system_prompt) # feed into gpt. user_prompt = f"list of labels: {answer_list}\n" user_prompt += f"num_classes: {args.num_classes}" if args.llama: response = get_llama_response(system_prompt, user_prompt, url) print(response) else:
response = get_gpt_response(system_prompt, user_prompt, api_key, user, model)
1
2023-10-27 05:00:14+00:00
2k
phineas-pta/comfy-trt-test
comfy_trt/model_manager.py
[ { "identifier": "ModelConfig", "path": "comfy_trt/datastructures.py", "snippet": "class ModelConfig:\n\tprofile: dict\n\tstatic_shapes: bool = False\n\tfp32: bool = False\n\tbaseline_model: str = \"SD15\" # save model info, for values see `comfy/supported_models.py`, breaking change incompatible A1111\n\tprediction_type: str = \"ModelType.EPS\" # save model info, for values see `comfy/model_base.py`, breaking change incompatible A1111\n\tinpaint: bool = False\n\trefit: bool = False\n\tlora: bool = False\n\tunet_hidden_dim: int = 4\n\n\tdef is_compatible_from_dict(self, feed_dict: dict) -> tuple[bool, float]:\n\t\tdistance = 0\n\t\tfor k, v in feed_dict.items():\n\t\t\t_min, _opt, _max = self.profile[k]\n\t\t\tv_tensor = torch.Tensor(list(v.shape))\n\t\t\tr_min = torch.Tensor(_max) - v_tensor\n\t\t\tr_opt = (torch.Tensor(_opt) - v_tensor).abs()\n\t\t\tr_max = v_tensor - torch.Tensor(_min)\n\t\t\tif torch.any(r_min < 0) or torch.any(r_max < 0):\n\t\t\t\treturn False, distance\n\t\t\tdistance += r_opt.sum() + 0.5 * (r_max.sum() + 0.5 * r_min.sum())\n\t\treturn True, distance\n\n\tdef is_compatible(self, width: int, height: int, batch_size: int, max_embedding: int) -> tuple[bool, float]:\n\t\tsample = self.profile[\"sample\"]\n\t\tembedding = self.profile[\"encoder_hidden_states\"]\n\n\t\tbatch_size *= 2\n\t\twidth //= 8\n\t\theight //= 8\n\n\t\t_min, _opt, _max = sample\n\t\t_min_em, _opt_em, _max_em = embedding\n\t\tif (\n\t\t\t_min[0] > batch_size or _max[0] < batch_size\n\t\t\tor _min[2] > height or _max[2] < height\n\t\t\tor _min[3] > width or _max[3] < width\n\t\t\tor _min_em[1] > max_embedding or _max_em[1] < max_embedding\n\t\t):\n\t\t\treturn False, 0\n\t\telse:\n\t\t\tdistance = (\n\t\t\t\tabs(_opt[0] - batch_size)\n\t\t\t\t+ abs(_opt[2] - height)\n\t\t\t\t+ abs(_opt[3] - width)\n\t\t\t\t+ 0.5 * (abs(_max[2] - height) + abs(_max[3] - width))\n\t\t\t)\n\t\t\treturn True, distance" }, { "identifier": "ModelConfigEncoder", "path": "comfy_trt/datastructures.py", "snippet": "class ModelConfigEncoder(json.JSONEncoder):\n\tdef default(self, o: ModelConfig) -> dict:\n\t\treturn o.__dict__" } ]
import hashlib
import json
import os
import logging
import copy
import torch
from .datastructures import ModelConfig, ModelConfigEncoder
1538
# -*- coding: utf-8 -*- # modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/model_manager.py # CHANGE: retrieve checkpoint info from comfy # STATUS: ok i guess BASE_PATH = os.path.dirname(os.path.realpath(__file__)) ONNX_MODEL_DIR = os.path.join(BASE_PATH, "Unet-onnx") if not os.path.exists(ONNX_MODEL_DIR): os.makedirs(ONNX_MODEL_DIR) TRT_MODEL_DIR = os.path.join(BASE_PATH, "Unet-trt") if not os.path.exists(TRT_MODEL_DIR): os.makedirs(TRT_MODEL_DIR) MODEL_FILE = os.path.join(TRT_MODEL_DIR, "model.json") def get_cc() -> tuple[int]: res = torch.cuda.get_device_properties(int(os.getenv("CUDA_VISIBLE_DEVICES", 0))) return res.major, res.minor cc_major, cc_minor = get_cc() class ModelManager: def __init__(self, model_file: str = MODEL_FILE): self.all_models = {} self.model_file = model_file self.cc = f"cc{cc_major}{cc_minor}" if not os.path.exists(model_file): logging.warning("Model file does not exist. Creating new one.") else: self.all_models = self.read_json() self.update() @staticmethod def get_onnx_path(model_name: str) -> tuple[str]: onnx_filename = f"{model_name}.onnx" onnx_path = os.path.join(ONNX_MODEL_DIR, onnx_filename) return onnx_filename, onnx_path def get_trt_path(self, model_name: str, profile: dict, static_shape: bool) -> tuple[str]: profile_hash = [] n_profiles = 1 if static_shape else 3 for k, v in profile.items(): dim_hash = [] for i in range(n_profiles): dim_hash.append("x".join([str(x) for x in v[i]])) profile_hash.append(k + "=" + "+".join(dim_hash)) # shorter hash coz windows file path length limit hash_str = hashlib.blake2b("-".join(profile_hash).encode("utf-8"), digest_size=16).hexdigest() # 16 digest = 32 char (original >110 char) trt_filename = model_name + "_" + hash_str + ".trt" trt_path = os.path.join(TRT_MODEL_DIR, trt_filename) return trt_filename, trt_path def get_weights_map_path(self, model_name: str): return os.path.join(TRT_MODEL_DIR, f"{model_name}_weights_map.json") def update(self) -> None: trt_engines = [trt_file for trt_file in os.listdir(TRT_MODEL_DIR) if trt_file.endswith(".trt")] tmp_all_models = copy.deepcopy(self.all_models) for cc, base_models in tmp_all_models.items(): for base_model, models in base_models.items(): tmp_config_list = {} for model_config in models: if model_config["filepath"] not in trt_engines: logging.info(f"Model config outdated. {model_config['filepath']} was not found") continue tmp_config_list[model_config["filepath"]] = model_config tmp_config_list = list(tmp_config_list.values()) if len(tmp_config_list) == 0: self.all_models[cc].pop(base_model) else: self.all_models[cc][base_model] = models self.write_json() def add_entry( self, model_name: str, profile: dict, static_shapes: bool, fp32: bool, baseline_model: str, prediction_type: str, inpaint: bool, refit: bool, unet_hidden_dim: int, lora: bool ) -> None:
# -*- coding: utf-8 -*- # modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/model_manager.py # CHANGE: retrieve checkpoint info from comfy # STATUS: ok i guess BASE_PATH = os.path.dirname(os.path.realpath(__file__)) ONNX_MODEL_DIR = os.path.join(BASE_PATH, "Unet-onnx") if not os.path.exists(ONNX_MODEL_DIR): os.makedirs(ONNX_MODEL_DIR) TRT_MODEL_DIR = os.path.join(BASE_PATH, "Unet-trt") if not os.path.exists(TRT_MODEL_DIR): os.makedirs(TRT_MODEL_DIR) MODEL_FILE = os.path.join(TRT_MODEL_DIR, "model.json") def get_cc() -> tuple[int]: res = torch.cuda.get_device_properties(int(os.getenv("CUDA_VISIBLE_DEVICES", 0))) return res.major, res.minor cc_major, cc_minor = get_cc() class ModelManager: def __init__(self, model_file: str = MODEL_FILE): self.all_models = {} self.model_file = model_file self.cc = f"cc{cc_major}{cc_minor}" if not os.path.exists(model_file): logging.warning("Model file does not exist. Creating new one.") else: self.all_models = self.read_json() self.update() @staticmethod def get_onnx_path(model_name: str) -> tuple[str]: onnx_filename = f"{model_name}.onnx" onnx_path = os.path.join(ONNX_MODEL_DIR, onnx_filename) return onnx_filename, onnx_path def get_trt_path(self, model_name: str, profile: dict, static_shape: bool) -> tuple[str]: profile_hash = [] n_profiles = 1 if static_shape else 3 for k, v in profile.items(): dim_hash = [] for i in range(n_profiles): dim_hash.append("x".join([str(x) for x in v[i]])) profile_hash.append(k + "=" + "+".join(dim_hash)) # shorter hash coz windows file path length limit hash_str = hashlib.blake2b("-".join(profile_hash).encode("utf-8"), digest_size=16).hexdigest() # 16 digest = 32 char (original >110 char) trt_filename = model_name + "_" + hash_str + ".trt" trt_path = os.path.join(TRT_MODEL_DIR, trt_filename) return trt_filename, trt_path def get_weights_map_path(self, model_name: str): return os.path.join(TRT_MODEL_DIR, f"{model_name}_weights_map.json") def update(self) -> None: trt_engines = [trt_file for trt_file in os.listdir(TRT_MODEL_DIR) if trt_file.endswith(".trt")] tmp_all_models = copy.deepcopy(self.all_models) for cc, base_models in tmp_all_models.items(): for base_model, models in base_models.items(): tmp_config_list = {} for model_config in models: if model_config["filepath"] not in trt_engines: logging.info(f"Model config outdated. {model_config['filepath']} was not found") continue tmp_config_list[model_config["filepath"]] = model_config tmp_config_list = list(tmp_config_list.values()) if len(tmp_config_list) == 0: self.all_models[cc].pop(base_model) else: self.all_models[cc][base_model] = models self.write_json() def add_entry( self, model_name: str, profile: dict, static_shapes: bool, fp32: bool, baseline_model: str, prediction_type: str, inpaint: bool, refit: bool, unet_hidden_dim: int, lora: bool ) -> None:
config = ModelConfig(profile, static_shapes, fp32, baseline_model, prediction_type, inpaint, refit, lora, unet_hidden_dim)
0
2023-10-25 23:58:12+00:00
2k
hydrogram/hydrogram
hydrogram/raw/core/gzip_packed.py
[ { "identifier": "Bytes", "path": "hydrogram/raw/core/primitives/bytes.py", "snippet": "class Bytes(bytes, TLObject):\n @classmethod\n def read(cls, data: BytesIO, *args: Any) -> bytes:\n length = int.from_bytes(data.read(1), \"little\")\n\n if length <= 253:\n x = data.read(length)\n data.read(-(length + 1) % 4)\n else:\n length = int.from_bytes(data.read(3), \"little\")\n x = data.read(length)\n data.read(-length % 4)\n\n return x\n\n def __new__(cls, value: bytes) -> bytes: # type: ignore\n length = len(value)\n\n if length <= 253:\n return bytes([length]) + value + bytes(-(length + 1) % 4)\n return bytes([254]) + length.to_bytes(3, \"little\") + value + bytes(-length % 4)" }, { "identifier": "Int", "path": "hydrogram/raw/core/primitives/int.py", "snippet": "class Int(bytes, TLObject):\n SIZE = 4\n\n @classmethod\n def read(cls, data: BytesIO, signed: bool = True, *args: Any) -> int:\n return int.from_bytes(data.read(cls.SIZE), \"little\", signed=signed)\n\n def __new__(cls, value: int, signed: bool = True) -> bytes: # type: ignore\n return value.to_bytes(cls.SIZE, \"little\", signed=signed)" }, { "identifier": "TLObject", "path": "hydrogram/raw/core/tl_object.py", "snippet": "class TLObject:\n __slots__: list[str] = []\n\n QUALNAME = \"Base\"\n\n @classmethod\n def read(cls, b: BytesIO, *args: Any) -> Any:\n return cast(TLObject, objects[int.from_bytes(b.read(4), \"little\")]).read(b, *args)\n\n def write(self, *args: Any) -> bytes:\n pass\n\n @staticmethod\n def default(obj: \"TLObject\") -> Union[str, dict[str, str]]:\n if isinstance(obj, bytes):\n return repr(obj)\n\n return {\n \"_\": obj.QUALNAME,\n **{\n attr: getattr(obj, attr)\n for attr in obj.__slots__\n if getattr(obj, attr) is not None\n },\n }\n\n def __str__(self) -> str:\n return dumps(self, indent=4, default=TLObject.default, ensure_ascii=False)\n\n def __repr__(self) -> str:\n return (\n f'hydrogram.raw.{self.QUALNAME}({\", \".join(f\"{attr}={getattr(self, attr)!r}\" for attr in self.__slots__ if getattr(self, attr) is not None)})'\n if hasattr(self, \"QUALNAME\")\n else repr(self)\n )\n\n def __eq__(self, other: Any) -> bool:\n for attr in self.__slots__:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n\n return True\n\n def __len__(self) -> int:\n return len(self.write())\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n pass" } ]
from gzip import compress, decompress
from io import BytesIO
from typing import Any, cast
from .primitives.bytes import Bytes
from .primitives.int import Int
from .tl_object import TLObject
1159
# Hydrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2023 Dan <https://github.com/delivrance> # Copyright (C) 2023-present Hydrogram <https://hydrogram.org> # # This file is part of Hydrogram. # # Hydrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Hydrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Hydrogram. If not, see <http://www.gnu.org/licenses/>. class GzipPacked(TLObject): ID = 0x3072CFA1 __slots__ = ["packed_data"] QUALNAME = "GzipPacked" def __init__(self, packed_data: TLObject): self.packed_data = packed_data @staticmethod def read(data: BytesIO, *args: Any) -> "GzipPacked": # Return the Object itself instead of a GzipPacked wrapping it
# Hydrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2023 Dan <https://github.com/delivrance> # Copyright (C) 2023-present Hydrogram <https://hydrogram.org> # # This file is part of Hydrogram. # # Hydrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Hydrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Hydrogram. If not, see <http://www.gnu.org/licenses/>. class GzipPacked(TLObject): ID = 0x3072CFA1 __slots__ = ["packed_data"] QUALNAME = "GzipPacked" def __init__(self, packed_data: TLObject): self.packed_data = packed_data @staticmethod def read(data: BytesIO, *args: Any) -> "GzipPacked": # Return the Object itself instead of a GzipPacked wrapping it
return cast(GzipPacked, TLObject.read(BytesIO(decompress(Bytes.read(data)))))
0
2023-10-29 16:16:37+00:00
2k
chenruduan/OAReactDiff
oa_reactdiff/tests/utils/test_graph_tools.py
[ { "identifier": "get_edges_index", "path": "oa_reactdiff/utils/_graph_tools.py", "snippet": "def get_edges_index(\n combined_mask: Tensor,\n pos: Optional[Tensor] = None,\n edge_cutoff: Optional[float] = None,\n remove_self_edge: bool = False,\n) -> Tensor:\n r\"\"\"\n\n Args:\n combined_mask (Tensor): Combined mask for all fragments.\n Edges are built for nodes with the same indexes in the mask.\n pos (Optional[Tensor]): 3D coordinations of nodes. Defaults to None.\n edge_cutoff (Optional[float]): cutoff for building edges within a fragment.\n Defaults to None.\n remove_self_edge (bool): whether to remove self-connecting edge (i.e., ii).\n Defaults to False.\n\n Returns:\n Tensor: [2, n_edges], i for node index.\n \"\"\"\n # TODO: cache batches for each example in self._edges_dict[n_nodes]\n adj = combined_mask[:, None] == combined_mask[None, :]\n if edge_cutoff is not None:\n adj = adj & (torch.cdist(pos, pos) <= edge_cutoff)\n if remove_self_edge:\n adj = adj.fill_diagonal_(False)\n edges = torch.stack(torch.where(adj), dim=0)\n return edges" }, { "identifier": "get_subgraph_mask", "path": "oa_reactdiff/utils/_graph_tools.py", "snippet": "def get_subgraph_mask(edge_index: Tensor, n_frag_switch: Tensor) -> Tensor:\n r\"\"\"Filter out edges that have inter-fragment connections.\n Example:\n edge_index: [\n [0, 0, 1, 1, 2, 2],\n [1, 2, 0, 2, 0, 1],\n ]\n n_frag_switch: [0, 0, 1]\n -> [1, 0, 1, 0, 0, 0]\n\n Args:\n edge_index (Tensor): e_ij\n n_frag_switch (Tensor): fragment that a node belongs to\n\n Returns:\n Tensor: [n_edge], 1 for inner- and 0 for inter-fragment edge\n \"\"\"\n subgraph_mask = torch.zeros(edge_index.size(1)).long()\n in_same_frag = n_frag_switch[edge_index[0]] == n_frag_switch[edge_index[1]]\n subgraph_mask[torch.where(in_same_frag)] = 1\n return subgraph_mask.to(edge_index.device)" }, { "identifier": "get_n_frag_switch", "path": "oa_reactdiff/utils/_graph_tools.py", "snippet": "def get_n_frag_switch(natm_list: List[Tensor]) -> Tensor:\n r\"\"\"Get the type of fragments to which each node belongs\n Example: [Tensor(1, 1), Tensor(2, 1)] -> [0, 0, 1, 1 ,1]\n\n Args:\n natm_list (List[Tensor]): [Tensor([number of atoms per small fragment])]\n\n Returns:\n Tensor: [n_nodes], type of fragment each node belongs to\n \"\"\"\n shapes = [natm.shape[0] for natm in natm_list]\n assert np.std(shapes) == 0, \"Tensor must be the same length for <natom_list>\"\n n_frag_switch = torch.repeat_interleave(\n torch.arange(len(natm_list), device=natm_list[0].device),\n torch.tensor(\n [torch.sum(natm).item() for natm in natm_list],\n device=natm_list[0].device,\n ),\n )\n return n_frag_switch.to(natm_list[0].device)" }, { "identifier": "get_mask_for_frag", "path": "oa_reactdiff/utils/_graph_tools.py", "snippet": "def get_mask_for_frag(natm: Tensor) -> Tensor:\n r\"\"\"Get fragment index for each node\n Example: Tensor([2, 0, 3]) -> [0, 0, 2, 2, 2]\n\n Args:\n natm (Tensor): number of nodes per small fragment\n\n Returns:\n Tensor: [n_node], the natural index of fragment a node belongs to\n \"\"\"\n return torch.repeat_interleave(\n torch.arange(natm.size(0), device=natm.device), natm\n ).to(natm.device)" } ]
import unittest
import torch
from torch import Tensor, tensor
from oa_reactdiff.utils import (
    get_edges_index,
    get_subgraph_mask,
    get_n_frag_switch,
    get_mask_for_frag,
)
1138
class TestBasics(unittest.TestCase): def test_get_mask_for_frag(self): natms = Tensor([2, 0, 3]).long() res = get_mask_for_frag(natms) self.assertTrue(torch.allclose(res, Tensor([0, 0, 2, 2, 2]).long())) def test_get_n_frag_switch(self): natm_list = [tensor([2, 0]), tensor([1, 3]), tensor([3, 2])]
class TestBasics(unittest.TestCase): def test_get_mask_for_frag(self): natms = Tensor([2, 0, 3]).long() res = get_mask_for_frag(natms) self.assertTrue(torch.allclose(res, Tensor([0, 0, 2, 2, 2]).long())) def test_get_n_frag_switch(self): natm_list = [tensor([2, 0]), tensor([1, 3]), tensor([3, 2])]
res = get_n_frag_switch(natm_list)
2
2023-10-30 02:53:38+00:00
2k
lewandofskee/DiAD
ldm/models/diffusion/ddim.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" } ]
import torch
import numpy as np
from tqdm import tqdm
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
1171
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True,timesteps=1000): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True,timesteps=1000): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
0
2023-10-30 14:21:09+00:00
2k
nv-tlabs/trace
tbsim/models/temporal.py
[ { "identifier": "SinusoidalPosEmb", "path": "tbsim/models/trace_helpers.py", "snippet": "class SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=device) * -emb)\n emb = x[:, None] * emb[None, :]\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb" }, { "identifier": "Downsample1d", "path": "tbsim/models/trace_helpers.py", "snippet": "class Downsample1d(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.conv = nn.Conv1d(dim, dim, 3, 2, 1)\n\n def forward(self, x):\n return self.conv(x)" }, { "identifier": "Upsample1d", "path": "tbsim/models/trace_helpers.py", "snippet": "class Upsample1d(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.conv = nn.ConvTranspose1d(dim, dim, 4, 2, 1)\n\n def forward(self, x):\n return self.conv(x)" }, { "identifier": "Conv1dBlock", "path": "tbsim/models/trace_helpers.py", "snippet": "class Conv1dBlock(nn.Module):\n '''\n Conv1d --> GroupNorm --> Mish\n '''\n\n def __init__(self, inp_channels, out_channels, kernel_size, n_groups=8):\n super().__init__()\n\n self.block = nn.Sequential(\n nn.Conv1d(inp_channels, out_channels, kernel_size, padding=kernel_size // 2),\n Rearrange('batch channels horizon -> batch channels 1 horizon'),\n nn.GroupNorm(n_groups, out_channels),\n Rearrange('batch channels 1 horizon -> batch channels horizon'),\n nn.Mish(),\n )\n\n def forward(self, x):\n return self.block(x)" } ]
import torch import torch.nn as nn import einops from einops.layers.torch import Rearrange from .trace_helpers import ( SinusoidalPosEmb, Downsample1d, Upsample1d, Conv1dBlock, )
935
# # Based on Diffuser: https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py # class ResidualTemporalMapBlockConcat(nn.Module): def __init__(self, inp_channels, out_channels, time_embed_dim, horizon, kernel_size=5): super().__init__() self.time_mlp = nn.Sequential( nn.Mish(), nn.Linear(time_embed_dim, out_channels), Rearrange('batch t -> batch t 1'), ) self.blocks = nn.ModuleList([ Conv1dBlock(inp_channels, out_channels, kernel_size), Conv1dBlock(out_channels, out_channels, kernel_size), ]) self.residual_conv = nn.Conv1d(inp_channels, out_channels, 1) \ if inp_channels != out_channels else nn.Identity() def forward(self, x, t): ''' x : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x out_channels x horizon ] ''' out = self.blocks[0](x) + self.time_mlp(t) out = self.blocks[1](out) return out + self.residual_conv(x) class TemporalMapUnet(nn.Module): def __init__( self, horizon, transition_dim, cond_dim, output_dim, dim=32, dim_mults=(1, 2, 4, 8), ): super().__init__() ResidualTemporalMapBlock = ResidualTemporalMapBlockConcat dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) print(f'[ models/temporal ] Channel dimensions: {in_out}') time_dim = dim self.time_mlp = nn.Sequential(
# # Based on Diffuser: https://github.com/jannerm/diffuser/blob/main/diffuser/models/temporal.py # class ResidualTemporalMapBlockConcat(nn.Module): def __init__(self, inp_channels, out_channels, time_embed_dim, horizon, kernel_size=5): super().__init__() self.time_mlp = nn.Sequential( nn.Mish(), nn.Linear(time_embed_dim, out_channels), Rearrange('batch t -> batch t 1'), ) self.blocks = nn.ModuleList([ Conv1dBlock(inp_channels, out_channels, kernel_size), Conv1dBlock(out_channels, out_channels, kernel_size), ]) self.residual_conv = nn.Conv1d(inp_channels, out_channels, 1) \ if inp_channels != out_channels else nn.Identity() def forward(self, x, t): ''' x : [ batch_size x inp_channels x horizon ] t : [ batch_size x embed_dim ] returns: out : [ batch_size x out_channels x horizon ] ''' out = self.blocks[0](x) + self.time_mlp(t) out = self.blocks[1](out) return out + self.residual_conv(x) class TemporalMapUnet(nn.Module): def __init__( self, horizon, transition_dim, cond_dim, output_dim, dim=32, dim_mults=(1, 2, 4, 8), ): super().__init__() ResidualTemporalMapBlock = ResidualTemporalMapBlockConcat dims = [transition_dim, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) print(f'[ models/temporal ] Channel dimensions: {in_out}') time_dim = dim self.time_mlp = nn.Sequential(
SinusoidalPosEmb(time_dim),
0
2023-10-31 18:43:07+00:00
2k
gydpku/PPTC
src/evaluate.py
[ { "identifier": "api_doc", "path": "src/api_doc.py", "snippet": "class API(object):\n def __init__(self, name, parameters, description,\n parameter_description=\"\", composition_instruction=\"\", example=\"\", api_desc=\"\",\n type=\"\",\n implementation=None,\n ):\n def __str__(self):\ndef random_permutation(lst):\ndef get_all_APIs(args):\ndef get_API_name(apis):\ndef get_API_desc(apis):\ndef get_must_APIs(args):\ndef api_lack_mask(apis):" }, { "identifier": "prompt_factor", "path": "src/prompt_factor.py", "snippet": "def get_instruction_to_API_code_prompt(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):\ndef get_instruction_to_API_code_prompt2(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):" }, { "identifier": "ppt_reader", "path": "src/ppt_reader.py", "snippet": "SCALE = 1000\ndef get_fill_color(shape):\n def __init__(self, shape):\n def text_info(self):\n def space_info(self):\n def size_info(self):\n def style_info(self):\n def discription(self):\n def __repr__(self):\n def __init__(self, shape, id=None):\n def style_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\n def discription(self):\n def __init__(self, shape, id=None):\n def text_info(self):\n def style_info(self):\n def discription(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\n def __init__(self, shape):\n def text_info(self):\n def style_info(self):\ndef hasshape(shape_str, shape_list):\ndef get_content(need_text,need_style,need_position,need_title,need_content,need_picture,need_table,need_chart,need_textbox,need_shape):\ndef get_content_by_instructions(ppt_path, instruction, args, ppt):\ndef eval_get_contents(need_text=True, need_style=True, need_position=True, need_shape_list=None, ppt=None):\nclass BasicShape:\nclass Picture(BasicShape):\nclass Table(BasicShape):\nclass Chart(BasicShape):\nclass Textbox(BasicShape):\nclass Placeholder(BasicShape):\nclass AutoShape(BasicShape):" }, { "identifier": "utils", "path": "src/utils.py", "snippet": "def write_list(lst, filename):\ndef read_list(filename):\ndef write_lines(lst, path):\ndef read_lines(path):\ndef makedir(path):\ndef merge_list(lst):\ndef get_picture_name(labels):\ndef get_picture_name_list(args):\ndef parse_api(codes):\ndef prepare_exp_name(args):\ndef get_tokens(text):\ndef calc_api_cost(path):\ndef check_token(model, prompt):\ndef get_token(text, trunc_num, model):\ndef checkpoint(mode,args,idx,step):\ndef sorted_list(path):\ndef parse_train_json(path):\ndef parse_test_json(path):" }, { "identifier": "pptx_check", "path": "src/pptx_check.py", "snippet": "SLIDE_HEIGHT_MIDDLE = 3429000\nSLIDE_WIDTH_MIDDLE = 4572000\n A = choose_object(slide, A)\n A = choose_object(slide, A)\n B = choose_object(slide, B)\ndef check_left(A,B):\ndef check_top(A,B):\ndef check_right(A,B):\ndef check_bottom(A,B):\ndef check_slide_left(A):\ndef check_slide_top(A):\ndef check_slide_right(A):\ndef check_slide_bottom(A):\ndef check_slide_center(A):\ndef choose_table(slide):\ndef choose_chart(slide):\ndef choose_content(slide):\ndef choose_title(slide):\ndef choose_shape(slide, shape_name):\ndef choose_picture(slide,idx=0):\ndef choose_textbox(slide,idx=0):\ndef choose_object(slide, object_name):\ndef check(slide, A, B, rel):" } ]
from src import api_doc from src import prompt_factor from src import ppt_reader, utils from pptx import Presentation from src import pptx_check from sacremoses import MosesTokenizer from tqdm import tqdm import mosestokenizer import os
1,181
def calc_token_cost(path): text = open(path,'r').read() tokenizer = MosesTokenizer() tokens = tokenizer.tokenize(text) return len(tokens) def calc_acc(label_path, pred_path, instruction, additional_restrictions=[]): pos_total, pos_correct, str_correct = 0,0,0 # position splitted = instruction.split('##') instruction, restrictions = splitted[0], splitted[1:] restrictions += additional_restrictions if len(restrictions) > 0: pos_total = 1 pos_correct = 1 # try: ppt = Presentation(pred_path) for res in restrictions: slide_id,A,B,rel = [x.strip(" ") for x in res.split(",")] try: slide = ppt.slides[int(slide_id)] pos_correct *= pptx_check.check(slide, A, B, rel) except: print(res) print(instruction) pos_correct = 0 # string
def calc_token_cost(path): text = open(path,'r').read() tokenizer = MosesTokenizer() tokens = tokenizer.tokenize(text) return len(tokens) def calc_acc(label_path, pred_path, instruction, additional_restrictions=[]): pos_total, pos_correct, str_correct = 0,0,0 # position splitted = instruction.split('##') instruction, restrictions = splitted[0], splitted[1:] restrictions += additional_restrictions if len(restrictions) > 0: pos_total = 1 pos_correct = 1 # try: ppt = Presentation(pred_path) for res in restrictions: slide_id,A,B,rel = [x.strip(" ") for x in res.split(",")] try: slide = ppt.slides[int(slide_id)] pos_correct *= pptx_check.check(slide, A, B, rel) except: print(res) print(instruction) pos_correct = 0 # string
label_string = ppt_reader.eval_get_contents(need_text=True, need_style=True, need_position=False,need_shape_list=None,ppt=Presentation(label_path))
2
2023-10-25 13:14:46+00:00
2k
secarri/MipFlooding
mipflooding/image_processing.py
[ { "identifier": "setup_logger", "path": "mipflooding/logger.py", "snippet": "def setup_logger(logger_name: str, abs_log_path: str) -> logging.Logger:\n \"\"\"Set up a logger with the specified name and log to the given absolute path, returning the logger instance.\"\"\"\n logger = logging.getLogger(logger_name)\n if not logger.handlers:\n handler = logging.FileHandler(abs_log_path)\n formatter = logging.Formatter('[%(asctime)s] - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n return logger" }, { "identifier": "terminate_loggers", "path": "mipflooding/logger.py", "snippet": "def terminate_loggers(logger: logging.Logger) -> None:\n \"\"\"Terminate and close all handlers associated with the given logger, releasing any associated resources.\"\"\"\n handlers = logger.handlers[:]\n for handler in handlers:\n handler.close()\n logger.removeHandler(handler)" }, { "identifier": "clear_log_file", "path": "mipflooding/file_utils.py", "snippet": "def clear_log_file(filepath: Path) -> None:\n \"\"\"Clear the content of the log file at the specified 'filepath' if it exists.\"\"\"\n filepath.write_text(\"\", encoding=\"utf-8\")" }, { "identifier": "get_output_directory", "path": "mipflooding/file_utils.py", "snippet": "def get_output_directory(filepath: str) -> str | None:\n \"\"\"Get the parent directory of the specified 'filepath' as a string if it exists, or return None.\"\"\"\n parent_path = Path(filepath).parent\n return parent_path.__str__() if parent_path.exists() else None" }, { "identifier": "get_output_filename", "path": "mipflooding/file_utils.py", "snippet": "def get_output_filename(filepath: str) -> str | None:\n \"\"\"Get the filename from the specified 'filepath' as a string, or return None if the path is empty.\"\"\"\n return Path(filepath).name.__str__()" } ]
import logging import math import os import time from pathlib import Path from typing import List, Optional from PIL import Image from .logger import setup_logger, terminate_loggers from .file_utils import clear_log_file, get_output_directory, get_output_filename
1,599
# Default packages # Third party packages # From self package def _open_image_inputs(color: str, alpha: str, logger: logging.Logger) -> List: """Open and return the color and alpha images as a list of Image objects.""" logger.info("--- Opening images in memory...") if not color: color = str(None) if not alpha: alpha = str(None) color_map = None if not Path(color).exists() else Image.open(color) alpha_mask = None if not Path(alpha).exists() else Image.open(alpha).convert('L') if color_map: logger.info(f"--- File disk size: {os.path.getsize(color) / float(1 << 20):,.2f} MB") return [color_map, alpha_mask] def _validate_inputs(color: Image, alpha_mask: Image, logger: logging.Logger, input_texture_color_abs_path: str) -> str | Optional[None]: if color is None or alpha_mask is None: message = f"One or more inputs do not exist:\n\t-Color: {color}\n\t-Alpha: {alpha_mask}. Skipping..." elif not _do_resolutions_match(color, alpha_mask, logger): message = f"Inputs do not match in resolution for file: {input_texture_color_abs_path}. Skipping..." elif not _is_power_of_two_image(color, logger): message = f"Input is not a power of two image: {input_texture_color_abs_path}. Skipping..." else: message = None return message def _do_resolutions_match(color: Image, alpha: Image, logger: logging.Logger) -> bool: """Check if the resolutions of color and alpha images match.""" logger.info("--- Verifying that inputs resolutions do match ...") return True if color.size == alpha.size else False def _is_power_of_two_image(color: Image, logger: logging.Logger) -> bool: """Check if all dimensions of the input image are powers of two.""" logger.info("--- Verifying that inputs are power of two images ...") for res in color.size: if (res & (res - 1)) != 0: return False return True def _get_mip_levels(image: Image, logger: logging.Logger) -> int: """Calculate the number of mip levels based on image size.""" logger.info("--- Calculating mip map levels...") image_short_side = image.size[0] if image.size[0] < image.size[1] else image.size[1] logger.info(f"--- Done. Miplevels: {round(math.log2(image_short_side))}") return round(math.log2(image_short_side)) def _generate_background(image: Image, logger: logging.Logger) -> Image: """Generate a background image and returns the result Image object.""" logger.info("--- Generating background image and storing it in memory...") average_image_color = image.resize((1, 1)) up_scaled_avg = average_image_color.resize(image.size, Image.NEAREST) return up_scaled_avg def _calculate_image_height(image_width: int, image: Image) -> int: """Calculate the height of the image based on the specified width.""" width_percent = (image_width / float(image.size[0])) new_height = int((float(image.size[1]) * float(width_percent))) return new_height def _stack_mip_levels(average_bgr: str, miplevels: int, color: Image, origin_width: int, origin_height: int, output_dir: str, logger: logging.Logger, resample: Image.Resampling = Image.BOX) -> None: """Stack Mipmap levels on a background Image with alpha integration to generate a single Image.""" stack = average_bgr logger.info(f"--- Storing original resolution in memory: {origin_width, origin_height}") logger.info(f"--- Beginning the stacking process. \nPlease wait...") for miplevel in range(miplevels): width = 2 ** (miplevel + 1) height = _calculate_image_height(width, color) new_image = color.resize((width, height), resample) to_stack = new_image.copy().resize((origin_width, origin_height), Image.NEAREST) img_copy = stack.copy() img_copy.paste(to_stack, (0, 0), to_stack) stack = img_copy.copy() logger.info(f"--- Saving stack to file: {output_dir}") stack.save(output_dir) logger.info(f"--- Output disk size: {os.path.getsize(output_dir) / float(1 << 20):,.2f} MB") def _log_and_terminate(logger, message, level=logging.ERROR): """Log the given 'message' at the specified 'level' using the 'logger', and then terminate the logger.""" logger.log(level=level, msg=message) terminate_loggers(logger) def _make_logger_for_file(directory: str, filename: str) -> logging.Logger: """Constructs the full path to a log file, clears the existing log file, and sets up a logger.""" logs_directory = os.path.join(directory, "logs") Path(logs_directory).mkdir(parents=True, exist_ok=True) out_log_file = Path(os.path.join(logs_directory, f"{filename.split('.')[0]}.txt")) clear_log_file(out_log_file)
# Default packages # Third party packages # From self package def _open_image_inputs(color: str, alpha: str, logger: logging.Logger) -> List: """Open and return the color and alpha images as a list of Image objects.""" logger.info("--- Opening images in memory...") if not color: color = str(None) if not alpha: alpha = str(None) color_map = None if not Path(color).exists() else Image.open(color) alpha_mask = None if not Path(alpha).exists() else Image.open(alpha).convert('L') if color_map: logger.info(f"--- File disk size: {os.path.getsize(color) / float(1 << 20):,.2f} MB") return [color_map, alpha_mask] def _validate_inputs(color: Image, alpha_mask: Image, logger: logging.Logger, input_texture_color_abs_path: str) -> str | Optional[None]: if color is None or alpha_mask is None: message = f"One or more inputs do not exist:\n\t-Color: {color}\n\t-Alpha: {alpha_mask}. Skipping..." elif not _do_resolutions_match(color, alpha_mask, logger): message = f"Inputs do not match in resolution for file: {input_texture_color_abs_path}. Skipping..." elif not _is_power_of_two_image(color, logger): message = f"Input is not a power of two image: {input_texture_color_abs_path}. Skipping..." else: message = None return message def _do_resolutions_match(color: Image, alpha: Image, logger: logging.Logger) -> bool: """Check if the resolutions of color and alpha images match.""" logger.info("--- Verifying that inputs resolutions do match ...") return True if color.size == alpha.size else False def _is_power_of_two_image(color: Image, logger: logging.Logger) -> bool: """Check if all dimensions of the input image are powers of two.""" logger.info("--- Verifying that inputs are power of two images ...") for res in color.size: if (res & (res - 1)) != 0: return False return True def _get_mip_levels(image: Image, logger: logging.Logger) -> int: """Calculate the number of mip levels based on image size.""" logger.info("--- Calculating mip map levels...") image_short_side = image.size[0] if image.size[0] < image.size[1] else image.size[1] logger.info(f"--- Done. Miplevels: {round(math.log2(image_short_side))}") return round(math.log2(image_short_side)) def _generate_background(image: Image, logger: logging.Logger) -> Image: """Generate a background image and returns the result Image object.""" logger.info("--- Generating background image and storing it in memory...") average_image_color = image.resize((1, 1)) up_scaled_avg = average_image_color.resize(image.size, Image.NEAREST) return up_scaled_avg def _calculate_image_height(image_width: int, image: Image) -> int: """Calculate the height of the image based on the specified width.""" width_percent = (image_width / float(image.size[0])) new_height = int((float(image.size[1]) * float(width_percent))) return new_height def _stack_mip_levels(average_bgr: str, miplevels: int, color: Image, origin_width: int, origin_height: int, output_dir: str, logger: logging.Logger, resample: Image.Resampling = Image.BOX) -> None: """Stack Mipmap levels on a background Image with alpha integration to generate a single Image.""" stack = average_bgr logger.info(f"--- Storing original resolution in memory: {origin_width, origin_height}") logger.info(f"--- Beginning the stacking process. \nPlease wait...") for miplevel in range(miplevels): width = 2 ** (miplevel + 1) height = _calculate_image_height(width, color) new_image = color.resize((width, height), resample) to_stack = new_image.copy().resize((origin_width, origin_height), Image.NEAREST) img_copy = stack.copy() img_copy.paste(to_stack, (0, 0), to_stack) stack = img_copy.copy() logger.info(f"--- Saving stack to file: {output_dir}") stack.save(output_dir) logger.info(f"--- Output disk size: {os.path.getsize(output_dir) / float(1 << 20):,.2f} MB") def _log_and_terminate(logger, message, level=logging.ERROR): """Log the given 'message' at the specified 'level' using the 'logger', and then terminate the logger.""" logger.log(level=level, msg=message) terminate_loggers(logger) def _make_logger_for_file(directory: str, filename: str) -> logging.Logger: """Constructs the full path to a log file, clears the existing log file, and sets up a logger.""" logs_directory = os.path.join(directory, "logs") Path(logs_directory).mkdir(parents=True, exist_ok=True) out_log_file = Path(os.path.join(logs_directory, f"{filename.split('.')[0]}.txt")) clear_log_file(out_log_file)
return setup_logger("mipmap_flooding", out_log_file.__str__())
0
2023-10-25 11:05:59+00:00
2k
Lin-jun-xiang/chatgpt-line-bot
chatgpt_linebot/modules/horoscope.py
[ { "identifier": "chat_completion", "path": "chatgpt_linebot/modules/gpt.py", "snippet": "def chat_completion(message: List[Dict]) -> str:\n \"\"\"Use OpenAI API via gpt4free providers\"\"\"\n try:\n response = g4f.ChatCompletion.create(\n model=g4f.models.default,\n messages=message,\n )\n print(response)\n\n except Exception as e:\n response = (\n \"There're something wrong in openai api, please try again.😱\\n\"\n \"Or connect to developer: https://github.com/Lin-jun-xiang/chatgpt-line-bot/issues\"\n )\n print(e)\n\n return response" }, { "identifier": "horoscope_template", "path": "chatgpt_linebot/prompts/template.py", "snippet": "" } ]
import json import re import requests from bs4 import BeautifulSoup from chatgpt_linebot.modules.gpt import chat_completion from chatgpt_linebot.prompts import horoscope_template
668
class Horoscope: HOST = "https://www.cosmopolitan.com/tw/horoscopes/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' } error_msg = ( "Cannot get the horoscope, please try again.🥶\n" "Or connect to developer: https://github.com/Lin-jun-xiang/chatgpt-line-bot/issues" ) def __init__(self) -> None: self.horoscope_urls = self.get_horoscope_urls() def get_horoscope_urls(self) -> list: """Get all horoscope urls Returns ------- horoscope_urls (List[Dict]): [ {'name': '天蠍座', 'url': 'https://www...'}, {'name': '獅子座', 'url': 'https://www...'}, ... ] """ try: response = requests.get(f"{self.HOST}weekly/", headers=self.headers) soup = BeautifulSoup(response.content, 'html.parser') # Find the script tag containing JSON data script_tag = soup.find('script', {'id': 'json-ld'}) horoscope_urls = [] if not script_tag: return # Extract the text content of the script tag script_content = script_tag.contents[0] # Load the JSON data json_data = json.loads(script_content) # Extract the information for each zodiac sign for item in json_data['itemListElement']: name = item['name'] url = item['url'] horoscope_urls.append({"name": name, "url": url}) return horoscope_urls except Exception as e: print(e) def _process_horoscope_response(self, content: str) -> str: if not content: return f"{self.error_msg}\nContent is None."
class Horoscope: HOST = "https://www.cosmopolitan.com/tw/horoscopes/" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' } error_msg = ( "Cannot get the horoscope, please try again.🥶\n" "Or connect to developer: https://github.com/Lin-jun-xiang/chatgpt-line-bot/issues" ) def __init__(self) -> None: self.horoscope_urls = self.get_horoscope_urls() def get_horoscope_urls(self) -> list: """Get all horoscope urls Returns ------- horoscope_urls (List[Dict]): [ {'name': '天蠍座', 'url': 'https://www...'}, {'name': '獅子座', 'url': 'https://www...'}, ... ] """ try: response = requests.get(f"{self.HOST}weekly/", headers=self.headers) soup = BeautifulSoup(response.content, 'html.parser') # Find the script tag containing JSON data script_tag = soup.find('script', {'id': 'json-ld'}) horoscope_urls = [] if not script_tag: return # Extract the text content of the script tag script_content = script_tag.contents[0] # Load the JSON data json_data = json.loads(script_content) # Extract the information for each zodiac sign for item in json_data['itemListElement']: name = item['name'] url = item['url'] horoscope_urls.append({"name": name, "url": url}) return horoscope_urls except Exception as e: print(e) def _process_horoscope_response(self, content: str) -> str: if not content: return f"{self.error_msg}\nContent is None."
response = chat_completion(
0
2023-10-24 09:01:13+00:00
2k
nv-tlabs/pacer
pacer/env/tasks/vec_task_wrappers.py
[ { "identifier": "VecTaskCPU", "path": "pacer/env/tasks/vec_task.py", "snippet": "class VecTaskCPU(VecTask):\n\n def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0):\n super().__init__(task, rl_device, clip_observations=clip_observations)\n self.sync_frame_time = sync_frame_time\n\n def step(self, actions):\n actions = actions.cpu().numpy()\n self.task.render(self.sync_frame_time)\n\n obs, rewards, resets, extras = self.task.step(actions)\n\n return (to_torch(np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device), to_torch(rewards, dtype=torch.float, device=self.rl_device), to_torch(resets, dtype=torch.uint8, device=self.rl_device), [])\n\n def reset(self):\n actions = 0.01 * (1 - 2 * np.random.rand(self.num_envs, self.num_actions)).astype('f')\n\n # step the simulator\n obs, rewards, resets, extras = self.task.step(actions)\n\n return to_torch(np.clip(obs, -self.clip_obs, self.clip_obs), dtype=torch.float, device=self.rl_device)" }, { "identifier": "VecTaskGPU", "path": "pacer/env/tasks/vec_task.py", "snippet": "class VecTaskGPU(VecTask):\n\n def __init__(self, task, rl_device, clip_observations=5.0):\n super().__init__(task, rl_device, clip_observations=clip_observations)\n\n self.obs_tensor = gymtorch.wrap_tensor(self.task.obs_tensor, counts=(self.task.num_envs, self.task.num_obs))\n self.rewards_tensor = gymtorch.wrap_tensor(self.task.rewards_tensor, counts=(self.task.num_envs,))\n self.resets_tensor = gymtorch.wrap_tensor(self.task.resets_tensor, counts=(self.task.num_envs,))\n\n def step(self, actions):\n self.task.render(False)\n actions_tensor = gymtorch.unwrap_tensor(actions)\n\n self.task.step(actions_tensor)\n\n return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs), self.rewards_tensor, self.resets_tensor, []\n\n def reset(self):\n actions = 0.01 * (1 - 2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))\n actions_tensor = gymtorch.unwrap_tensor(actions)\n\n # step the simulator\n self.task.step(actions_tensor)\n\n return torch.clamp(self.obs_tensor, -self.clip_obs, self.clip_obs)" }, { "identifier": "VecTaskPython", "path": "pacer/env/tasks/vec_task.py", "snippet": "class VecTaskPython(VecTask):\n\n def get_state(self):\n return torch.clamp(self.task.states_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n\n def step(self, actions):\n\n self.task.step(actions)\n\n return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device), self.task.rew_buf.to(self.rl_device), self.task.reset_buf.to(self.rl_device), self.task.extras\n\n def reset(self):\n actions = 0.01 * (1 - 2 * torch.rand([self.task.num_envs, self.task.num_actions], dtype=torch.float32, device=self.rl_device))\n\n # step the simulator\n self.task.step(actions)\n\n return torch.clamp(self.task.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)" } ]
from gym import spaces from pacer.env.tasks.vec_task import VecTaskCPU, VecTaskGPU, VecTaskPython import numpy as np import torch
983
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. class VecTaskCPUWrapper(VecTaskCPU): def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0): super().__init__(task, rl_device, sync_frame_time, clip_observations) return
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. class VecTaskCPUWrapper(VecTaskCPU): def __init__(self, task, rl_device, sync_frame_time=False, clip_observations=5.0): super().__init__(task, rl_device, sync_frame_time, clip_observations) return
class VecTaskGPUWrapper(VecTaskGPU):
1
2023-10-31 20:47:12+00:00
2k
Improbable-AI/dexenv
dexenv/models/state_model.py
[ { "identifier": "DiagGaussianPolicy", "path": "dexenv/models/diag_gaussian_pol/diag_gaussian_policy.py", "snippet": "class DiagGaussianPolicy(nn.Module):\n def __init__(self,\n body_net,\n action_dim,\n init_log_std=-0.2,\n std_cond_in=False,\n tanh_on_dist=False,\n in_features=None,\n clamp_log_std=False): # add tanh on the action distribution\n super().__init__()\n self.std_cond_in = std_cond_in\n self.tanh_on_dist = tanh_on_dist\n self.body = body_net\n self.clamp_log_std = clamp_log_std\n\n if in_features is None:\n for i in reversed(range(len(self.body.fcs))):\n layer = self.body.fcs[i]\n if hasattr(layer, 'out_features'):\n in_features = layer.out_features\n break\n\n self.head_mean = nn.Linear(in_features, action_dim)\n if self.std_cond_in:\n self.head_logstd = nn.Linear(in_features, action_dim)\n else:\n self.head_logstd = nn.Parameter(torch.full((action_dim,),\n init_log_std))\n\n def forward(self, x=None, body_x=None, **kwargs):\n if x is None and body_x is None:\n raise ValueError('One of [x, body_x] should be provided!')\n if body_x is None:\n body_x = self.body(x, **kwargs)\n body_out = body_x[0] if isinstance(body_x, Sequence) else body_x\n mean = self.head_mean(body_out)\n if self.std_cond_in:\n log_std = self.head_logstd(body_out)\n else:\n log_std = self.head_logstd.expand_as(mean)\n if self.clamp_log_std:\n log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)\n std = torch.exp(log_std)\n action_dist = Independent(Normal(loc=mean, scale=std), 1)\n\n if self.tanh_on_dist:\n action_dist = TransformedDistribution(action_dist,\n [TanhTransform(cache_size=1)])\n return action_dist, body_x" }, { "identifier": "get_activation", "path": "dexenv/models/utils.py", "snippet": "def get_activation(act='gleu'):\n logger.info(f'=-' * 20)\n logger.info(f'Using activation:{act}')\n if act == 'tanh':\n return nn.Tanh\n elif act == 'elu':\n return nn.ELU\n elif act == 'relu':\n return nn.ReLU\n elif act == 'leakyrelu':\n return nn.LeakyReLU\n elif act == 'sigmoid':\n return nn.Sigmoid\n elif act == 'swish':\n return nn.SiLU\n elif act == 'gelu':\n return nn.GELU" }, { "identifier": "ValueNet", "path": "dexenv/models/value_nets/value_net.py", "snippet": "class ValueNet(nn.Module):\n def __init__(self,\n body_net,\n in_features=None): # add tanh on the action distribution\n super().__init__()\n self.body = body_net\n if in_features is None:\n for i in reversed(range(len(self.body.fcs))):\n layer = self.body.fcs[i]\n if hasattr(layer, 'out_features'):\n in_features = layer.out_features\n break\n self.head = nn.Linear(in_features, 1)\n\n def forward(self, x=None, body_x=None, **kwargs):\n if x is None and body_x is None:\n raise ValueError('One of [x, body_x] should be provided!')\n if body_x is None:\n body_x = self.body(x, **kwargs)\n if isinstance(body_x, tuple):\n val = self.head(body_x[0])\n else:\n val = self.head(body_x)\n return val, body_x" } ]
import gym import torch.nn as nn from collections.abc import Sequence from loguru import logger from dexenv.models.diag_gaussian_pol.diag_gaussian_policy import \ DiagGaussianPolicy from dexenv.models.utils import get_activation from dexenv.models.value_nets.value_net import ValueNet
1,138
class SimpleMLP(nn.Module): def __init__(self, in_dim, out_dim, act): super().__init__() act = get_activation(act) self.body = nn.Sequential( nn.Linear(in_dim, 512), act(), nn.Linear(512, 256), act(), nn.Linear(256, out_dim), act(), ) def forward(self, x): if isinstance(x, dict): # assert len(x) == 1 x = list(x.values())[0] out = self.body(x) return out def get_mlp_critic(ob_size, act='gelu'): logger.info(f'Critic state input size:{ob_size}') critic_body = SimpleMLP(in_dim=ob_size, out_dim=256, act=act)
class SimpleMLP(nn.Module): def __init__(self, in_dim, out_dim, act): super().__init__() act = get_activation(act) self.body = nn.Sequential( nn.Linear(in_dim, 512), act(), nn.Linear(512, 256), act(), nn.Linear(256, out_dim), act(), ) def forward(self, x): if isinstance(x, dict): # assert len(x) == 1 x = list(x.values())[0] out = self.body(x) return out def get_mlp_critic(ob_size, act='gelu'): logger.info(f'Critic state input size:{ob_size}') critic_body = SimpleMLP(in_dim=ob_size, out_dim=256, act=act)
critic = ValueNet(critic_body,
2
2023-10-25 17:22:41+00:00
2k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/autoencoder/components/tests/test_tied_bias.py
[ { "identifier": "TiedBias", "path": "sparse_autoencoder/autoencoder/components/tied_bias.py", "snippet": "class TiedBias(Module):\n \"\"\"Tied Bias Layer.\n\n The tied pre-encoder bias is a learned bias term that is subtracted from the input before\n encoding, and added back after decoding.\n\n The bias parameter must be initialised in the parent module, and then passed to this layer.\n\n https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-bias\n \"\"\"\n\n _bias_position: TiedBiasPosition\n\n _bias_reference: Float[\n Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n\n @property\n def bias(\n self,\n ) -> Float[Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Bias.\"\"\"\n return self._bias_reference\n\n def __init__(\n self,\n bias_reference: Float[\n Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n position: TiedBiasPosition,\n ) -> None:\n \"\"\"Initialize the bias layer.\n\n Args:\n bias_reference: Tied bias parameter (initialised in the parent module), used for both\n the pre-encoder and post-encoder bias. The original paper initialised this using the\n geometric median of the dataset.\n position: Whether this is the pre-encoder or post-encoder bias.\n \"\"\"\n super().__init__()\n\n self._bias_reference = bias_reference\n\n # Support string literals as well as enums\n self._bias_position = position\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Forward Pass.\n\n Args:\n x: Input tensor.\n\n Returns:\n Output of the forward pass.\n \"\"\"\n # If this is the pre-encoder bias, we subtract the bias from the input.\n if self._bias_position == TiedBiasPosition.PRE_ENCODER:\n return x - self.bias\n\n # If it's the post-encoder bias, we add the bias to the input.\n return x + self.bias\n\n def extra_repr(self) -> str:\n \"\"\"String extra representation of the module.\"\"\"\n return f\"position={self._bias_position.value}\"" }, { "identifier": "TiedBiasPosition", "path": "sparse_autoencoder/autoencoder/components/tied_bias.py", "snippet": "class TiedBiasPosition(str, Enum):\n \"\"\"Tied Bias Position.\"\"\"\n\n PRE_ENCODER = \"pre_encoder\"\n POST_DECODER = \"post_decoder\"" }, { "identifier": "Axis", "path": "sparse_autoencoder/tensor_types.py", "snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of 
items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)" } ]
from jaxtyping import Float from torch import Tensor from torch.nn import Parameter from sparse_autoencoder.autoencoder.components.tied_bias import TiedBias, TiedBiasPosition from sparse_autoencoder.tensor_types import Axis import torch
1,467
"""Tied Bias Tests.""" def test_pre_encoder_subtracts_bias() -> None: """Check that the pre-encoder bias subtracts the bias.""" encoder_input: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)] = torch.tensor( [[5.0, 3.0, 1.0]] ) bias = Parameter(torch.tensor([2.0, 4.0, 6.0])) expected = encoder_input - bias
"""Tied Bias Tests.""" def test_pre_encoder_subtracts_bias() -> None: """Check that the pre-encoder bias subtracts the bias.""" encoder_input: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)] = torch.tensor( [[5.0, 3.0, 1.0]] ) bias = Parameter(torch.tensor([2.0, 4.0, 6.0])) expected = encoder_input - bias
pre_encoder = TiedBias(bias, TiedBiasPosition.PRE_ENCODER)
0
2023-10-27 07:37:15+00:00
2k
vb000/SemanticHearing
src/training/train.py
[ { "identifier": "utils", "path": "src/helpers/utils.py", "snippet": "class Params():\n def __init__(self, json_path):\n def save(self, json_path):\n def update(self, json_path):\n def dict(self):\ndef save_graph(train_metrics, test_metrics, save_dir):\ndef import_attr(import_path):\ndef set_logger(log_path):\ndef load_checkpoint(checkpoint, model, optim=None, lr_sched=None, data_parallel=False):\ndef save_checkpoint(checkpoint, epoch, model, optim=None, lr_sched=None,\n train_metrics=None, val_metrics=None, data_parallel=False):\ndef model_size(model):\ndef run_time(model, inputs, profiling=False):\ndef format_lr_info(optimizer):" }, { "identifier": "test_epoch", "path": "src/training/eval.py", "snippet": "def test_epoch(model: nn.Module, device: torch.device,\n test_loader: torch.utils.data.dataloader.DataLoader,\n n_items: int, loss_fn, metrics_fn,\n results_fn = None, results_path: str = None, output_dir: str = None,\n profiling: bool = False, epoch: int = 0,\n writer: SummaryWriter = None) -> float:\n \"\"\"\n Evaluate the network.\n \"\"\"\n model.eval()\n metrics = {}\n losses = []\n runtimes = []\n results = []\n\n with torch.no_grad():\n for batch_idx, (inp, tgt) in \\\n enumerate(tqdm(test_loader, desc='Test', ncols=100)):\n # Move data to device\n inp, tgt = test_loader.dataset.to(inp, tgt, device)\n\n # Run through the model\n if profiling:\n with profile(activities=[ProfilerActivity.CPU],\n record_shapes=True) as prof:\n with record_function(\"model_inference\"):\n output = model(inp, writer=writer, step=epoch, idx=batch_idx)\n if profiling:\n logging.info(\n prof.key_averages().table(sort_by=\"self_cpu_time_total\",\n row_limit=20))\n else:\n output = model(inp, writer=writer, step=epoch, idx=batch_idx)\n\n # Compute loss\n loss = loss_fn(output, tgt)\n\n # Compute metrics\n metrics_batch = metrics_fn(inp, output, tgt)\n for k in metrics_batch.keys():\n if not k in metrics:\n metrics[k] = metrics_batch[k]\n else:\n metrics[k] += metrics_batch[k]\n\n output = test_loader.dataset.output_to(output, 'cpu')\n inp, tgt = test_loader.dataset.to(inp, tgt, 'cpu')\n\n # Results to save\n if results_path is not None:\n results.append(results_fn(\n batch_idx * test_loader.batch_size,\n inp, output, tgt, metrics_batch, output_dir=output_dir))\n\n losses += [loss.item()]\n if profiling:\n runtimes += [ # Runtime per sample in ms\n prof.profiler.self_cpu_time_total / (test_loader.batch_size * 1e3)]\n else:\n runtimes += [0.0]\n\n output = test_loader.dataset.output_to(output, 'cpu')\n inp, tgt = test_loader.dataset.to(inp, tgt, 'cpu')\n if writer is not None:\n if batch_idx == 0:\n test_loader.dataset.tensorboard_add_sample(\n writer, tag='Test',\n sample=(inp, output, tgt),\n step=epoch)\n test_loader.dataset.tensorboard_add_metrics(\n writer, tag='Test', metrics=metrics_batch, step=epoch)\n\n if n_items is not None and batch_idx == (n_items - 1):\n break\n\n if results_path is not None:\n torch.save(results, results_path)\n logging.info(\"Saved results to %s\" % results_path)\n\n avg_metrics = {k: np.mean(metrics[k]) for k in metrics.keys()}\n avg_metrics['loss'] = np.mean(losses)\n avg_metrics['runtime'] = np.mean(runtimes)\n avg_metrics_str = \"Test:\"\n for m in avg_metrics.keys():\n avg_metrics_str += ' %s=%.04f' % (m, avg_metrics[m])\n logging.info(avg_metrics_str)\n\n return avg_metrics" } ]
import argparse import multiprocessing import os import logging import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import traceback # pylint: disable=import-outside-toplevel import wandb from pathlib import Path from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm # pylint: disable=unused-import from torchmetrics.functional import( scale_invariant_signal_noise_ratio as si_snr, signal_noise_ratio as snr, signal_distortion_ratio as sdr, scale_invariant_signal_distortion_ratio as si_sdr) from src.helpers import utils from src.training.eval import test_epoch
1,165
""" The main training script for training on synthetic data """ def train_epoch(model: nn.Module, device: torch.device, optimizer: optim.Optimizer,
""" The main training script for training on synthetic data """ def train_epoch(model: nn.Module, device: torch.device, optimizer: optim.Optimizer,
train_loader: torch.utils.data.dataloader.DataLoader,
0
2023-10-30 05:36:07+00:00
2k
openai/bugbounty-gpt
tests/test_openai_classification.py
[ { "identifier": "OpenAIHandler", "path": "bugbounty_gpt/handlers/openai_handler.py", "snippet": "class OpenAIHandler:\n @staticmethod\n def _classifications_sanitization(input_string):\n \"\"\"\n Sanitizes the input string by removing spaces, converting to upper case, and replacing spaces with underscores.\n\n :param input_string: The input string to sanitize.\n :return: The sanitized string.\n \"\"\"\n return input_string.strip().replace(' ', '_').upper()\n\n @staticmethod\n def _build_request_data(submission_content):\n \"\"\"\n Builds the request data for the OpenAI API.\n\n :param submission_content: The content of the submission to be classified.\n :return: Dictionary containing the request data.\n \"\"\"\n return {\n \"model\": OPENAI_MODEL,\n \"temperature\": 0,\n \"max_tokens\": 512,\n \"messages\": [\n {\"role\": \"system\", \"content\": OPENAI_PROMPT},\n {\"role\": \"user\", \"content\": submission_content}\n ]\n }\n\n @staticmethod\n def _handle_response_error(error):\n \"\"\"\n Handles errors that occurred during the OpenAI request.\n\n :param error: The error that occurred.\n :return: A tuple containing the default category and an error message.\n \"\"\"\n logger.error(f\"An error occurred during the OpenAI request: {error}\")\n return DEFAULT_CATEGORY, \"An error occurred during classification. Please check application logs.\"\n\n @staticmethod\n def _handle_response(response):\n \"\"\"\n Handles the response from the OpenAI API.\n\n :param response: The response object from the OpenAI API.\n :return: A tuple containing the judgment category and explanation, or an error response if something goes wrong.\n \"\"\"\n try:\n response_text = response.choices[0].message.content\n judgement, explanation = response_text.rsplit('\\n', 1)\n sanitized_judgement = OpenAIHandler._classifications_sanitization(judgement)\n if sanitized_judgement in VALID_CATEGORIES:\n return sanitized_judgement, explanation.strip()\n else:\n return DEFAULT_CATEGORY, explanation.strip()\n except Exception as error:\n return OpenAIHandler._handle_response_error(error)\n\n @staticmethod\n async def classify_submission(submission_content):\n \"\"\"\n Classifies the submission content using the OpenAI API.\n\n :param submission_content: The content of the submission to be classified.\n :return: A tuple containing the judgment category and explanation, or an error response if something goes wrong.\n \"\"\"\n logger.info(\"Classifying submission's content.\")\n time.sleep(5) # Consider replacing with a more robust rate-limiting strategy\n try:\n request_data = OpenAIHandler._build_request_data(submission_content)\n loop = asyncio.get_running_loop()\n response = await loop.run_in_executor(None, lambda: openai.ChatCompletion.create(**request_data))\n return OpenAIHandler._handle_response(response)\n except Exception as error:\n return OpenAIHandler._handle_response_error(error)" }, { "identifier": "OPENAI_PROMPT", "path": "bugbounty_gpt/env.py", "snippet": "OPENAI_PROMPT = CONFIG['openai_prompt']" }, { "identifier": "OPENAI_MODEL", "path": "bugbounty_gpt/env.py", "snippet": "OPENAI_MODEL = CONFIG['api']['openai_model']" }, { "identifier": "DEFAULT_CATEGORY", "path": "bugbounty_gpt/env.py", "snippet": "DEFAULT_CATEGORY = sanitize_category(CONFIG['categories']['default'])" } ]
from bugbounty_gpt.handlers.openai_handler import OpenAIHandler from unittest.mock import patch, AsyncMock from bugbounty_gpt.env import OPENAI_PROMPT, OPENAI_MODEL, DEFAULT_CATEGORY import pytest, asyncio
915
def test_classifications_sanitization(): assert OpenAIHandler._classifications_sanitization(" Test Category ") == "TEST_CATEGORY" def test_build_request_data(): submission_content = "Sample content" expected_data = { "model": OPENAI_MODEL, "temperature": 0, "max_tokens": 512, "messages": [
def test_classifications_sanitization(): assert OpenAIHandler._classifications_sanitization(" Test Category ") == "TEST_CATEGORY" def test_build_request_data(): submission_content = "Sample content" expected_data = { "model": OPENAI_MODEL, "temperature": 0, "max_tokens": 512, "messages": [
{"role": "system", "content": OPENAI_PROMPT},
1
2023-10-27 22:41:24+00:00
2k
LeapLabTHU/FamO2O
jax_cql/JaxCQL/sac.py
[ { "identifier": "next_rng", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "def next_rng(*args, **kwargs):\n global jax_utils_rng\n return jax_utils_rng(*args, **kwargs)" }, { "identifier": "value_and_multi_grad", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "def value_and_multi_grad(fun, n_outputs, argnums=0, has_aux=False):\n def select_output(index):\n def wrapped(*args, **kwargs):\n if has_aux:\n x, *aux = fun(*args, **kwargs)\n return (x[index], *aux)\n else:\n x = fun(*args, **kwargs)\n return x[index]\n return wrapped\n\n grad_fns = tuple(\n jax.value_and_grad(select_output(i), argnums=argnums, has_aux=has_aux)\n for i in range(n_outputs)\n )\n def multi_grad_fn(*args, **kwargs):\n grads = []\n values = []\n for grad_fn in grad_fns:\n (value, *aux), grad = grad_fn(*args, **kwargs)\n values.append(value)\n grads.append(grad)\n return (tuple(values), *aux), tuple(grads)\n return multi_grad_fn" }, { "identifier": "mse_loss", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "def mse_loss(val, target):\n return jnp.mean(jnp.square(val - target))" }, { "identifier": "JaxRNG", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "class JaxRNG(object):\n \"\"\" A convenient stateful Jax RNG wrapper. Can be used to wrap RNG inside\n pure function.\n \"\"\"\n\n @classmethod\n def from_seed(cls, seed):\n return cls(jax.random.PRNGKey(seed))\n\n def __init__(self, rng):\n self.rng = rng\n\n def __call__(self, keys=None):\n if keys is None:\n self.rng, split_rng = jax.random.split(self.rng)\n return split_rng\n elif isinstance(keys, int):\n split_rngs = jax.random.split(self.rng, num=keys + 1)\n self.rng = split_rngs[0]\n return tuple(split_rngs[1:])\n else:\n split_rngs = jax.random.split(self.rng, num=len(keys) + 1)\n self.rng = split_rngs[0]\n return {key: val for key, val in zip(keys, split_rngs[1:])}" }, { "identifier": "wrap_function_with_rng", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "def wrap_function_with_rng(rng):\n \"\"\" To be used as decorator, automatically bookkeep a RNG for the wrapped function. \"\"\"\n def wrap_function(function):\n def wrapped(*args, **kwargs):\n nonlocal rng\n rng, split_rng = jax.random.split(rng)\n return function(split_rng, *args, **kwargs)\n return wrapped\n return wrap_function" }, { "identifier": "collect_jax_metrics", "path": "jax_cql/JaxCQL/jax_utils.py", "snippet": "def collect_jax_metrics(metrics, names, prefix=None):\n collected = {}\n for name in names:\n if name in metrics:\n collected[name] = jnp.mean(metrics[name])\n if prefix is not None:\n collected = {\n '{}/{}'.format(prefix, key): value for key, value in collected.items()\n }\n return collected" }, { "identifier": "Scalar", "path": "jax_cql/JaxCQL/model.py", "snippet": "class Scalar(nn.Module):\n init_value: float\n\n def setup(self):\n self.value = self.param('value', lambda x: self.init_value)\n\n def __call__(self):\n return self.value" }, { "identifier": "update_target_network", "path": "jax_cql/JaxCQL/model.py", "snippet": "def update_target_network(main_params, target_params, tau):\n return jax.tree_util.tree_map(\n lambda x, y: tau * x + (1.0 - tau) * y,\n main_params, target_params\n )" } ]
from collections import OrderedDict from copy import deepcopy from functools import partial from ml_collections import ConfigDict from flax.training.train_state import TrainState from .jax_utils import ( next_rng, value_and_multi_grad, mse_loss, JaxRNG, wrap_function_with_rng, collect_jax_metrics ) from .model import Scalar, update_target_network import numpy as np import jax import jax.numpy as jnp import flax import flax.linen as nn import optax import distrax
1,576
class SAC(object): @staticmethod def get_default_config(updates=None): config = ConfigDict() config.discount = 0.99 config.alpha_multiplier = 1.0 config.use_automatic_entropy_tuning = True config.backup_entropy = False config.target_entropy = 0.0 config.policy_lr = 3e-4 config.qf_lr = 3e-4 config.optimizer_type = 'adam' config.soft_target_update_rate = 5e-3 if updates is not None: config.update(ConfigDict(updates).copy_and_resolve_references()) return config def __init__(self, config, policy, qf): self.config = self.get_default_config(config) self.policy = policy self.qf = qf self.observation_dim = policy.observation_dim self.action_dim = policy.action_dim self._train_states = {} optimizer_class = { 'adam': optax.adam, 'sgd': optax.sgd, }[self.config.optimizer_type] policy_params = self.policy.init( next_rng(self.policy.rng_keys()), jnp.zeros((10, self.observation_dim)) ) self._train_states['policy'] = TrainState.create( params=policy_params, tx=optimizer_class(self.config.policy_lr), apply_fn=None ) qf1_params = self.qf.init( next_rng(self.qf.rng_keys()), jnp.zeros((10, self.observation_dim)), jnp.zeros((10, self.action_dim)) ) self._train_states['qf1'] = TrainState.create( params=qf1_params, tx=optimizer_class(self.config.qf_lr), apply_fn=None, ) qf2_params = self.qf.init( next_rng(self.qf.rng_keys()), jnp.zeros((10, self.observation_dim)), jnp.zeros((10, self.action_dim)) ) self._train_states['qf2'] = TrainState.create( params=qf2_params, tx=optimizer_class(self.config.qf_lr), apply_fn=None, ) self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params}) model_keys = ['policy', 'qf1', 'qf2'] if self.config.use_automatic_entropy_tuning:
class SAC(object): @staticmethod def get_default_config(updates=None): config = ConfigDict() config.discount = 0.99 config.alpha_multiplier = 1.0 config.use_automatic_entropy_tuning = True config.backup_entropy = False config.target_entropy = 0.0 config.policy_lr = 3e-4 config.qf_lr = 3e-4 config.optimizer_type = 'adam' config.soft_target_update_rate = 5e-3 if updates is not None: config.update(ConfigDict(updates).copy_and_resolve_references()) return config def __init__(self, config, policy, qf): self.config = self.get_default_config(config) self.policy = policy self.qf = qf self.observation_dim = policy.observation_dim self.action_dim = policy.action_dim self._train_states = {} optimizer_class = { 'adam': optax.adam, 'sgd': optax.sgd, }[self.config.optimizer_type] policy_params = self.policy.init( next_rng(self.policy.rng_keys()), jnp.zeros((10, self.observation_dim)) ) self._train_states['policy'] = TrainState.create( params=policy_params, tx=optimizer_class(self.config.policy_lr), apply_fn=None ) qf1_params = self.qf.init( next_rng(self.qf.rng_keys()), jnp.zeros((10, self.observation_dim)), jnp.zeros((10, self.action_dim)) ) self._train_states['qf1'] = TrainState.create( params=qf1_params, tx=optimizer_class(self.config.qf_lr), apply_fn=None, ) qf2_params = self.qf.init( next_rng(self.qf.rng_keys()), jnp.zeros((10, self.observation_dim)), jnp.zeros((10, self.action_dim)) ) self._train_states['qf2'] = TrainState.create( params=qf2_params, tx=optimizer_class(self.config.qf_lr), apply_fn=None, ) self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params}) model_keys = ['policy', 'qf1', 'qf2'] if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
6
2023-10-25 11:53:25+00:00
2k
RenShuhuai-Andy/TESTA
data/pretrain_dataset.py
[ { "identifier": "pre_caption", "path": "data/utils.py", "snippet": "def pre_caption(caption, max_words=50):\n caption = re.sub(\n r\"([!\\\"()*#~])\", #r\"([!\\\"()*#:;~])\" #r\"([.!\\\"()*#:;~])\",\n ' ',\n caption.lower(),\n )\n caption = re.sub(\n r\"\\s{2,}\",\n ' ', \n caption,\n )\n caption = caption.rstrip('\\n')\n caption = caption.strip(' ')\n\n # truncate caption\n caption_words = caption.split(' ')\n if len(caption_words) > max_words:\n caption = ' '.join(caption_words[:max_words])\n\n return caption" }, { "identifier": "TemporalConsistentRandomAugment", "path": "data/randaugment.py", "snippet": "class TemporalConsistentRandomAugment(object):\n\n def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]):\n self.N = N\n self.M = M\n self.p = p\n self.tensor_in_tensor_out = tensor_in_tensor_out\n if augs:\n self.augs = augs \n else:\n self.augs = list(arg_dict.keys())\n\n def get_random_ops(self):\n sampled_ops = np.random.choice(self.augs, self.N, replace=False)\n # return [(op, 0.5, self.M) for op in sampled_ops]\n return [(op, self.M) for op in sampled_ops]\n\n def __call__(self, frames):\n assert frames.shape[-1] == 3, 'Expecting last dimension for 3-channels RGB (b, h, w, c).'\n \n if self.tensor_in_tensor_out:\n frames = frames.numpy().astype(np.uint8)\n \n num_frames = frames.shape[0]\n\n ops = num_frames * [self.get_random_ops()]\n apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]\n\n frames = torch.stack(list(map(self._aug, frames, ops, apply_or_not)), dim=0).float()\n\n return frames\n\n def _aug(self, img, ops, apply_or_not):\n for i, (name, level) in enumerate(ops):\n if not apply_or_not[i]:\n continue\n args = arg_dict[name](level)\n img = func_dict[name](img, *args) \n return torch.from_numpy(img)" } ]
import json import os import random import torch import torch import numpy as np import decord import os,glob from pandas import Categorical from torch.utils.data import Dataset from PIL import Image from PIL import ImageFile from decord import VideoReader from data.utils import pre_caption from .randaugment import TemporalConsistentRandomAugment
1,012
ImageFile.LOAD_TRUNCATED_IMAGES = True Image.MAX_IMAGE_PIXELS = None decord.bridge.set_bridge('torch') class pretrain_dataset(Dataset): def __init__(self, ann_file, laion_path, transform): self.ann_pretrain = [] for f in ann_file: print('loading '+f) ann = json.load(open(f,'r')) self.ann_pretrain += ann self.laion_path = laion_path if self.laion_path: self.laion_files = glob.glob(os.path.join(laion_path,'*.json')) print('loading '+self.laion_files[0]) with open(self.laion_files[0],'r') as f: self.ann_laion = json.load(f) self.annotation = self.ann_pretrain + self.ann_laion else: self.annotation = self.ann_pretrain self.transform = transform def reload_laion(self, epoch): n = epoch%len(self.laion_files) print('loading '+self.laion_files[n]) with open(self.laion_files[n],'r') as f: self.ann_laion = json.load(f) self.annotation = self.ann_pretrain + self.ann_laion def __len__(self): return len(self.annotation) def __getitem__(self, index): ann = self.annotation[index] image = Image.open(ann['image']).convert('RGB') image = self.transform(image)
ImageFile.LOAD_TRUNCATED_IMAGES = True Image.MAX_IMAGE_PIXELS = None decord.bridge.set_bridge('torch') class pretrain_dataset(Dataset): def __init__(self, ann_file, laion_path, transform): self.ann_pretrain = [] for f in ann_file: print('loading '+f) ann = json.load(open(f,'r')) self.ann_pretrain += ann self.laion_path = laion_path if self.laion_path: self.laion_files = glob.glob(os.path.join(laion_path,'*.json')) print('loading '+self.laion_files[0]) with open(self.laion_files[0],'r') as f: self.ann_laion = json.load(f) self.annotation = self.ann_pretrain + self.ann_laion else: self.annotation = self.ann_pretrain self.transform = transform def reload_laion(self, epoch): n = epoch%len(self.laion_files) print('loading '+self.laion_files[n]) with open(self.laion_files[n],'r') as f: self.ann_laion = json.load(f) self.annotation = self.ann_pretrain + self.ann_laion def __len__(self): return len(self.annotation) def __getitem__(self, index): ann = self.annotation[index] image = Image.open(ann['image']).convert('RGB') image = self.transform(image)
caption = pre_caption(ann['caption'],30)
0
2023-10-29 12:09:38+00:00
2k
flbraun/poe-palette
data/beasts.py
[ { "identifier": "League", "path": "data/leagues.py", "snippet": "class League:\n type_: LeagueType\n title: str # e.g. \"Ancestor\"\n slug: str # e.g. \"ancestor\"\n is_hardcore: bool" }, { "identifier": "get_ninja_index", "path": "data/ninja.py", "snippet": "@functools.cache\ndef get_ninja_index(league: League) -> NinjaIndex:\n \"\"\"\n Downloads current data from ninja and makes it available as a sort-of index.\n \"\"\"\n session = LoggedRequestsSession()\n\n index = {}\n\n for category, endpoint_info in ninja_api_endpoint_for_category.items():\n url_template, response_attr = endpoint_info\n url = url_template.format(league=league.title)\n\n res = session.get(url)\n assert res.status_code == http.HTTPStatus.OK\n\n res_parsed = res.json()\n index[category] = {line[response_attr] for line in res_parsed['lines']}\n\n index = NinjaIndex(raw=index)\n index.print_stats()\n\n return index" }, { "identifier": "make_ninja_url", "path": "data/ninja.py", "snippet": "def make_ninja_url(league: League, item_name: str, base_name: str | None, category: NinjaCategory) -> URL | None:\n base_url_template, name_as_param = ninja_url_for_category[category]\n base_url = base_url_template.format(league=league.slug)\n\n if name_as_param:\n return f'{base_url}?name={item_name}'\n\n if base_name:\n item_name += f' {base_name}'\n item_name = item_name.replace(' ', '-').replace(\"'\", '').lower()\n return f'{base_url}/{item_name}'" }, { "identifier": "make_trade_url", "path": "data/trade.py", "snippet": "def make_trade_url(league: League, type_: str, name: str | None = None) -> URL:\n # do not change the order of the keys carelessly! the trade site is very sensitive about them.\n query = {\n 'query': {\n 'status': {'option': 'online'},\n 'type': type_,\n 'stats': [{'type': 'and', 'filters': []}],\n },\n 'sort': {'price': 'asc'},\n }\n\n if name:\n query['query']['name'] = name\n\n query_quoted = quote(json.dumps(query))\n return f'https://www.pathofexile.com/trade/search/{league.title}?q={query_quoted}'" }, { "identifier": "NinjaCategory", "path": "data/types.py", "snippet": "class NinjaCategory(Enum):\n # General\n CURRENCY = 'Currency'\n FRAGMENTS = 'Fragments'\n TATTOOS = 'Tattoos'\n OMENS = 'Omens'\n DIVINATION_CARDS = 'Divination Cards'\n ARTIFACTS = 'Artifacts'\n OILS = 'Oils'\n INCUBATORS = 'Incubators'\n # Equipment & Gems\n UNIQUE_WEAPONS = 'Unique Weapons'\n UNIQUE_ARMOURS = 'Unique Armours'\n UNIQUE_ACCESSORIES = 'Unique Accessories'\n UNIQUE_FLASKS = 'Unique Flasks'\n UNIQUE_JEWELS = 'Unique Jewels'\n UNIQUE_RELICS = 'Unique Relics'\n SKILL_GEMS = 'Skill Gems'\n CLUSTER_JEWELS = 'Cluster Jewels'\n # Atlas\n MAPS = 'Maps'\n BLIGHTED_MAPS = 'Blighted Maps'\n BLIGHT_RAVAGED_MAPS = 'Blight-ravaged Maps'\n SCOURGED_MAPS = 'Scourged Maps'\n UNIQUE_MAPS = 'Unique Maps'\n DELIRIUM_ORBS = 'Delirium Orbs'\n INVITATIONS = 'Invitations'\n SCARABS = 'Scarabs'\n MEMORIES = 'Memories'\n # Crafting\n BASE_TYPES = 'Base Types'\n FOSSILS = 'Fossils'\n RESONATORS = 'Resonators'\n HELMET_ENCHANTS = 'Helmet Enchants'\n BEASTS = 'Beasts'\n ESSENCES = 'Essences'\n VIALS = 'Vials'" }, { "identifier": "Entry", "path": "data/utils.py", "snippet": "class Entry:\n \"\"\"\n The final data container that serializes data for the\n electron app to consume.\n \"\"\"\n display_text: str\n wiki_url: URL | None = None\n poedb_url: URL | None = None\n ninja_url: URL | None = None\n trade_url: URL | None = None\n tft_url: URL | None = None\n tool_url: URL | None = None" }, { "identifier": "make_wiki_url", "path": "data/utils.py", 
"snippet": "def make_wiki_url(item_name: str) -> URL:\n item_name = item_name.replace(' ', '_')\n return f'https://www.poewiki.net/wiki/{item_name}'" } ]
from collections.abc import Generator from .leagues import League from .ninja import get_ninja_index, make_ninja_url from .trade import make_trade_url from .types import NinjaCategory from .utils import Entry, make_wiki_url
1,262
def get_beasts(league: League) -> Generator[Entry, None, None]: index = get_ninja_index(league) for beast in index.raw[NinjaCategory.BEASTS]: yield Entry( display_text=beast, wiki_url=make_wiki_url(beast), ninja_url=make_ninja_url(league, beast, None, NinjaCategory.BEASTS),
def get_beasts(league: League) -> Generator[Entry, None, None]: index = get_ninja_index(league) for beast in index.raw[NinjaCategory.BEASTS]: yield Entry( display_text=beast, wiki_url=make_wiki_url(beast), ninja_url=make_ninja_url(league, beast, None, NinjaCategory.BEASTS),
trade_url=make_trade_url(league, beast),
3
2023-10-27 11:33:43+00:00
2k
ATR-DBI/CityRefer
models/cityrefer.py
[ { "identifier": "SparseConvEncoder", "path": "models/basic_blocks.py", "snippet": "class SparseConvEncoder(nn.Module):\n def __init__(self, input_dim):\n super().__init__()\n\n self.stem = nn.Sequential(\n BasicConvolutionBlock(input_dim, 32, 3)\n )\n\n self.stage1 = nn.Sequential(\n BasicConvolutionBlock(32, 64, ks=2, stride=2),\n ResidualBlock(64, 64, 3),\n )\n\n self.stage2 = nn.Sequential(\n BasicConvolutionBlock(64, 128, ks=2, stride=2),\n ResidualBlock(128, 128, 3),\n )\n\n self.stage3 = nn.Sequential(\n BasicConvolutionBlock(128, 128, ks=2, stride=2),\n ResidualBlock(128, 128, 3),\n )\n\n self.stage4 = nn.Sequential(\n BasicConvolutionBlock(128, 128, ks=2, stride=2),\n ResidualBlock(128, 128, 3),\n )\n\n\n def forward(self, x):\n x = self.stem(x)\n x = self.stage1(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n\n return x" }, { "identifier": "LandLangModule", "path": "models/landlang_module.py", "snippet": "class LandLangModule(nn.Module):\n def __init__(self, num_object_class, vocab_size, use_lang_classifier=True, use_bidir=False, \n embed_dim=256, hidden_size=256, max_num_landmark=128, padding_idx=0):\n super().__init__() \n\n self.num_object_class = num_object_class\n self.use_lang_classifier = use_lang_classifier\n self.use_bidir = use_bidir\n self.max_num_landmark = max_num_landmark\n self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx) #, **factory_kwargs) \n \n self.gru = nn.GRU(\n input_size=embed_dim,\n hidden_size=hidden_size,\n batch_first=True,\n bidirectional=self.use_bidir\n )\n \n lang_size = hidden_size * 2 if self.use_bidir else hidden_size\n # language classifier\n if use_lang_classifier:\n self.lang_cls = nn.Sequential(\n nn.Linear(lang_size, num_object_class),\n nn.Dropout()\n )\n\n def forward(self, data_dict):\n \"\"\"\n encode the input descriptions\n \"\"\"\n input_ids = data_dict[\"landmark_tokens\"]\n word_embs = self.word_embeddings(input_ids)\n landmark_tokens_len = data_dict['landmark_tokens_mask'].sum(axis=1).long().cpu()\n lang_feat = pack_padded_sequence(word_embs, landmark_tokens_len, batch_first=True, enforce_sorted=False)\n \n # encode description\n _, lang_last = self.gru(lang_feat)\n lang_last = lang_last.permute(1, 0, 2).contiguous().flatten(start_dim=1) # batch_size, hidden_size * num_dir\n\n cursor = 0\n landmark_name_feats = []\n for num_landmark in data_dict['landmark_len'].long().cpu():\n landmark_name_feat = lang_last[cursor:cursor+num_landmark]\n landmark_name_feats.append(landmark_name_feat)\n cursor += num_landmark\n landmark_name_feats = pad_sequence(landmark_name_feats, batch_first=True)\n \n # store the encoded language features\n data_dict[\"landmark_name_feats\"] = landmark_name_feats # B, max_landmark_len, hidden_size\n\n return data_dict\n\n def length_to_mask(self, length, max_len=None, dtype=None):\n \"\"\"length: B.\n return B x max_len.\n If max_len is None, then max of length will be used.\n \"\"\"\n assert len(length.shape) == 1, \"Length shape should be 1 dimensional.\"\n max_len = max_len or length.max().item()\n mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand(\n len(length), max_len\n ) < length.unsqueeze(1)\n if dtype is not None:\n mask = torch.as_tensor(mask, dtype=dtype, device=length.device)\n return mask" } ]
import sys import os import importlib import models import torch import torch.nn as nn import torchsparse.nn as spnn from torch.nn.utils.rnn import pad_sequence from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from torchsparse.utils.collate import sparse_collate from transformers import BertConfig from models.basic_blocks import SparseConvEncoder from models.landlang_module import LandLangModule
1,357
importlib.reload(models) sys.path.append(os.path.join(os.getcwd(), "lib")) # HACK add the lib folder sys.path.append(os.path.join(os.getcwd(), "models")) # HACK add the lib folder class CityRefer(nn.Module): def __init__(self, args, input_feature_dim=0, num_object_class=None, vocab_size=None, pad_token_id=0): super().__init__() self.args = args self.input_feature_dim = input_feature_dim self.num_object_class = num_object_class self.drop_rate = args.drop_rate self.use_lang_classifier=(not args.no_lang_cls), hidden_size = args.hidden_size # --------- Language Encoder --------- embed_dim = hidden_size self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_token_id) #, **factory_kwargs) self.lang_gru = nn.GRU( input_size=hidden_size, hidden_size=hidden_size, num_layers=args.num_hidden_layers, batch_first=True, bidirectional=True, ) # --------- Point Encoder --------- # Sparse Volumetric Backbone
importlib.reload(models) sys.path.append(os.path.join(os.getcwd(), "lib")) # HACK add the lib folder sys.path.append(os.path.join(os.getcwd(), "models")) # HACK add the lib folder class CityRefer(nn.Module): def __init__(self, args, input_feature_dim=0, num_object_class=None, vocab_size=None, pad_token_id=0): super().__init__() self.args = args self.input_feature_dim = input_feature_dim self.num_object_class = num_object_class self.drop_rate = args.drop_rate self.use_lang_classifier=(not args.no_lang_cls), hidden_size = args.hidden_size # --------- Language Encoder --------- embed_dim = hidden_size self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=pad_token_id) #, **factory_kwargs) self.lang_gru = nn.GRU( input_size=hidden_size, hidden_size=hidden_size, num_layers=args.num_hidden_layers, batch_first=True, bidirectional=True, ) # --------- Point Encoder --------- # Sparse Volumetric Backbone
self.sparse_conv = SparseConvEncoder(self.input_feature_dim) # self.input_feature_dim = 3 -> 128
0
2023-10-25 10:02:28+00:00
2k
OATML-Markslab/ProteinNPT
baselines/data_processing.py
[ { "identifier": "slice_sequences", "path": "utils/data_utils.py", "snippet": "def slice_sequences(list_mutant_mutated_seq_pairs, max_positions=1024, method=\"rolling\", rolling_overlap=100, eval_mode=True, batch_target_labels=None, batch_masked_targets=None, target_names=None, start_idx=1, num_extra_tokens=1):\n \"\"\"\n rolling: creates overlapping sequence chunks of length args.max_positions - 1 (minus 1 to allow the BOS token addition)\n center: centers sequence slice around mutation\n left: selects the first (args.max_positions - 1) tokens in the sequence\n batch_target_labels are needed in eval_mode with rolling as we do target duplication for the different windows.\n Assumption: all input sequences are of same length.\n num_extra_tokens: 1 is just BOS added (eg., ESM); 2 if BOS and EOS added (eg., Tranception)\n \"\"\"\n mutant_mutated_seqs = list(zip(*list_mutant_mutated_seq_pairs))\n raw_sequence_length = len(mutant_mutated_seqs[1][0]) # length of first sequence\n all_mutants = mutant_mutated_seqs[0]\n all_mutated_seqs = mutant_mutated_seqs[1]\n scoring_optimal_window = None\n if method==\"center\":\n mutations_barycenters = [int(np.array([ int(mutation[1:-1]) - start_idx for mutation in mutant.split(':')]).mean()) for mutant in all_mutants]\n scoring_optimal_window = [get_optimal_window(x, raw_sequence_length, max_positions - num_extra_tokens) for x in mutations_barycenters] #Removing 1 from args.max_positions to allow subsequent addition of BOS token\n sliced_mutated_seqs = [all_mutated_seqs[index][scoring_optimal_window[index][0]:scoring_optimal_window[index][1]] for index in range(len(all_mutated_seqs))]\n list_mutant_mutated_seq_pairs = list(zip(all_mutants,sliced_mutated_seqs))\n elif method==\"left\":\n sliced_mutated_seqs = [all_mutated_seqs[index][0:max_positions - num_extra_tokens] for index in range(len(all_mutated_seqs))] #minus 1 to keep room for BOS token\n list_mutant_mutated_seq_pairs = list(zip(all_mutants,sliced_mutated_seqs))\n scoring_optimal_window = [(0, max_positions - 1)] * len(all_mutated_seqs)\n else:\n print(\"Sequence slicing method not recognized\")\n sys.exit(0)\n if batch_masked_targets is not None: #Protein NPT output\n return list_mutant_mutated_seq_pairs, batch_target_labels, batch_masked_targets, scoring_optimal_window\n else: #Baseline output\n return list_mutant_mutated_seq_pairs, batch_target_labels, scoring_optimal_window" }, { "identifier": "get_indices_retrieved_embeddings", "path": "utils/data_utils.py", "snippet": "def get_indices_retrieved_embeddings(batch, embeddings_dict_location, number_of_mutated_seqs_to_score=None):\n batch_mutants, batch_sequences = zip(*batch['mutant_mutated_seq_pairs'])\n with h5py.File(embeddings_dict_location, 'r') as h5f:\n num_all_embeddings = len(h5f['mutants'])\n list_mutants = [x.decode('utf-8') for x in h5f['mutants'][:]]\n mutant_indices = range(num_all_embeddings)\n mutants_embeddings_dict = {'mutants': list_mutants, 'mutant_index': mutant_indices}\n mutants_embeddings_df = pd.DataFrame.from_dict(mutants_embeddings_dict, orient='columns')\n if number_of_mutated_seqs_to_score is not None:\n batch_mutants = batch_mutants[:number_of_mutated_seqs_to_score]\n batch_mutants_df = pd.DataFrame(batch_mutants, columns=['mutants'])\n intersection = pd.merge(batch_mutants_df, mutants_embeddings_df, how='inner', on='mutants')\n return np.array(intersection['mutant_index'].values)" }, { "identifier": "weighted_sample_MSA", "path": "utils/msa_utils.py", "snippet": "def weighted_sample_MSA(MSA_all_sequences, 
MSA_non_ref_sequences_weights, number_sampled_MSA_sequences):\n \"\"\"\n We always enforce the first sequence in the MSA to be the refence sequence.\n \"\"\"\n msa = [MSA_all_sequences[0]]\n msa.extend(random.choices(MSA_all_sequences[1:], weights=MSA_non_ref_sequences_weights, k=number_sampled_MSA_sequences-1))\n msa = [(desc, seq.upper()) for desc, seq in msa]\n return msa" } ]
import sys import numpy as np import h5py import torch from collections import defaultdict from utils.data_utils import slice_sequences, get_indices_retrieved_embeddings from utils.msa_utils import weighted_sample_MSA
1,219
def process_batch(batch, model, alphabet, args, device, MSA_sequences=None, MSA_weights=None, MSA_start_position=None, MSA_end_position=None, eval_mode = True, indel_mode=False, start_idx=1): """ start_idx is the one-indexed position of the first residue in the sequence. If full sequence is passed (as always assumed in this codebase) this is equal to 1. """ target_names = args.target_config.keys() raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1]) raw_batch_size = len(batch['mutant_mutated_seq_pairs']) if args.sequence_embeddings_location is not None and args.aa_embeddings!="One_hot_encoding": try:
def process_batch(batch, model, alphabet, args, device, MSA_sequences=None, MSA_weights=None, MSA_start_position=None, MSA_end_position=None, eval_mode = True, indel_mode=False, start_idx=1): """ start_idx is the one-indexed position of the first residue in the sequence. If full sequence is passed (as always assumed in this codebase) this is equal to 1. """ target_names = args.target_config.keys() raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1]) raw_batch_size = len(batch['mutant_mutated_seq_pairs']) if args.sequence_embeddings_location is not None and args.aa_embeddings!="One_hot_encoding": try:
indices_retrieved_embeddings = get_indices_retrieved_embeddings(batch,args.sequence_embeddings_location)
1
2023-10-28 11:41:05+00:00
2k
dyhBUPT/iKUN
test.py
[ { "identifier": "opt", "path": "opts.py", "snippet": "class opts:\n def __init__(self):\n def parse(self, args=''):" }, { "identifier": "get_model", "path": "model.py", "snippet": "def get_model(opt, name='Model'):\n model = eval(name)(opt)\n model.cuda()\n model = nn.DataParallel(model)\n return model" }, { "identifier": "get_dataloader", "path": "dataloader.py", "snippet": "def get_dataloader(mode, opt, dataset='RMOT_Dataset', show=False, **kwargs):\n dataset = eval(dataset)(mode, opt, **kwargs)\n if show:\n dataset.show_information()\n if mode == 'train':\n dataloader = DataLoader(\n dataset,\n batch_size=opt.train_bs,\n shuffle=True,\n drop_last=True,\n num_workers=opt.num_workers,\n )\n elif mode == 'test':\n dataloader = DataLoader(\n dataset,\n batch_size=opt.test_bs,\n shuffle=False,\n drop_last=False,\n num_workers=opt.num_workers,\n )\n return dataloader" }, { "identifier": "get_transform", "path": "dataloader.py", "snippet": "def get_transform(mode, opt, idx):\n if mode == 'train':\n return T.Compose([\n SquarePad(),\n T.RandomResizedCrop(\n opt.img_hw[idx],\n ratio=opt.random_crop_ratio\n ),\n T.ToTensor(),\n T.Normalize(opt.norm_mean, opt.norm_std),\n ])\n elif mode == 'test':\n return T.Compose([\n SquarePad(),\n T.Resize(opt.img_hw[idx]),\n T.ToTensor(),\n T.Normalize(opt.norm_mean, opt.norm_std),\n ])\n elif mode == 'unnorm':\n mean = opt.norm_mean\n std = opt.norm_std\n return T.Normalize(\n [-mean[i]/std[i] for i in range(3)],\n [1/std[i] for i in range(3)],\n )" }, { "identifier": "similarity_calibration", "path": "similarity_calibration.py", "snippet": "def similarity_calibration(TEXT_FEAT_DICT, CLS_DICT, a, b, tau):\n fn = lambda x: a * x + b\n\n cls_dict = deepcopy(CLS_DICT)\n FEATS = np.array([x['feature'] for x in TEXT_FEAT_DICT['train'].values()])\n PROBS = np.array([x['probability'] for x in TEXT_FEAT_DICT['train'].values()])\n\n for video, video_value in cls_dict.items():\n for obj_id, obj_value in video_value.items():\n for frame, frame_value in obj_value.items():\n for exp, exp_value in frame_value.items():\n exp_new = expression_conversion(exp)\n feat = np.array(TEXT_FEAT_DICT['test'][exp_new]['feature'])[None, :]\n sim = (feat @ FEATS.T)[0]\n sim = (sim - sim.min()) / (sim.max() - sim.min())\n weight = np.exp(tau * sim) / np.exp(tau * sim).sum()\n prob = (weight * PROBS).sum()\n new_exp_value = [\n x + fn(prob) for x in exp_value\n ]\n frame_value[exp] = new_exp_value\n\n return cls_dict" } ]
import os import json import shutil import numpy as np import torch import torch.nn.functional as F import warnings from tqdm import tqdm from os.path import join, exists from collections import defaultdict from torch import nn from torchvision.utils import save_image from opts import opt from utils import * from model import get_model from dataloader import get_dataloader, get_transform from similarity_calibration import similarity_calibration
916
warnings.filterwarnings('ignore') # import `opts` first to set gpus def test_accuracy_v1(model, dataloader, save_img=False): model.eval() TP, FP, FN = 0, 0, 0 assert dataloader.batch_size == 1 if save_img: save_dir = join(opt.save_dir, 'images') os.makedirs(save_dir, exist_ok=True) global_idx = 1
warnings.filterwarnings('ignore') # import `opts` first to set gpus def test_accuracy_v1(model, dataloader, save_img=False): model.eval() TP, FP, FN = 0, 0, 0 assert dataloader.batch_size == 1 if save_img: save_dir = join(opt.save_dir, 'images') os.makedirs(save_dir, exist_ok=True) global_idx = 1
un_norm = get_transform('unnorm', opt, -1)
3
2023-10-31 07:08:37+00:00
2k
CVHub520/yolov5_obb
utils/augmentations.py
[ { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_version", "path": "utils/general.py", "snippet": "def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, s # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "resample_segments", "path": "utils/general.py", "snippet": "def resample_segments(segments, n=1000):\n # Up-sample an (n,2) segment\n for i, s in enumerate(segments):\n x = np.linspace(0, len(s) - 1, n)\n xp = np.arange(len(s))\n segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy\n return segments" }, { "identifier": "segment2box", "path": "utils/general.py", "snippet": "def segment2box(segment, width=640, height=640):\n # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)\n x, y = segment.T # segment xy\n inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)\n x, y, = x[inside], y[inside]\n return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy" }, { "identifier": "bbox_ioa", "path": "utils/metrics.py", "snippet": "def bbox_ioa(box1, box2, eps=1E-7):\n \"\"\" Returns the intersection over box2 area given box1, box2. 
Boxes are x1y1x2y2\n box1: np.array of shape(4)\n box2: np.array of shape(nx4)\n returns: np.array of shape(n)\n \"\"\"\n\n box2 = box2.transpose()\n\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n\n # Intersection area\n inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \\\n (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)\n\n # box2 area\n box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps\n\n # Intersection over box2 area\n return inter_area / box2_area" }, { "identifier": "poly_filter", "path": "utils/rboxs_utils.py", "snippet": "def poly_filter(polys, h, w): \n \"\"\"\n Filter the poly labels which is out of the image.\n Args:\n polys (array): (num, 8)\n\n Return:\n keep_masks (array): (num)\n \"\"\"\n x = polys[:, 0::2] # (num, 4) \n y = polys[:, 1::2]\n x_max = np.amax(x, axis=1) # (num)\n x_min = np.amin(x, axis=1) \n y_max = np.amax(y, axis=1)\n y_min = np.amin(y, axis=1)\n x_ctr, y_ctr = (x_max + x_min) / 2.0, (y_max + y_min) / 2.0 # (num)\n keep_masks = (x_ctr > 0) & (x_ctr < w) & (y_ctr > 0) & (y_ctr < h) \n return keep_masks" } ]
import math import random import cv2 import numpy as np import albumentations as A from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box from utils.metrics import bbox_ioa from utils.rboxs_utils import poly_filter
1,519
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Image augmentation functions """ class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) def __init__(self): self.transform = None try:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Image augmentation functions """ class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) def __init__(self): self.transform = None try:
check_version(A.__version__, '1.0.3', hard=True) # version requirement
1
2023-10-31 06:06:41+00:00
2k
hyw-dev/AFI-ForwardDeduplicate
models/gmflow/matching.py
[ { "identifier": "coords_grid", "path": "models/gmflow/geometry.py", "snippet": "def coords_grid(b, h, w, homogeneous=False, device=None, dtype: torch.dtype=torch.float32):\r\n k = (str(device), str((b, h, w)))\r\n if k in coords_grid_cache:\r\n return coords_grid_cache[k]\r\n y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W]\r\n\r\n stacks = [x, y]\r\n\r\n if homogeneous:\r\n ones = torch.ones_like(x) # [H, W]\r\n stacks.append(ones)\r\n\r\n grid = torch.stack(stacks, dim=0) # [2, H, W] or [3, H, W]\r\n\r\n grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W]\r\n\r\n if device is not None:\r\n grid = grid.to(device, dtype=dtype)\r\n coords_grid_cache[k] = grid\r\n return grid\r" }, { "identifier": "generate_window_grid", "path": "models/gmflow/geometry.py", "snippet": "def generate_window_grid(h_min, h_max, w_min, w_max, len_h, len_w, device=None, dtype=torch.float32):\r\n assert device is not None\r\n k = (str(device), str((h_min, h_max, w_min, w_max, len_h, len_w)))\r\n if k in window_grid_cache:\r\n return window_grid_cache[k]\r\n x, y = torch.meshgrid([torch.linspace(w_min, w_max, len_w, device=device),\r\n torch.linspace(h_min, h_max, len_h, device=device)],\r\n )\r\n grid = torch.stack((x, y), -1).transpose(0, 1).to(device, dtype=dtype) # [H, W, 2]\r\n window_grid_cache[k] = grid\r\n return grid\r" }, { "identifier": "normalize_coords", "path": "models/gmflow/geometry.py", "snippet": "def normalize_coords(coords, h, w):\r\n # coords: [B, H, W, 2]\r\n k = (str(coords.device), str((h, w)))\r\n if k in normalize_coords_cache:\r\n c = normalize_coords_cache[k]\r\n else:\r\n c = torch.tensor([(w - 1) / 2., (h - 1) / 2.], dtype=coords.dtype, device=coords.device)\r\n normalize_coords_cache[k] = c\r\n return (coords - c) / c # [-1, 1]\r" } ]
import torch import torch.nn.functional as F from models.gmflow.geometry import coords_grid, generate_window_grid, normalize_coords
767
def global_correlation_softmax(feature0, feature1, pred_bidir_flow=False, ): # global correlation b, c, h, w = feature0.shape feature0 = feature0.view(b, c, -1).permute(0, 2, 1) # [B, H*W, C] feature1 = feature1.view(b, c, -1) # [B, C, H*W] correlation = torch.matmul(feature0, feature1).view(b, h, w, h, w) / (c ** 0.5) # [B, H, W, H, W] # flow from softmax
def global_correlation_softmax(feature0, feature1, pred_bidir_flow=False, ): # global correlation b, c, h, w = feature0.shape feature0 = feature0.view(b, c, -1).permute(0, 2, 1) # [B, H*W, C] feature1 = feature1.view(b, c, -1) # [B, C, H*W] correlation = torch.matmul(feature0, feature1).view(b, h, w, h, w) / (c ** 0.5) # [B, H, W, H, W] # flow from softmax
init_grid = coords_grid(b, h, w, device=correlation.device, dtype=feature0.dtype) # [B, 2, H, W]
0
2023-10-29 18:25:36+00:00
2k
bmrussell/LGBattery
device.py
[ { "identifier": "Shared", "path": "globals.py", "snippet": "class Shared:\n \"\"\"_Configuration_\n\n Args:\n Singleton class for application configuration\n \"\"\"\n\n appname = 'lgbattery'\n quit_selected = False\n datadir = f'{os.getenv(\"APPDATA\")}\\\\{appname}'\n selected_device_name = '' # Used to temporarily use the selected device from prefs as \n # we haven't loaded the device list when we load the prefs\n selected_device = None # Set after prefs load when we have the device list\n devices = []\n systray = None\n logger = None\n wait_task = None\n log_level = None\n log_file = None\n level_file = None\n \n \n load_dotenv()\n\n def init_logging(self):\n rootlogger = logging.getLogger()\n rootlogger.setLevel(logging.WARNING)\n rootlogger.handlers[0].level = logging.WARNING\n \n self.logger = logging.getLogger(Shared.appname)\n self.logger.setLevel(self.log_level)\n if self.log_file == None:\n self.logger.addHandler(logging.StreamHandler(stream=sys.stdout))\n else:\n self.logger.addHandler(logging.FileHandler(f'{self.datadir}\\\\{self.log_file}'))\n formatter = logging.Formatter(fmt='[%(asctime)s] %(levelname)s in %(module)s: %(message)s')\n self.logger.handlers[0].setFormatter(formatter)\n \n def load_prefs(self):\n if not os.path.exists(Shared.datadir):\n os.makedirs(Shared.datadir)\n \n config = configparser.ConfigParser()\t\t\n config.read(f'{self.datadir}\\\\config.ini')\n prefs = None\n if 'PREFS' in config:\n prefs = config['PREFS']\n \n if prefs != None:\n if 'selected_device' in prefs:\n self.selected_device_name = prefs['selected_device']\n\n if 'level_file' in prefs:\n self.level_file = prefs['level_file']\n\n if 'log_file' in prefs:\n self.log_file = prefs['log_file']\n \n self.log_level = logging.WARNING\n \n if 'log_level' in prefs:\n switch = {\n 'DEBUG':logging.DEBUG,\n 'INFO':logging.INFO,\n 'WARNING':logging.WARNING,\n 'ERROR':logging.ERROR,\n 'CRITICAL':logging.CRITICAL\n }\n self.log_level = switch.get(prefs['log_level'].upper(), \"Invalid\")\n\n\n def save_prefs(self):\n config = configparser.ConfigParser()\n \n if not config.has_section('PREFS'):\n config.add_section('PREFS')\n \n config.set('PREFS', 'selected_device', self.selected_device.name)\n\n if self.level_file != None:\n config.set('PREFS', 'level_file', self.level_file)\n\n if self.log_file != None:\n config.set('PREFS', 'log_file', self.log_file)\n\n switch = {\n logging.DEBUG:'DEBUG',\n logging.INFO:'INFO',\n logging.WARNING:'WARNING',\n logging.ERROR:'ERROR',\n logging.CRITICAL:'CRITICAL'\n }\n log_level = switch.get(self.log_level, \"Invalid\")\n config.set('PREFS', 'log_level', log_level)\n \n with open(f'{self.datadir}\\\\config.ini', 'w') as configfile:\n config.write(configfile)\n\n def refresh_tray(self):\n if self.selected_device == None or len(self.devices) == 0:\n return None\n \n for dev in self.devices:\n if dev.id == self.selected_device.id:\n dev.select(self.systray)" }, { "identifier": "get_icon", "path": "icons.py", "snippet": "def get_icon(level): \n if level >= 0 and level <= 20:\n icon = 'battery-00-20.ico'\n elif level >= 21 and level <= 40:\n icon = 'battery-21-40.ico'\n elif level >= 41 and level <= 60:\n icon = 'battery-41-60.ico'\n elif level >= 61 and level <= 80:\n icon = 'battery-61-80.ico'\n elif level >= 81 and level <= 100:\n icon = 'battery-81-100.ico'\n else:\n icon = 'battery-unknown.ico'\n return f\"{Shared.datadir}\\\\{icon}\"" } ]
import asyncio import json import logging import websockets from globals import Shared from icons import get_icon
1,575
def get_device_by_id(id): for dev in Shared.devices: if dev.id == id: return dev return None class Device: def __init__(self, id, unitId, name, batteryLevel, charging): self.id = id self.unitId = unitId self.name = name self.batteryLevel = batteryLevel self.charging = charging def __repr__(self): return f"<Device(id:{self.id} unitId:{self.unitId} name:{self.name} batteryLevel:{self.batteryLevel} charging:{self.charging})>" def __str__(self): return f"Device(id:{self.id} unitId:{self.unitId} name:{self.name} batteryLevel:{self.batteryLevel} charging:{self.charging})>" async def get_battery(self): level = None charging = False headers = {'Origin': 'file://', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache', 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits', 'Sec-WebSocket-Protocol': 'json'} battery_request = { 'msgId': '', 'verb': 'GET', 'path': f'/battery/{self.id}/state' } async with websockets.connect(uri="ws://localhost:9010", extra_headers=headers, subprotocols=['json'], ) as websocket: while True: request = json.dumps(battery_request) await websocket.send(request) response = await websocket.recv() message = json.loads(response) Shared.logger.info(f'Received: {message}') if message['path'] == f'/battery/{self.id}/state': self.batteryLevel = message['payload']['percentage'] self.charging = (message['payload']['charging'] == True) break def select(self, tray): logger = logging.getLogger(Shared.appname) logger.info(f'Device.SELECT: Selected {self.id} {self.name}') Shared.selected_device = self Shared.selected_device_name = self.name asyncio.run(self.get_battery()) if self.charging: tooltip = f'{self.name}: {self.batteryLevel}% (charging)' else: tooltip = f'{self.name}: {str(self.batteryLevel)}%'
def get_device_by_id(id): for dev in Shared.devices: if dev.id == id: return dev return None class Device: def __init__(self, id, unitId, name, batteryLevel, charging): self.id = id self.unitId = unitId self.name = name self.batteryLevel = batteryLevel self.charging = charging def __repr__(self): return f"<Device(id:{self.id} unitId:{self.unitId} name:{self.name} batteryLevel:{self.batteryLevel} charging:{self.charging})>" def __str__(self): return f"Device(id:{self.id} unitId:{self.unitId} name:{self.name} batteryLevel:{self.batteryLevel} charging:{self.charging})>" async def get_battery(self): level = None charging = False headers = {'Origin': 'file://', 'Pragma': 'no-cache', 'Cache-Control': 'no-cache', 'Sec-WebSocket-Extensions': 'permessage-deflate; client_max_window_bits', 'Sec-WebSocket-Protocol': 'json'} battery_request = { 'msgId': '', 'verb': 'GET', 'path': f'/battery/{self.id}/state' } async with websockets.connect(uri="ws://localhost:9010", extra_headers=headers, subprotocols=['json'], ) as websocket: while True: request = json.dumps(battery_request) await websocket.send(request) response = await websocket.recv() message = json.loads(response) Shared.logger.info(f'Received: {message}') if message['path'] == f'/battery/{self.id}/state': self.batteryLevel = message['payload']['percentage'] self.charging = (message['payload']['charging'] == True) break def select(self, tray): logger = logging.getLogger(Shared.appname) logger.info(f'Device.SELECT: Selected {self.id} {self.name}') Shared.selected_device = self Shared.selected_device_name = self.name asyncio.run(self.get_battery()) if self.charging: tooltip = f'{self.name}: {self.batteryLevel}% (charging)' else: tooltip = f'{self.name}: {str(self.batteryLevel)}%'
icon = get_icon(self.batteryLevel)
1
2023-10-25 20:37:43+00:00
2k
Kiteretsu77/VCISR-official
degradation/ESR/usm_sharp.py
[ { "identifier": "filter2D", "path": "degradation/ESR/utils.py", "snippet": "def filter2D(img, kernel):\n \"\"\"PyTorch version of cv2.filter2D\n\n Args:\n img (Tensor): (b, c, h, w)\n kernel (Tensor): (b, k, k)\n \"\"\"\n k = kernel.size(-1)\n b, c, h, w = img.size()\n if k % 2 == 1:\n img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect')\n else:\n raise ValueError('Wrong kernel size')\n\n ph, pw = img.size()[-2:]\n\n if kernel.size(0) == 1:\n # apply the same kernel to all batch images\n img = img.view(b * c, 1, ph, pw)\n kernel = kernel.view(1, 1, k, k)\n return F.conv2d(img, kernel, padding=0).view(b, c, h, w)\n else:\n img = img.view(1, b * c, ph, pw)\n kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k)\n return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w)" }, { "identifier": "np2tensor", "path": "degradation/ESR/utils.py", "snippet": "def np2tensor(np_frame):\n return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).cuda().float()/255" }, { "identifier": "tensor2np", "path": "degradation/ESR/utils.py", "snippet": "def tensor2np(tensor):\n # tensor should be batch size1 and cannot be grayscale input\n return (np.transpose(tensor.detach().squeeze(0).cpu().numpy(), (1, 2, 0))) * 255" } ]
import cv2 import numpy as np import torch import os, sys from torch.nn import functional as F from degradation.ESR.utils import filter2D, np2tensor, tensor2np
948
# -*- coding: utf-8 -*- root_path = os.path.abspath('.') sys.path.append(root_path) def usm_sharp_func(img, weight=0.5, radius=50, threshold=10): """USM sharpening. Input image: I; Blurry image: B. 1. sharp = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * sharp + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 0.5. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int): Threshold on the residual (0-255 scale) used to build the sharpening mask. Default: 10. """ if radius % 2 == 0: radius += 1 blur = cv2.GaussianBlur(img, (radius, radius), 0) residual = img - blur mask = np.abs(residual) * 255 > threshold mask = mask.astype('float32') soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) sharp = img + weight * residual sharp = np.clip(sharp, 0, 1) return soft_mask * sharp + (1 - soft_mask) * img class USMSharp(torch.nn.Module): def __init__(self, type, radius=50, sigma=0): super(USMSharp, self).__init__() if radius % 2 == 0: radius += 1 self.radius = radius kernel = cv2.getGaussianKernel(radius, sigma) kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0).cuda() self.register_buffer('kernel', kernel) self.type = type def forward(self, img, weight=0.5, threshold=10, store=False): if self.type == "cv2": # pre-process cv2 type
# -*- coding: utf-8 -*- root_path = os.path.abspath('.') sys.path.append(root_path) def usm_sharp_func(img, weight=0.5, radius=50, threshold=10): """USM sharpening. Input image: I; Blurry image: B. 1. sharp = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * sharp + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 0.5. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int): Threshold on the residual (0-255 scale) used to build the sharpening mask. Default: 10. """ if radius % 2 == 0: radius += 1 blur = cv2.GaussianBlur(img, (radius, radius), 0) residual = img - blur mask = np.abs(residual) * 255 > threshold mask = mask.astype('float32') soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) sharp = img + weight * residual sharp = np.clip(sharp, 0, 1) return soft_mask * sharp + (1 - soft_mask) * img class USMSharp(torch.nn.Module): def __init__(self, type, radius=50, sigma=0): super(USMSharp, self).__init__() if radius % 2 == 0: radius += 1 self.radius = radius kernel = cv2.getGaussianKernel(radius, sigma) kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0).cuda() self.register_buffer('kernel', kernel) self.type = type def forward(self, img, weight=0.5, threshold=10, store=False): if self.type == "cv2": # pre-process cv2 type
img = np2tensor(img)
1
2023-10-29 04:33:38+00:00
2k
serengil/LightPHE
lightphe/models/Tensor.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @abstractmethod\n def generate_random_key(self) -> int:\n pass\n\n @abstractmethod\n def encrypt(\n self, plaintext: int, random_key: Union[Optional[int], Optional[list]] = None\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def decrypt(self, ciphertext: Union[int, tuple, list]) -> int:\n pass\n\n @abstractmethod\n def add(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def multiply(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple]:\n pass\n\n @abstractmethod\n def xor(self, ciphertext1: list, ciphertext2: list) -> list:\n pass\n\n @abstractmethod\n def multiply_by_contant(self, ciphertext: Union[int, tuple, list], constant: int) -> int:\n pass\n\n @abstractmethod\n def reencrypt(self, ciphertext: Union[int, tuple, list]) -> Union[int, tuple, list]:\n pass" }, { "identifier": "phe_utils", "path": "lightphe/commons/phe_utils.py", "snippet": "def parse_int(value: Union[int, float], modulo: int) -> int:\ndef fractionize(value: float, modulo: int, precision: Optional[int] = None) -> Tuple[int, int]:\ndef solve_dlp():" } ]
from typing import Union, List from lightphe.models.Homomorphic import Homomorphic from lightphe.commons import phe_utils
674
# pylint: disable=too-few-public-methods, no-else-return class Fraction: """ Class to store fractional values """ def __init__( self, dividend: Union[int, tuple, list], abs_dividend: Union[int, tuple, list], divisor: Union[int, tuple, list], sign: int = 1, ): self.dividend = dividend self.divisor = divisor self.sign = sign self.abs_dividend = abs_dividend def __str__(self): """ Print Fraction Class Object """ sign = "-" if self.sign == -1 else "+" return f"Fraction({sign}{self.abs_dividend} / {self.divisor})" def __repr__(self): """ Print Fraction Class Object """ return self.__str__() class EncryptedTensor: """ Class to store encrypted tensor objects """
# pylint: disable=too-few-public-methods, no-else-return class Fraction: """ Class to store fractional values """ def __init__( self, dividend: Union[int, tuple, list], abs_dividend: Union[int, tuple, list], divisor: Union[int, tuple, list], sign: int = 1, ): self.dividend = dividend self.divisor = divisor self.sign = sign self.abs_dividend = abs_dividend def __str__(self): """ Print Fraction Class Object """ sign = "-" if self.sign == -1 else "+" return f"Fraction({sign}{self.abs_dividend} / {self.divisor})" def __repr__(self): """ Print Fraction Class Object """ return self.__str__() class EncryptedTensor: """ Class to store encrypted tensor objects """
def __init__(self, fractions: List[Fraction], cs: Homomorphic):
0
2023-10-28 14:57:59+00:00
2k
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/save_and_load.py
[ { "identifier": "PeftType", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"" }, { "identifier": "PromptLearningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" } ]
from .config import PeftType, PromptLearningConfig
732
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_peft_model_state_dict(model, state_dict=None, adapter_name="default"): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the model will be used. """ config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict()
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_peft_model_state_dict(model, state_dict=None, adapter_name="default"): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the model will be used. """ config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict()
if config.peft_type in (PeftType.LORA, PeftType.ADALORA):
0
2023-10-30 10:50:32+00:00
2k
imhotep/hass-unifi-access
custom_components/unifi_access/hub.py
[ { "identifier": "DEVICE_NOTIFICATIONS_URL", "path": "custom_components/unifi_access/const.py", "snippet": "DEVICE_NOTIFICATIONS_URL = \"/api/v1/developer/devices/notifications\"" }, { "identifier": "DOOR_UNLOCK_URL", "path": "custom_components/unifi_access/const.py", "snippet": "DOOR_UNLOCK_URL = \"/api/v1/developer/doors/{door_id}/unlock\"" }, { "identifier": "DOORS_URL", "path": "custom_components/unifi_access/const.py", "snippet": "DOORS_URL = \"/api/v1/developer/doors\"" }, { "identifier": "UNIFI_ACCESS_API_PORT", "path": "custom_components/unifi_access/const.py", "snippet": "UNIFI_ACCESS_API_PORT = 12445" }, { "identifier": "UnifiAccessDoor", "path": "custom_components/unifi_access/door.py", "snippet": "class UnifiAccessDoor:\n \"\"\"Unifi Access Door Class.\"\"\"\n\n def __init__(\n self,\n door_id: str,\n name: str,\n door_position_status: str,\n door_lock_relay_status: str,\n hub,\n ) -> None:\n \"\"\"Initialize door.\"\"\"\n self._callbacks: set[Callable] = set()\n self._is_locking = False\n self._is_unlocking = False\n self._hub = hub\n self._id = door_id\n self.name = name\n self.door_position_status = door_position_status\n self.door_lock_relay_status = door_lock_relay_status\n self.doorbell_request_id = None\n\n @property\n def doorbell_pressed(self) -> bool:\n \"\"\"Get doorbell pressed status.\"\"\"\n return self.doorbell_request_id is not None\n\n @property\n def id(self) -> str:\n \"\"\"Get door ID.\"\"\"\n return self._id\n\n @property\n def is_open(self):\n \"\"\"Get door status.\"\"\"\n return self.door_position_status == \"open\"\n\n @property\n def is_locked(self):\n \"\"\"Solely used for locked state when calling lock.\"\"\"\n return self.door_lock_relay_status == \"lock\"\n\n @property\n def is_locking(self):\n \"\"\"Solely used for locking state when calling lock.\"\"\"\n return False\n\n @property\n def is_unlocking(self):\n \"\"\"Solely used for unlocking state when calling unlock.\"\"\"\n return self._is_unlocking\n\n def unlock(self) -> None:\n \"\"\"Unlock door.\"\"\"\n if self.is_locked:\n self._is_unlocking = True\n self._hub.unlock_door(self._id)\n self._is_unlocking = False\n _LOGGER.info(\"Door with door ID %s is unlocked\", self.id)\n else:\n _LOGGER.error(\"Door with door ID %s is already unlocked\", self.id)\n\n def register_callback(self, callback: Callable[[], None]) -> None:\n \"\"\"Register callback, called when Roller changes state.\"\"\"\n self._callbacks.add(callback)\n\n def remove_callback(self, callback: Callable[[], None]) -> None:\n \"\"\"Remove previously registered callback.\"\"\"\n self._callbacks.discard(callback)\n\n def publish_updates(self) -> None:\n \"\"\"Schedule call all registered callbacks.\"\"\"\n for callback in self._callbacks:\n callback()" } ]
import asyncio import json import logging import ssl import urllib3 import websocket from datetime import timedelta from threading import Thread from urllib.parse import urlparse from requests import request from requests.exceptions import ConnectionError as ConnError, SSLError from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryAuthFailed from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed from .const import ( DEVICE_NOTIFICATIONS_URL, DOOR_UNLOCK_URL, DOORS_URL, UNIFI_ACCESS_API_PORT, ) from .door import UnifiAccessDoor
1,326
"""Unifi Access Hub. This module interacts with the Unifi Access API server. """ _LOGGER = logging.getLogger(__name__) class ApiAuthError(Exception): """Raised when we can't authenticate with the API Token.""" class ApiError(Exception): """Raised when we have some trouble using the API.""" class UnifiAccessHub: """UnifiAccessHub. This class takes care of interacting with the Unifi Access API. """ def __init__( self, host: str, verify_ssl: bool = False, use_polling: bool = False ) -> None: """Initialize.""" self.use_polling = use_polling self.verify_ssl = verify_ssl if self.verify_ssl is False: _LOGGER.warning("SSL Verification disabled for %s", host) urllib3.disable_warnings() host_parts = host.split(":") parsed_host = urlparse(host) hostname = parsed_host.hostname if parsed_host.hostname else host_parts[0] port = ( parsed_host.port if parsed_host.port else (host_parts[1] if len(host_parts) > 1 else UNIFI_ACCESS_API_PORT) ) self._api_token = None self.host = f"https://{hostname}:{port}" self._http_headers = { "Accept": "application/json", "Content-Type": "application/json", } self.websocket_host = f"wss://{hostname}:{port}" self._websocket_headers = { "Upgrade": "websocket", "Connection": "Upgrade", } self._doors: dict[str, UnifiAccessDoor] = {} self.update_t = None @property def doors(self): """Get current doors.""" return self._doors def set_api_token(self, api_token): """Set API Access Token.""" self._api_token = api_token self._http_headers["Authorization"] = f"Bearer {self._api_token}" self._websocket_headers["Authorization"] = f"Bearer {self._api_token}" def update(self): """Get latest door data.""" _LOGGER.info( "Getting door updates from Unifi Access %s Use Polling %s", self.host, self.use_polling, )
"""Unifi Access Hub. This module interacts with the Unifi Access API server. """ _LOGGER = logging.getLogger(__name__) class ApiAuthError(Exception): """Raised when we can't authenticate with the API Token.""" class ApiError(Exception): """Raised when we have some trouble using the API.""" class UnifiAccessHub: """UnifiAccessHub. This class takes care of interacting with the Unifi Access API. """ def __init__( self, host: str, verify_ssl: bool = False, use_polling: bool = False ) -> None: """Initialize.""" self.use_polling = use_polling self.verify_ssl = verify_ssl if self.verify_ssl is False: _LOGGER.warning("SSL Verification disabled for %s", host) urllib3.disable_warnings() host_parts = host.split(":") parsed_host = urlparse(host) hostname = parsed_host.hostname if parsed_host.hostname else host_parts[0] port = ( parsed_host.port if parsed_host.port else (host_parts[1] if len(host_parts) > 1 else UNIFI_ACCESS_API_PORT) ) self._api_token = None self.host = f"https://{hostname}:{port}" self._http_headers = { "Accept": "application/json", "Content-Type": "application/json", } self.websocket_host = f"wss://{hostname}:{port}" self._websocket_headers = { "Upgrade": "websocket", "Connection": "Upgrade", } self._doors: dict[str, UnifiAccessDoor] = {} self.update_t = None @property def doors(self): """Get current doors.""" return self._doors def set_api_token(self, api_token): """Set API Access Token.""" self._api_token = api_token self._http_headers["Authorization"] = f"Bearer {self._api_token}" self._websocket_headers["Authorization"] = f"Bearer {self._api_token}" def update(self): """Get latest door data.""" _LOGGER.info( "Getting door updates from Unifi Access %s Use Polling %s", self.host, self.use_polling, )
data = self._make_http_request(f"{self.host}{DOORS_URL}")
2
2023-10-27 20:34:27+00:00
2k
aws-samples/amazon-bedrock-serverless-prompt-chaining
stacks/trip_planner_stack.py
[ { "identifier": "get_lambda_bundling_options", "path": "stacks/util.py", "snippet": "def get_lambda_bundling_options():\n return lambda_python.BundlingOptions(\n asset_excludes=[\".venv\", \".mypy_cache\", \"__pycache__\"],\n command_hooks=CommandHooks(),\n )" }, { "identifier": "get_claude_instant_invoke_chain", "path": "stacks/util.py", "snippet": "def get_claude_instant_invoke_chain(\n scope: Construct,\n id: builtins.str,\n prompt: builtins.str,\n max_tokens_to_sample: typing.Optional[int] = 250,\n temperature: typing.Optional[float] = 1,\n include_previous_conversation_in_prompt=True,\n):\n model_prompt = sfn.JsonPath.format(\n f\"{CLAUDE_HUMAN_PROMPT}{{}}{CLAUDE_AI_PROMPT}\",\n prompt,\n )\n if include_previous_conversation_in_prompt:\n model_prompt = sfn.JsonPath.format(\n \"{}{}\",\n sfn.JsonPath.string_at(\"$.output.conversation\"),\n model_prompt,\n )\n format_prompt = sfn.Pass(\n scope,\n id + \" (Format Model Inputs)\",\n parameters={\n \"prompt\": model_prompt,\n \"max_tokens_to_sample\": max_tokens_to_sample,\n \"temperature\": temperature,\n },\n result_path=\"$.model_inputs\",\n )\n invoke_model = tasks.BedrockInvokeModel(\n scope,\n id + \" (Invoke Model)\",\n model=bedrock.FoundationModel.from_foundation_model_id(\n scope,\n \"Model\",\n bedrock.FoundationModelIdentifier.ANTHROPIC_CLAUDE_INSTANT_V1,\n ),\n body=sfn.TaskInput.from_json_path_at(\"$.model_inputs\"),\n result_selector={\"response\": sfn.JsonPath.string_at(\"$.Body.completion\")},\n result_path=\"$.model_outputs\",\n )\n add_bedrock_retries(invoke_model)\n format_response = sfn.Pass(\n scope,\n id + \" (Format Model Outputs)\",\n parameters={\n \"response\": sfn.JsonPath.string_at(\"$.model_outputs.response\"),\n \"conversation\": sfn.JsonPath.format(\n \"{}{}\",\n sfn.JsonPath.string_at(\"$.model_inputs.prompt\"),\n sfn.JsonPath.string_at(\"$.model_outputs.response\"),\n ),\n },\n result_path=\"$.output\",\n )\n return format_prompt.next(invoke_model).next(format_response)" } ]
from aws_cdk import ( Duration, Stack, RemovalPolicy, aws_lambda as lambda_, aws_lambda_python_alpha as lambda_python, aws_s3 as s3, aws_ssm as ssm, aws_stepfunctions as sfn, aws_stepfunctions_tasks as tasks, ) from constructs import Construct from .util import ( get_lambda_bundling_options, get_claude_instant_invoke_chain, )
692
class TripPlannerStack(Stack): def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # Agent #1: suggest places to stay
class TripPlannerStack(Stack): def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # Agent #1: suggest places to stay
hotels_job = get_claude_instant_invoke_chain(
1
2023-10-26 22:17:30+00:00
2k
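Here `next_line` opens a call to the `get_claude_instant_invoke_chain` helper shown in the context. Filled out from that helper's signature, the call plausibly continues like this; the construct id, prompt text, and `$.location` path are assumptions, not taken from the repo:

hotels_job = get_claude_instant_invoke_chain(
    self,
    "Suggest Hotels",
    prompt=sfn.JsonPath.format(
        "Suggest three hotels for a trip to {}.",
        sfn.JsonPath.string_at("$.location"),
    ),
    include_previous_conversation_in_prompt=False,
)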
pengsongyou/lseg_feature_extraction
modules/models/lseg_blocks.py
[ { "identifier": "_make_pretrained_clip_vitl16_384", "path": "modules/models/lseg_vit.py", "snippet": "def _make_pretrained_clip_vitl16_384(\n pretrained, use_readout=\"ignore\", hooks=None, enable_attention_hooks=False\n):\n clip_pretrained, _ = clip.load(\"ViT-B/32\", device='cuda', jit=False)\n model = timm.create_model(\"vit_large_patch16_384\", pretrained=pretrained)\n\n hooks = [5, 11, 17, 23] if hooks == None else hooks\n \n pretrained = _make_vit_b16_backbone(\n model,\n features=[256, 512, 1024, 1024],\n hooks=hooks,\n vit_features=1024,\n use_readout=use_readout,\n enable_attention_hooks=enable_attention_hooks,\n )\n return clip_pretrained, pretrained" }, { "identifier": "_make_pretrained_clip_vitb32_384", "path": "modules/models/lseg_vit.py", "snippet": "def _make_pretrained_clip_vitb32_384(pretrained, use_readout=\"ignore\", hooks=None, enable_attention_hooks=False):\n clip_pretrained, _ = clip.load(\"ViT-B/32\", device='cuda', jit=False)\n model = timm.create_model(\"vit_base_patch32_384\", pretrained=pretrained)\n\n hooks = [2, 5, 8, 11] if hooks == None else hooks\n \n pretrained = _make_vit_b32_backbone(\n model, \n features=[96, 192, 384, 768], \n hooks=hooks, \n use_readout=use_readout,\n enable_attention_hooks=False,\n )\n return clip_pretrained, pretrained" }, { "identifier": "_make_pretrained_clipRN50x16_vitl16_384", "path": "modules/models/lseg_vit.py", "snippet": "def _make_pretrained_clipRN50x16_vitl16_384(\n pretrained, use_readout=\"ignore\", hooks=None, enable_attention_hooks=False\n):\n clip_pretrained, _ = clip.load(\"RN50x16\", device='cuda', jit=False)\n model = timm.create_model(\"vit_large_patch16_384\", pretrained=pretrained)\n\n hooks = [5, 11, 17, 23] if hooks == None else hooks\n \n pretrained = _make_vit_b16_backbone(\n model,\n features=[256, 512, 1024, 1024],\n hooks=hooks,\n vit_features=1024,\n use_readout=use_readout,\n enable_attention_hooks=enable_attention_hooks,\n )\n return clip_pretrained, pretrained" }, { "identifier": "forward_vit", "path": "modules/models/lseg_vit.py", "snippet": "def forward_vit(pretrained, x):\n b, c, h, w = x.shape\n \n # encoder\n glob = pretrained.model.forward_flex(x)\n\n layer_1 = pretrained.activations[\"1\"]\n layer_2 = pretrained.activations[\"2\"]\n layer_3 = pretrained.activations[\"3\"]\n layer_4 = pretrained.activations[\"4\"]\n\n layer_1 = pretrained.act_postprocess1[0:2](layer_1)\n layer_2 = pretrained.act_postprocess2[0:2](layer_2)\n layer_3 = pretrained.act_postprocess3[0:2](layer_3)\n layer_4 = pretrained.act_postprocess4[0:2](layer_4)\n\n unflatten = nn.Sequential(\n nn.Unflatten(\n 2,\n torch.Size(\n [\n h // pretrained.model.patch_size[1],\n w // pretrained.model.patch_size[0],\n ]\n ),\n )\n )\n\n if layer_1.ndim == 3:\n layer_1 = unflatten(layer_1)\n if layer_2.ndim == 3:\n layer_2 = unflatten(layer_2)\n if layer_3.ndim == 3:\n layer_3 = unflatten(layer_3)\n if layer_4.ndim == 3:\n layer_4 = unflatten(layer_4)\n\n layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)\n layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)\n layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)\n layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)\n\n return layer_1, layer_2, layer_3, layer_4" } ]
import torch import torch.nn as nn from .lseg_vit import ( _make_pretrained_clip_vitl16_384, _make_pretrained_clip_vitb32_384, _make_pretrained_clipRN50x16_vitl16_384, forward_vit, )
1,199
def _make_encoder( backbone, features, use_pretrained=True, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore", enable_attention_hooks=False, ): if backbone == "clip_vitl16_384":
def _make_encoder( backbone, features, use_pretrained=True, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore", enable_attention_hooks=False, ): if backbone == "clip_vitl16_384":
clip_pretrained, pretrained = _make_pretrained_clip_vitl16_384(
0
2023-10-27 15:40:36+00:00
2k
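`gold_snippet_index` is 0 here, meaning the first context snippet (`_make_pretrained_clip_vitl16_384`) is the one needed to finish the call. Wiring its signature to the variables available in `_make_encoder` gives a plausible reconstruction (not a verbatim quote of the repo):

clip_pretrained, pretrained = _make_pretrained_clip_vitl16_384(
    use_pretrained,
    hooks=hooks,
    use_readout=use_readout,
    enable_attention_hooks=enable_attention_hooks,
)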
chenran-li/RQL-release
rl_zoo3/plots/plot_train.py
[ { "identifier": "LoadMonitorResultsError", "path": "stable_baselines3/common/monitor.py", "snippet": "class LoadMonitorResultsError(Exception):\n \"\"\"\n Raised when loading the monitor log fails.\n \"\"\"\n\n pass" }, { "identifier": "load_results", "path": "stable_baselines3/common/monitor.py", "snippet": "def load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame" }, { "identifier": "X_EPISODES", "path": "stable_baselines3/common/results_plotter.py", "snippet": "X_EPISODES = \"episodes\"" }, { "identifier": "X_TIMESTEPS", "path": "stable_baselines3/common/results_plotter.py", "snippet": "X_TIMESTEPS = \"timesteps\"" }, { "identifier": "X_WALLTIME", "path": "stable_baselines3/common/results_plotter.py", "snippet": "X_WALLTIME = \"walltime_hrs\"" }, { "identifier": "ts2xy", "path": "stable_baselines3/common/results_plotter.py", "snippet": "def ts2xy(data_frame: pd.DataFrame, x_axis: str) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Decompose a data frame variable to x ans ys\n\n :param data_frame: the input data\n :param x_axis: the axis for the x and y output\n (can be X_TIMESTEPS='timesteps', X_EPISODES='episodes' or X_WALLTIME='walltime_hrs')\n :return: the x and y output\n \"\"\"\n if x_axis == X_TIMESTEPS:\n x_var = np.cumsum(data_frame.l.values)\n y_var = data_frame.r.values\n elif x_axis == X_EPISODES:\n x_var = np.arange(len(data_frame))\n y_var = data_frame.r.values\n elif x_axis == X_WALLTIME:\n # Convert to hours\n x_var = data_frame.t.values / 3600.0\n y_var = data_frame.r.values\n else:\n raise NotImplementedError\n return x_var, y_var" }, { "identifier": "window_func", "path": "stable_baselines3/common/results_plotter.py", "snippet": "def window_func(var_1: np.ndarray, var_2: np.ndarray, window: int, func: Callable) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Apply a function to the rolling window of 2 arrays\n\n :param var_1: variable 1\n :param var_2: variable 2\n :param window: length of the rolling window\n :param func: function to apply on the rolling window on variable 2 (such as np.mean)\n :return: the rolling output with applied function\n \"\"\"\n var_2_window = rolling_window(var_2, window)\n function_on_var2 = func(var_2_window, axis=-1)\n return var_1[window - 1 :], function_on_var2" } ]
import argparse import os import numpy as np import seaborn from matplotlib import pyplot as plt from stable_baselines3.common.monitor import LoadMonitorResultsError, load_results from stable_baselines3.common.results_plotter import X_EPISODES, X_TIMESTEPS, X_WALLTIME, ts2xy, window_func
1,247
""" Plot training reward/success rate """ # Activate seaborn seaborn.set() def plot_train(): parser = argparse.ArgumentParser("Gather results, plot training reward/success") parser.add_argument("-a", "--algo", help="Algorithm to include", type=str, required=True) parser.add_argument("-e", "--env", help="Environment(s) to include", nargs="+", type=str, required=True) parser.add_argument("-f", "--exp-folder", help="Folders to include", type=str, required=True) parser.add_argument("--figsize", help="Figure size, width, height in inches.", nargs=2, type=int, default=[6.4, 4.8]) parser.add_argument("--fontsize", help="Font size", type=int, default=14) parser.add_argument("-max", "--max-timesteps", help="Max number of timesteps to display", type=int) parser.add_argument("-x", "--x-axis", help="X-axis", choices=["steps", "episodes", "time"], type=str, default="steps") parser.add_argument("-y", "--y-axis", help="Y-axis", choices=["success", "reward", "length"], type=str, default="reward") parser.add_argument("-w", "--episode-window", help="Rolling window size", type=int, default=100) args = parser.parse_args() algo = args.algo envs = args.env log_path = os.path.join(args.exp_folder, algo) x_axis = {
""" Plot training reward/success rate """ # Activate seaborn seaborn.set() def plot_train(): parser = argparse.ArgumentParser("Gather results, plot training reward/success") parser.add_argument("-a", "--algo", help="Algorithm to include", type=str, required=True) parser.add_argument("-e", "--env", help="Environment(s) to include", nargs="+", type=str, required=True) parser.add_argument("-f", "--exp-folder", help="Folders to include", type=str, required=True) parser.add_argument("--figsize", help="Figure size, width, height in inches.", nargs=2, type=int, default=[6.4, 4.8]) parser.add_argument("--fontsize", help="Font size", type=int, default=14) parser.add_argument("-max", "--max-timesteps", help="Max number of timesteps to display", type=int) parser.add_argument("-x", "--x-axis", help="X-axis", choices=["steps", "episodes", "time"], type=str, default="steps") parser.add_argument("-y", "--y-axis", help="Y-axis", choices=["success", "reward", "length"], type=str, default="reward") parser.add_argument("-w", "--episode-window", help="Rolling window size", type=int, default=100) args = parser.parse_args() algo = args.algo envs = args.env log_path = os.path.join(args.exp_folder, algo) x_axis = {
"steps": X_TIMESTEPS,
3
2023-10-28 01:09:21+00:00
2k
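The cropped code stops at `x_axis = {` and `next_line` supplies `"steps": X_TIMESTEPS,`. Given the three imported axis constants and the `--x-axis` choices, the finished mapping most likely reads as follows (a reconstruction):

x_axis = {
    "steps": X_TIMESTEPS,
    "episodes": X_EPISODES,
    "time": X_WALLTIME,
}[args.x_axis]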
AmgdGocha/DriveFS-Sleuth
drivefs_sleuth/tasks.py
[ { "identifier": "copy_file", "path": "drivefs_sleuth/utils.py", "snippet": "def copy_file(file_path, dest_filename, recovery_path=''):\n if not recovery_path:\n recovery_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'recovered_items')\n\n if not os.path.exists(recovery_path):\n os.makedirs(recovery_path)\n\n shutil.copy2(file_path, os.path.join(recovery_path, dest_filename))" }, { "identifier": "lookup_account_id", "path": "drivefs_sleuth/utils.py", "snippet": "def lookup_account_id(drivefs_path, account_id):\n logs_dir = os.path.join(drivefs_path, \"Logs\")\n for _, _, files in os.walk(logs_dir):\n for file in files:\n if file.startswith(\"drive_fs\") and file.endswith(\".txt\"):\n with open(os.path.join(logs_dir, file), 'r') as logs_file:\n logs = logs_file.read()\n match = re.search(r\"([\\w\\.-]+@[\\w\\.-]+\\.\\w+) \\(\" + account_id + r\"\\)\", logs)\n if match:\n return match.group(1)\n return ''" }, { "identifier": "get_properties_list", "path": "drivefs_sleuth/utils.py", "snippet": "def get_properties_list(profile_path):\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT DISTINCT key FROM item_properties\")\n return [prop[0] for prop in cursor.fetchall()]\n except sqlite3.OperationalError:\n return []" }, { "identifier": "get_account_properties", "path": "drivefs_sleuth/utils.py", "snippet": "def get_account_properties(profile_path):\n properties = {\n 'name': '',\n 'photo_url': ''\n }\n try:\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT value FROM properties WHERE property = 'driveway_account'\")\n\n driveway_account = parse_protobuf(cursor.fetchone()[0])\n name = driveway_account['2']['1']['3']\n if isinstance(name, str):\n properties['name'] = name\n properties['photo_url'] = driveway_account['2']['1']['5']\n\n except sqlite3.OperationalError:\n try:\n with sqlite3.connect(os.path.join(profile_path, \"metadata_sqlite_db\")) as metadata_sqlite_db:\n cursor = metadata_sqlite_db.cursor()\n cursor.execute(\"SELECT value FROM properties WHERE property = 'account'\")\n\n account = parse_protobuf(cursor.fetchone()[0])\n name = account['1']['3']\n if isinstance(name, str):\n properties['name'] = name\n properties['photo_url'] = account['1']['5']\n\n except sqlite3.OperationalError:\n return properties\n\n except TypeError:\n return properties\n\n return properties" }, { "identifier": "get_available_profiles", "path": "drivefs_sleuth/utils.py", "snippet": "def get_available_profiles(drivefs_path):\n profiles = []\n for subdir in os.listdir(drivefs_path):\n if subdir.isdigit() and len(subdir) == 21:\n profiles.append(subdir)\n return profiles" }, { "identifier": "get_experiment_account_ids", "path": "drivefs_sleuth/utils.py", "snippet": "def get_experiment_account_ids(drivefs_path):\n try:\n with sqlite3.connect(os.path.join(drivefs_path, \"experiments.db\")) as experiments_db:\n cursor = experiments_db.cursor()\n cursor.execute(\"SELECT value FROM PhenotypeValues WHERE key='account_ids'\")\n return re.findall(r'\\d+', cursor.fetchall()[0][0].decode('utf-8'))\n except sqlite3.OperationalError as e:\n return []" }, { "identifier": "File", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class File(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, 
viewed_by_me_date,\n trashed, properties, tree_path, content_cache_path, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n\n self.__content_cache_path = content_cache_path\n self.__file_type = parse_protobuf(proto).get('45', '')\n\n def get_content_cache_path(self):\n return self.__content_cache_path\n\n def get_file_type(self):\n return self.__file_type" }, { "identifier": "Link", "path": "drivefs_sleuth/synced_files_tree.py", "snippet": "class Link(Item):\n def __init__(self, stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date, viewed_by_me_date,\n trashed, properties, tree_path, target_item, proto):\n super().__init__(stable_id, url_id, local_title, mime_type, is_owner, file_size, modified_date,\n viewed_by_me_date, trashed, properties, tree_path, proto)\n self.__target_item = target_item\n\n def get_target_item(self):\n return self.__target_item" } ]
import os import csv from jinja2 import Environment from jinja2 import FileSystemLoader from drivefs_sleuth.utils import copy_file from drivefs_sleuth.utils import lookup_account_id from drivefs_sleuth.utils import get_properties_list from drivefs_sleuth.utils import get_account_properties from drivefs_sleuth.utils import get_available_profiles from drivefs_sleuth.utils import get_experiment_account_ids from drivefs_sleuth.synced_files_tree import File from drivefs_sleuth.synced_files_tree import Link
1,505
def get_accounts(drivefs_path): accounts = {} experiments_ids = get_experiment_account_ids(drivefs_path) profiles = get_available_profiles(drivefs_path) available_accounts = set(experiments_ids + profiles) for account_id in available_accounts: accounts[account_id] = { 'email': lookup_account_id(drivefs_path, account_id) } logged_in = account_id in profiles accounts[account_id]['logged_in'] = logged_in accounts[account_id]['properties'] = get_account_properties(os.path.join(drivefs_path, account_id)) return accounts def __build_headers(setup): headers = ['stable_id', 'type', 'url_id', 'local_title', 'mime_type', 'path_in_content_cache', 'is_owner', 'file_size', 'modified_date', 'viewed_by_me_date', 'trashed', 'tree_path', 'md5'] for account in setup.get_accounts(): if account.is_logged_in():
def get_accounts(drivefs_path): accounts = {} experiments_ids = get_experiment_account_ids(drivefs_path) profiles = get_available_profiles(drivefs_path) available_accounts = set(experiments_ids + profiles) for account_id in available_accounts: accounts[account_id] = { 'email': lookup_account_id(drivefs_path, account_id) } logged_in = account_id in profiles accounts[account_id]['logged_in'] = logged_in accounts[account_id]['properties'] = get_account_properties(os.path.join(drivefs_path, account_id)) return accounts def __build_headers(setup): headers = ['stable_id', 'type', 'url_id', 'local_title', 'mime_type', 'path_in_content_cache', 'is_owner', 'file_size', 'modified_date', 'viewed_by_me_date', 'trashed', 'tree_path', 'md5'] for account in setup.get_accounts(): if account.is_logged_in():
for prop in get_properties_list(os.path.join(setup.get_drivefs_path(), account.get_account_id())):
2
2023-10-29 11:05:04+00:00
2k
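`next_line` starts a loop over `get_properties_list(...)` inside `__build_headers`, which is assembling CSV column names. One plausible loop body, consistent with that purpose (the body itself is an assumption):

for prop in get_properties_list(os.path.join(setup.get_drivefs_path(), account.get_account_id())):
    if prop not in headers:
        headers.append(prop)  # one extra CSV column per item property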
zyang1580/CoLLM
minigpt4/datasets/builders/rec_base_dataset_builder.py
[ { "identifier": "is_dist_avail_and_initialized", "path": "minigpt4/common/dist_utils.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "is_main_process", "path": "minigpt4/common/dist_utils.py", "snippet": "def is_main_process():\n return get_rank() == 0" }, { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "BaseProcessor", "path": "minigpt4/processors/base_processor.py", "snippet": "class BaseProcessor:\n def __init__(self):\n self.transform = lambda x: x\n return\n\n def __call__(self, item):\n return self.transform(item)\n\n @classmethod\n def from_config(cls, cfg=None):\n return cls()\n\n def build(self, **kwargs):\n cfg = OmegaConf.create(kwargs)\n\n return self.from_config(cfg)" } ]
import logging import os import shutil import warnings import torch.distributed as dist import minigpt4.common.utils as utils from omegaconf import OmegaConf from torchvision.datasets.utils import download_url from minigpt4.common.dist_utils import is_dist_avail_and_initialized, is_main_process from minigpt4.common.registry import registry from minigpt4.processors.base_processor import BaseProcessor
799
""" This file is from Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class RecBaseDatasetBuilder: train_dataset_cls, eval_dataset_cls = None, None def __init__(self, cfg=None): super().__init__() if cfg is None: # help to create datasets from default config. self.config = load_dataset_config(self.default_config_path()) elif isinstance(cfg, str): self.config = load_dataset_config(cfg) else: # when called from task.build_dataset() self.config = cfg self.data_type = self.config.data_type # self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} def build_datasets(self): # download, split, etc... # only called on 1 GPU/TPU in distributed
""" This file is from Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class RecBaseDatasetBuilder: train_dataset_cls, eval_dataset_cls = None, None def __init__(self, cfg=None): super().__init__() if cfg is None: # help to create datasets from default config. self.config = load_dataset_config(self.default_config_path()) elif isinstance(cfg, str): self.config = load_dataset_config(cfg) else: # when called from task.build_dataset() self.config = cfg self.data_type = self.config.data_type # self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} def build_datasets(self): # download, split, etc... # only called on 1 GPU/TPU in distributed
if is_main_process():
1
2023-10-29 12:47:25+00:00
2k
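LAVIS-style builders usually download on the main process and then synchronize before building splits, which is exactly what the `if is_main_process():` continuation suggests. A sketch of the likely remainder; `_download_data` and `build` are assumed method names from the LAVIS convention:

if is_main_process():
    self._download_data()
if is_dist_avail_and_initialized():
    dist.barrier()
# every rank can now safely build its dataset splits
datasets = self.build()
return datasets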
naver/bq-nco
learning/op/traj_learner.py
[ { "identifier": "decode", "path": "learning/op/decoding.py", "snippet": "def decode(node_coords: Tensor, node_values: Tensor, upper_bounds: Tensor, dist_matrices: Tensor, net: Module,\n beam_size: int, knns: int) -> Tensor:\n if beam_size == 1:\n tours, collected_rewards = greedy_decoding_loop(node_coords, node_values, upper_bounds, dist_matrices, net, knns)\n else:\n tours, collected_rewards = beam_search_decoding_loop(node_coords, node_values, upper_bounds, dist_matrices, net, beam_size, knns)\n\n distances = compute_tour_lens(tours, dist_matrices)\n assert torch.all(distances <= upper_bounds + 1e-4)\n\n return collected_rewards, tours" }, { "identifier": "do_lr_decay", "path": "utils/misc.py", "snippet": "def do_lr_decay(optimizer, decay_rate):\n for param_group in optimizer.param_groups:\n new_learning_rate = param_group['lr'] * decay_rate\n param_group['lr'] = new_learning_rate\n print(\"Learning rate decayed by {:.4f}\".format(decay_rate))" }, { "identifier": "EpochMetrics", "path": "utils/misc.py", "snippet": "class EpochMetrics:\n # dict of metrics values over epoch\n # makes sure the same metric names are given for each update\n\n def __init__(self):\n self.metrics = None\n\n def update(self, d):\n d = {k: (v.item() if isinstance(v, Tensor) else v) for k, v in d.items()}\n if self.metrics is None:\n self.metrics = {kd: [vd] for kd, vd in d.items()}\n else:\n for (k, v), (kd, vd) in zip(self.metrics.items(), d.items()):\n assert k == kd\n v.append(vd)\n\n def get_means(self):\n return {k: np.mean(v) for k, v in self.metrics.items()}" } ]
import time import torch from torch import nn from learning.op.decoding import decode from utils.misc import do_lr_decay, EpochMetrics
791
""" BQ-NCO Copyright (c) 2023-present NAVER Corp. Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license """ DEBUG_NUM_BATCHES = 3 class TrajectoryLearner: def __init__(self, args, net, module, device, data_iterator, optimizer=None, checkpointer=None): # same supervisor is used for training and testing, during testing we do not have optimizer, mlflow etc. self.net = net self.module = module self.device = device self.data_iterator = data_iterator self.optimizer = optimizer self.checkpointer = checkpointer self.beam_size = args.beam_size self.knns = args.knns self.output_dir = args.output_dir self.test_only = args.test_only self.debug = args.debug if not args.test_only: try: self.test_every = args.test_every if args.test_every > 0 else None except AttributeError: self.test_every = None self.decay_rate = args.decay_rate self.decay_every = args.decay_every self.loss = nn.CrossEntropyLoss() self.best_current_val_metric = float('inf') self.epoch_done = 0 self.nb_epochs = args.nb_total_epochs def train(self): assert not self.test_only for _ in range(self.nb_epochs): # Train one epoch start = time.time() self.net.train()
""" BQ-NCO Copyright (c) 2023-present NAVER Corp. Creative Commons Attribution-NonCommercial-ShareAlike 4.0 license """ DEBUG_NUM_BATCHES = 3 class TrajectoryLearner: def __init__(self, args, net, module, device, data_iterator, optimizer=None, checkpointer=None): # same supervisor is used for training and testing, during testing we do not have optimizer, mlflow etc. self.net = net self.module = module self.device = device self.data_iterator = data_iterator self.optimizer = optimizer self.checkpointer = checkpointer self.beam_size = args.beam_size self.knns = args.knns self.output_dir = args.output_dir self.test_only = args.test_only self.debug = args.debug if not args.test_only: try: self.test_every = args.test_every if args.test_every > 0 else None except AttributeError: self.test_every = None self.decay_rate = args.decay_rate self.decay_every = args.decay_every self.loss = nn.CrossEntropyLoss() self.best_current_val_metric = float('inf') self.epoch_done = 0 self.nb_epochs = args.nb_total_epochs def train(self): assert not self.test_only for _ in range(self.nb_epochs): # Train one epoch start = time.time() self.net.train()
epoch_metrics_train = EpochMetrics()
2
2023-10-27 09:08:45+00:00
2k
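`EpochMetrics` (see the context) accumulates per-step metric dicts and averages them at the end of an epoch. A minimal usage sketch; `train_batches` and `train_step` are placeholder names, not from the repo:

epoch_metrics_train = EpochMetrics()
for batch in train_batches:
    loss = train_step(batch)
    epoch_metrics_train.update({"loss": loss})
mean_metrics = epoch_metrics_train.get_means()  # e.g. {"loss": 0.42}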
coder-pig/YuQueBackups
app.py
[ { "identifier": "init_token", "path": "yuque_doc_backups.py", "snippet": "def is_dir_existed(file_path, mkdir=True):\ndef write_text_to_file(content, file_path, mode=\"w+\"):\ndef scan_file_list_by_suffix(file_dir=os.getcwd(), suffix=\"\"):\n def __init__(self, repo_id, repo_type, repo_slug, repo_name, repo_namespace):\n def __init__(self, node_type, node_title, node_uuid, parent_uuid, doc_id, repo_id, repo_name):\n def __init__(self, doc_id, book_id, book_name, doc_slug, doc_title, doc_content):\n def save_to_md(self):\ndef init_token(token):\ndef send_request(desc, api):\ndef fetch_user_id():\ndef fetch_repo_list(user_id):\ndef fetch_toc_list(repo_id, repo_name):\ndef traverse_nodes(node, save_path=\"\"):\ndef fetch_doc_detail(node, save_path):\nclass Repo:\nclass TocNode:\nclass Doc:" }, { "identifier": "search_all_file", "path": "yeque_md_to_local.py", "snippet": "def search_all_file(file_dir=backups_origin_md_dir, target_suffix_tuple=('.md')):\nasync def download_pic(pic_path, url, headers=None):\ndef read_file_text_content(file_path):\ndef write_text_to_file(content, file_path, mode=\"w+\"):\ndef is_dir_existed(file_path, mkdir=True):\ndef pic_to_local(match_result, pic_save_dir):\ndef md_to_local(md_file_list):" } ]
from yuque_doc_backups import init_token, fetch_user_id, fetch_repo_list, fetch_toc_list, doc_count from yeque_md_to_local import search_all_file, md_to_local, pic_url_path_record_list, download_pic import asyncio import time
685
# -*- coding: utf-8 -*- # !/usr/bin/env python """ ------------------------------------------------- File : app.py Author : CoderPig date : 2023-10-26 14:57 Desc : 语雀备份脚本-入口 ------------------------------------------------- """ if __name__ == '__main__': yq_token = input("请输入你的语雀Token:") if len(yq_token) == 0: exit("请输入正确的Token!") init_token(yq_token) start_time = time.time() yq_user_id = fetch_user_id() print("开始执行文档备份,请稍等...") yq_repo_list = fetch_repo_list(yq_user_id) for yq_repo in yq_repo_list: print("开始拉取【{}】仓库下的文档".format(yq_repo.repo_name)) fetch_toc_list(yq_repo.repo_id, yq_repo.repo_name) print("文档备份完毕,共记备份文档【{}】篇,开始执行Markdown文件批量本地化...".format(doc_count)) yq_doc_file_list = search_all_file() print("共扫描到Markdown文件【{}】篇,开始批量本地化...".format(len(yq_doc_file_list)))
# -*- coding: utf-8 -*- # !/usr/bin/env python """ ------------------------------------------------- File : app.py Author : CoderPig date : 2023-10-26 14:57 Desc : 语雀备份脚本-入口 ------------------------------------------------- """ if __name__ == '__main__': yq_token = input("请输入你的语雀Token:") if len(yq_token) == 0: exit("请输入正确的Token!") init_token(yq_token) start_time = time.time() yq_user_id = fetch_user_id() print("开始执行文档备份,请稍等...") yq_repo_list = fetch_repo_list(yq_user_id) for yq_repo in yq_repo_list: print("开始拉取【{}】仓库下的文档".format(yq_repo.repo_name)) fetch_toc_list(yq_repo.repo_id, yq_repo.repo_name) print("文档备份完毕,共记备份文档【{}】篇,开始执行Markdown文件批量本地化...".format(doc_count)) yq_doc_file_list = search_all_file() print("共扫描到Markdown文件【{}】篇,开始批量本地化...".format(len(yq_doc_file_list)))
md_to_local(yq_doc_file_list)
1
2023-10-26 08:35:04+00:00
2k
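After `md_to_local(yq_doc_file_list)` the script presumably reports elapsed time using the `start_time` captured earlier. A guess at the closing lines (the message wording is an assumption):

md_to_local(yq_doc_file_list)
print("Backup finished in {:.2f}s".format(time.time() - start_time))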
tobagin/whakarere
whakarere/pages/whatsapp.py
[ { "identifier": "ChatItem", "path": "whakarere/types/chat.py", "snippet": "class ChatItem(GObject.Object):\n chat_id = GObject.Property(type=str)\n chat_name = GObject.Property(type=str)\n chat_picture = GObject.Property(type=Gdk.Texture)\n last_message_body = GObject.Property(type=str)\n chat_timestamp = GObject.Property(type=str)\n last_messager_user = GObject.Property(type=str)\n unread_messages = GObject.Property(type=int)\n is_group = GObject.Property(type=bool, default=False)\n\n def __init__(self, chat_id, chat_name, chat_picture, last_message_body, chat_timestamp, last_messager_user, unread_messages, is_group):\n super().__init__()\n self.chat_id = chat_id\n self.chat_name = chat_name\n self.chat_picture = chat_picture\n self.last_message_body = last_message_body\n self.chat_timestamp = chat_timestamp\n self.last_messager_user = last_messager_user\n self.unread_messages = unread_messages\n self.is_group = is_group" }, { "identifier": "WindowTitlebarWidget", "path": "whakarere/widgets/titlebar.py", "snippet": "class WindowTitlebarWidget(Gtk.Box):\n def __init__(self):\n super().__init__(orientation=Gtk.Orientation.VERTICAL, spacing=2)\n self.label_title = Gtk.Label(label=\"Whakarere\")\n self.label_title.add_css_class(\"title\")\n self.label_subtitle = Gtk.Label(label=\"Available Sessions\")\n self.label_subtitle.add_css_class(\"subtitle\")\n self.append(self.label_title)\n self.append(self.label_subtitle)\n\n def set_title(self, title):\n self.label_title.set_label(title)\n\n def set_subtitle(self, subtitle):\n self.label_subtitle.set_label(subtitle)" }, { "identifier": "MainMenuButtonWidget", "path": "whakarere/widgets/main_menu.py", "snippet": "class MainMenuButtonWidget(Gtk.MenuButton):\n def __init__(self):\n super().__init__()\n # Create MainMenu Button Widget\n self.set_icon_name(\"open-menu-symbolic\")\n self.set_tooltip_text(\"Main Menu\")\n self.set_has_frame(False)\n self.set_direction(Gtk.ArrowType.DOWN)\n self.set_popover(Gtk.Popover())\n self.get_popover().set_position(Gtk.PositionType.BOTTOM)\n self.get_popover().set_has_arrow(True)\n self.get_popover().set_size_request(200, 200)\n self.get_popover().set_child(Gtk.Label(label=\"Main Menu\"))\n \n # About Button\n about_button = Gtk.Button()\n about_button.set_label(\"About Whakarere\")\n about_button.set_has_frame(False)\n about_button.connect(\"clicked\", self.on_about_clicked)\n \n # Keyboard Shortcuts Button\n shortcut_button = Gtk.Button()\n shortcut_button.set_label(\"Keyboard Shortcuts\")\n shortcut_button.set_has_frame(False)\n shortcut_button.connect(\"clicked\", self.on_shortcuts_clicked)\n \n # Preferences Button\n preferences_button = Gtk.Button()\n preferences_button.set_label(\"Preferences\")\n preferences_button.set_has_frame(False)\n preferences_button.connect(\"clicked\", self.on_preferences_clicked)\n\n settings_menu = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n separetor = Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL)\n settings_menu.append(separetor)\n settings_menu.append(preferences_button)\n settings_menu.append(shortcut_button)\n settings_menu.append(about_button)\n\n self.get_popover().set_child(settings_menu)\n\n def on_about_clicked(self, button):\n about_window = Adw.AboutWindow(modal=True, transient_for=self)\n about_window.set_application_icon(\"com.mudeprolinux.whakarere\")\n about_window.set_application_name(\"Whakarere\")\n about_window.set_version(\"0.1.0\")\n #about_window.set_comments(\"A Gtk4 Whatsapp Client.\")\n 
about_window.set_website(\"https://mudeprolinux.com\")\n about_window.set_developer_name(\"Mude Pro Linux\")\n about_window.set_developers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_designers([\"Thiago Fernandes <[email protected]>\"])\n about_window.set_license_type(Gtk.License.MIT_X11)\n about_window.set_copyright(\"2023 © Mude Pro Linux\")\n about_window.set_issue_url(\"https://github.com/tobagin/whakarere/issues\")\n\n # Show the About window\n about_window.present()\n \n def on_shortcuts_clicked(self, button):\n shortcuts_window = Gtk.ShortcutsWindow(modal=True, transient_for=self)\n shortcuts_section = Gtk.ShortcutsSection()\n shortcuts_group = Gtk.ShortcutsGroup()\n shortcuts_section.add_group(shortcuts_group)\n shortcuts_window.add_session(shortcuts_section)\n copy_shortcut = Gtk.Shortcut.new_from_string(\"<Ctrl>C\", Gtk.Label.new(\"Copy Selected Text\"))\n shortcuts_group.add(copy_shortcut)\n shortcuts_window.show()\n\n def on_preferences_clicked(self, button):\n pass" } ]
import gi import base64, requests, threading from whakarere.types.chat import ChatItem from whakarere.widgets.titlebar import WindowTitlebarWidget from whakarere.widgets.main_menu import MainMenuButtonWidget from gi.repository import Gtk, Adw, GLib, Gio, GdkPixbuf, Pango, Gdk, GObject from datetime import datetime
1,319
gi.require_version("Gtk", "4.0") gi.require_version("Adw", "1") gi.require_version("GdkPixbuf", "2.0") class WhatsappMessengerPage(Adw.NavigationPage): def __init__(self, app_manager, session_id): super().__init__() self.set_title("Whakarere") self.app_manager = app_manager self.session_id = session_id # Create TitleBar Widget
gi.require_version("Gtk", "4.0") gi.require_version("Adw", "1") gi.require_version("GdkPixbuf", "2.0") class WhatsappMessengerPage(Adw.NavigationPage): def __init__(self, app_manager, session_id): super().__init__() self.set_title("Whakarere") self.app_manager = app_manager self.session_id = session_id # Create TitleBar Widget
self.window_titlebar_widget = WindowTitlebarWidget()
1
2023-10-29 15:46:50+00:00
2k
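`next_line` instantiates the `WindowTitlebarWidget` from the context. Its `set_title`/`set_subtitle` methods suggest the page setup continues roughly like this; the subtitle text and the follow-up widget are assumptions:

self.window_titlebar_widget = WindowTitlebarWidget()
self.window_titlebar_widget.set_title("Whakarere")
self.window_titlebar_widget.set_subtitle("WhatsApp Messenger")
self.main_menu_button = MainMenuButtonWidget()  # also imported in this record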
Agricultural-Robotics-Bonn/pagnerf
loss/lin_assignment_things.py
[ { "identifier": "centers_from_3d_points_with_ids", "path": "utils/outlier_rejection.py", "snippet": "def centers_from_3d_points_with_ids(points):\n # points: [N,[x,y,z,ID]]\n # return: [I,[x,y,z,ID]]\n # K: number of unique IDs\n # [K,[x,y,z,ID]]: centers of the points with the same ID\n\n # get unique center ids [I]\n ids = points[:,3].unique()\n # same ID mask [I,N]\n same_id_mask = ids[:,None] == points[None,:,3]\n # Compute mean centers of all points with the same ID [I,3]\n centers = (same_id_mask.type(torch.float) @ points[:,:3]) / same_id_mask.sum(dim=1)[:,None]\n # add ID to centers [I,4]\n centers = torch.cat((centers, ids[:,None]), dim=1)\n # return centers [I,4]\n return centers" }, { "identifier": "add_position_id_range_cost", "path": "utils/outlier_rejection.py", "snippet": "def add_position_id_range_cost(cost_matrix, inst_centers,\n frame_min_length = 0.3,\n max_num_inst_at_x = 30,\n id_margin_at_frame_length = 30):\n # cost_matrix: [N,M] (N: detection masks, M: predicted masks)\n # current_inst_centers: [M,4] torch tensor ([N,[x,y,z,ID]])\n # frame_min_length: minimum length of the frame aprox. at the closest target depth in meters\n # max_num_inst_at_x: maximum number of instances at a given x position\n # id_margin_at_frame_length: margin in number of IDs at the frame length\n # \n # return: cost_matrix [N,M]\n \n # get number of IDs\n num_ids = cost_matrix.shape[1]\n # Available IDs slope\n m = (max_num_inst_at_x + id_margin_at_frame_length )/ frame_min_length\n # x limit to wraparround the IDs\n x_limit = (num_ids - id_margin_at_frame_length) / m\n # lambda function to compute the lower bound of available IDs for each instance x position\n available_ids_0 = lambda x: torch.clamp(m * (x % x_limit), 0, num_ids - 1).type(torch.long)\n # lambda function to compute both bounds of available IDs for each instance x position\n available_ids = lambda x: (available_ids_0(x), torch.clamp(available_ids_0(x) + id_margin_at_frame_length, 0, num_ids - 1))\n # Lambda to remap x position from [1,-1] to [0,1]\n x_remap = lambda x: (-x + 1) / 2\n\n # Compute the available IDs for each instance x position [2,N]\n available_ids_x = available_ids(x_remap(inst_centers[:,0]))\n # Compute the mask of available IDs for each instance x position [N,M]\n\n # # TODO (csmitt): test y id rejection\n # # Lambda to remap x position from [1,-1] to [0,1]\n # y_remap = lambda y: (y * 0.8 + 1) / 2\n # # disperse available IDs along y axis\n # max_num_inst_at_y = 8\n # available_ids_y = lambda y, ids_x: (torch.max(ids_x[0], ids_x[0] + (y*max_num_inst_at_x) - (max_num_inst_at_y/2)).type(torch.int),\n # torch.min(ids_x[1], ids_x[0] + (y*max_num_inst_at_x) + (max_num_inst_at_y/2)).type(torch.int))\n # available_ids_x = available_ids_y(y_remap(inst_centers[:,1]), available_ids_x)\n\n available_ids_mask = torch.logical_and(available_ids_x[0][:,None] <= torch.arange(num_ids)[None,:].to(inst_centers.device),\n torch.arange(num_ids)[None,:].to(inst_centers.device) <= available_ids_x[1][:,None])\n # Set the cost of the unavailable IDs to a very high value\n cost_matrix[~available_ids_mask.cpu()] = 10000\n\n return cost_matrix" } ]
import numpy as np import torch import scipy import torch.nn.functional as F from torch import nn from utils.outlier_rejection import centers_from_3d_points_with_ids, add_position_id_range_cost
1,485
# from panoptic lifting implementation # #https://github.com/nihalsid/panoptic-lifting/blob/7af7a3e8477ead8e57f699a240d993e3bc21ee42/trainer/train_panopli_tensorf.py#L195-L206 class LinAssignmentThingsLoss(nn.Module): def __init__(self, outlier_rejection=False, min_distance=0.2, max_distance=0.5, *args, **kwargs): super().__init__() self.outlier_rejection = outlier_rejection self.min_distance = min_distance self.max_distance = max_distance self.inst_centers_db = torch.zeros([0,4]).to('cuda') @torch.no_grad() def create_virtual_gt_with_linear_assignment(self, inst_probabilities, labels_gt, points_3d=None): # Leave first element for stuff things_mask = labels_gt > 0 things_gt = labels_gt[things_mask] things_prob = inst_probabilities[things_mask][...,1:] # Compute surrogate labels labels = sorted(torch.unique(things_gt).cpu().tolist())[:things_prob.shape[-1]] cost_matrix = np.zeros([len(labels), things_prob.shape[-1]]) for lidx, label in enumerate(labels): cost_matrix[lidx, :] = -(things_prob[things_gt == label, :].sum(dim=0) / ((things_gt == label).sum() + 1e-4)).cpu().numpy() # Update cost matrix to avoid early assignment of repeated IDs assert self.outlier_rejection and points_3d is not None or not self.outlier_rejection, \ 'Outlier rejection requires 3d points' if self.outlier_rejection: # Compute centers of the current things Id gts points_3d_gt = torch.cat([points_3d[things_mask], things_gt[:,None]], dim=-1) current_inst_centers = centers_from_3d_points_with_ids(points_3d_gt) # Update cost matrix to have high cost when trying to assign repeated IDs
# from panoptic lifting implementation # #https://github.com/nihalsid/panoptic-lifting/blob/7af7a3e8477ead8e57f699a240d993e3bc21ee42/trainer/train_panopli_tensorf.py#L195-L206 class LinAssignmentThingsLoss(nn.Module): def __init__(self, outlier_rejection=False, min_distance=0.2, max_distance=0.5, *args, **kwargs): super().__init__() self.outlier_rejection = outlier_rejection self.min_distance = min_distance self.max_distance = max_distance self.inst_centers_db = torch.zeros([0,4]).to('cuda') @torch.no_grad() def create_virtual_gt_with_linear_assignment(self, inst_probabilities, labels_gt, points_3d=None): # Leave first element for stuff things_mask = labels_gt > 0 things_gt = labels_gt[things_mask] things_prob = inst_probabilities[things_mask][...,1:] # Compute surrogate labels labels = sorted(torch.unique(things_gt).cpu().tolist())[:things_prob.shape[-1]] cost_matrix = np.zeros([len(labels), things_prob.shape[-1]]) for lidx, label in enumerate(labels): cost_matrix[lidx, :] = -(things_prob[things_gt == label, :].sum(dim=0) / ((things_gt == label).sum() + 1e-4)).cpu().numpy() # Update cost matrix to avoid early assignment of repeated IDs assert self.outlier_rejection and points_3d is not None or not self.outlier_rejection, \ 'Outlier rejection requires 3d points' if self.outlier_rejection: # Compute centers of the current things Id gts points_3d_gt = torch.cat([points_3d[things_mask], things_gt[:,None]], dim=-1) current_inst_centers = centers_from_3d_points_with_ids(points_3d_gt) # Update cost matrix to have high cost when trying to assign repeated IDs
cost_matrix = add_position_id_range_cost(cost_matrix, current_inst_centers)
1
2023-10-30 16:14:39+00:00
2k
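Once `add_position_id_range_cost` has adjusted the cost matrix, a class named `LinAssignmentThingsLoss` almost certainly solves a linear assignment; with `scipy` already imported in this record, that step is typically:

from scipy.optimize import linear_sum_assignment

row_ind, col_ind = linear_sum_assignment(cost_matrix)
# row_ind[i] indexes a ground-truth label, col_ind[i] the predicted
# instance channel assigned to it; the pairs define the virtual ground truth.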
John-WL/sd-webui-inpaint-difference
lib_inpaint_difference/webui_hijacks.py
[ { "identifier": "DifferenceGlobals", "path": "lib_inpaint_difference/globals.py", "snippet": "class DifferenceGlobals:\n tab_index = None\n\n base_image = None\n altered_image = None\n generated_mask = None\n\n is_extension_enabled = opts.data.get('inpaint_difference_enabled', True)\n show_image_under_mask = opts.data.get('inpaint_difference_show_image_under_mask', True)\n mask_brush_color = opts.data.get('inpaint_difference_mask_brush_color', '#ffffff')" }, { "identifier": "one_time_callable", "path": "lib_inpaint_difference/one_time_callable.py", "snippet": "def one_time_callable(func):\n def wrapper(*args, **kwargs):\n if not wrapper.called:\n wrapper.called = True\n return func(*args, **kwargs)\n\n wrapper.called = False\n return wrapper" }, { "identifier": "Img2imgTabExtender", "path": "lib_inpaint_difference/img2img_tab_extender.py", "snippet": "class Img2imgTabExtender:\n img2img_tabs_block = None\n inpaint_params_block = None\n amount_of_default_tabs = None\n tab_data_list = []\n\n @classmethod\n def on_after_component(cls, component, **kwargs):\n elem_id = kwargs.get('elem_id', None)\n\n if elem_id == 'img2img_batch_inpaint_mask_dir':\n cls.register_img2img_tabs_block(component)\n\n if elem_id == 'img2img_mask_blur':\n cls.register_inpaint_params_block(component)\n\n cls.register_requested_elem_ids(component, elem_id)\n\n @classmethod\n def register_img2img_tabs_block(cls, component):\n cls.img2img_tabs_block = component.parent.parent\n\n @classmethod\n def register_inpaint_params_block(cls, component):\n cls.inpaint_params_block = component.parent.parent\n\n @classmethod\n def register_requested_elem_ids(cls, component, elem_id):\n if elem_id is None:\n return\n\n for tab_class in NEW_TAB_CLASSES:\n if not hasattr(tab_class, 'requested_elem_ids'):\n continue\n\n if not hasattr(tab_class, '_registered_elem_ids'):\n tab_class._registered_elem_ids = dict()\n\n if elem_id in tab_class.requested_elem_ids:\n tab_class._registered_elem_ids[elem_id] = component\n\n @classmethod\n def create_custom_tabs(cls):\n cls.register_default_amount_of_tabs()\n cls.tab_data_list = []\n\n for tab_class in NEW_TAB_CLASSES:\n tab_index = cls._find_new_tab_index()\n custom_tab_object = tab_class(tab_index)\n registered_components = getattr(tab_class, \"_registered_elem_ids\", None)\n\n with GradioContextSwitch(cls.img2img_tabs_block):\n custom_tab_object.tab()\n with GradioContextSwitch(cls.inpaint_params_block):\n custom_tab_object.section(registered_components)\n\n cls.register_custom_tab_data(tab_index, tab_class, custom_tab_object)\n\n with GradioContextSwitch(cls.inpaint_params_block):\n img2img_tabs = cls._get_img2img_tabs()\n cls.setup_navigation_events(img2img_tabs)\n for tab_data in cls.tab_data_list:\n tab_data.tab_object.gradio_events(img2img_tabs)\n\n @classmethod\n def register_default_amount_of_tabs(cls):\n cls.amount_of_default_tabs = cls._find_new_tab_index()\n\n @classmethod\n def register_custom_tab_data(cls, tab_index, tab_class, tab_object):\n cls.tab_data_list.append(TabData(tab_index, tab_class, tab_object))\n\n @classmethod\n def setup_navigation_events(cls, img2img_tabs):\n block_data_iterator = zip(img2img_tabs[cls.amount_of_default_tabs:], cls.tab_data_list, strict=True)\n for tab_block, custom_tab in block_data_iterator:\n def update_func(custom_tab):\n should_show_inpaint_params = getattr(custom_tab.tab_class, 'show_inpaint_params', True)\n return gr.update(visible=should_show_inpaint_params)\n\n func_dict = dict(\n fn=functools.partial(update_func, 
custom_tab=custom_tab),\n inputs=[],\n outputs=[\n cls.inpaint_params_block\n ]\n )\n\n tab_block.select(**func_dict)\n\n @classmethod\n def _find_new_tab_index(cls):\n img2img_tabs = [\n child\n for child in cls.img2img_tabs_block.children\n if isinstance(child, gr.TabItem)\n ]\n return len(img2img_tabs)\n\n @classmethod\n def _get_img2img_tabs(cls):\n return [\n child\n for child in cls.img2img_tabs_block.children\n if isinstance(child, gr.TabItem)\n ]" } ]
import gradio as gr from modules import img2img, ui_loadsave from lib_inpaint_difference.globals import DifferenceGlobals from lib_inpaint_difference.one_time_callable import one_time_callable from lib_inpaint_difference.img2img_tab_extender import Img2imgTabExtender
1,429
@one_time_callable def hijack_img2img_processing(): original_img2img_processing = img2img.img2img def hijack_func(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args, ):
@one_time_callable def hijack_img2img_processing(): original_img2img_processing = img2img.img2img def hijack_func(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args, ):
if mode == DifferenceGlobals.tab_index:
0
2023-10-30 16:17:34+00:00
2k
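`next_line` branches on `DifferenceGlobals.tab_index`, the tab index this extension registered. The usual hijack pattern substitutes the extension's inputs and then defers to the saved original function; a sketch in which the substituted attributes and the fallback mode value are assumptions:

if mode == DifferenceGlobals.tab_index:
    init_img_inpaint = DifferenceGlobals.altered_image
    init_mask_inpaint = DifferenceGlobals.generated_mask
    mode = 4  # assumed index of the built-in "inpaint upload" tab
# ...then forward every argument to original_img2img_processing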
BIT-DA/Annotator
tools/utils/train_utils.py
[ { "identifier": "common_utils", "path": "tools/utils/common/common_utils.py", "snippet": "def check_numpy_to_torch(x):\ndef limit_period(val, offset=0.5, period=np.pi):\ndef drop_info_with_name(info, name):\ndef rotate_points_along_z(points, angle):\ndef mask_points_by_range(points, limit_range):\ndef get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):\ndef create_logger(log_file=None, rank=0, log_level=logging.INFO):\ndef set_random_seed(seed):\ndef get_pad_params(desired_size, cur_size):\ndef keep_arrays_by_name(gt_names, used_classes):\ndef init_dist_slurm(tcp_port, local_rank, backend='nccl'):\ndef init_dist_pytorch(tcp_port, local_rank, backend='nccl'):\ndef get_dist_info(return_gpu_per_machine=False):\ndef merge_results_dist(result_part, size, tmpdir):\ndef scatter_point_inds(indices, point_inds, shape):\ndef generate_voxel2pinds(sparse_tensor):\ndef sa_create(name, var):\n def __init__(self):\n def reset(self):\n def update(self, val, n=1):\nclass AverageMeter(object):" }, { "identifier": "commu_utils", "path": "tools/utils/common/commu_utils.py", "snippet": "def get_world_size():\ndef get_rank():\ndef is_main_process():\ndef synchronize():\ndef all_gather(data):\ndef reduce_dict(input_dict, average=True):\ndef average_reduce_value(data):\ndef all_reduce(data, op=\"sum\", average=False):\n def op_map(op):\ndef concat_all_gather(tensor):" } ]
import glob import os import torch import tqdm import time import pcdet from torch.nn.utils import clip_grad_norm_ from tools.utils.common import common_utils, commu_utils
756
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False): if total_it_each_epoch == len(train_loader): dataloader_iter = iter(train_loader) if rank == 0: pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True) data_time = common_utils.AverageMeter() batch_time = common_utils.AverageMeter() forward_time = common_utils.AverageMeter() for cur_it in range(total_it_each_epoch): end = time.time() try: batch = next(dataloader_iter) except StopIteration: dataloader_iter = iter(train_loader) batch = next(dataloader_iter) print('new iters') data_timer = time.time() cur_data_time = data_timer - end lr_scheduler.step(accumulated_iter) try: cur_lr = float(optimizer.lr) except: cur_lr = optimizer.param_groups[0]['lr'] if tb_log is not None: tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter) model.train() optimizer.zero_grad() loss, tb_dict, disp_dict = model_func(model, batch) forward_timer = time.time() cur_forward_time = forward_timer - data_timer loss.backward() clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP) optimizer.step() accumulated_iter += 1 cur_batch_time = time.time() - end # average reduce
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False): if total_it_each_epoch == len(train_loader): dataloader_iter = iter(train_loader) if rank == 0: pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True) data_time = common_utils.AverageMeter() batch_time = common_utils.AverageMeter() forward_time = common_utils.AverageMeter() for cur_it in range(total_it_each_epoch): end = time.time() try: batch = next(dataloader_iter) except StopIteration: dataloader_iter = iter(train_loader) batch = next(dataloader_iter) print('new iters') data_timer = time.time() cur_data_time = data_timer - end lr_scheduler.step(accumulated_iter) try: cur_lr = float(optimizer.lr) except: cur_lr = optimizer.param_groups[0]['lr'] if tb_log is not None: tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter) model.train() optimizer.zero_grad() loss, tb_dict, disp_dict = model_func(model, batch) forward_timer = time.time() cur_forward_time = forward_timer - data_timer loss.backward() clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP) optimizer.step() accumulated_iter += 1 cur_batch_time = time.time() - end # average reduce
avg_data_time = commu_utils.average_reduce_value(cur_data_time)
1
2023-10-31 08:11:57+00:00
2k
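The timing block is most plausibly completed by reducing the other two timers the same way and feeding the `AverageMeter`s created at the top of the loop, mirroring the standard OpenPCDet pattern (still a reconstruction):

avg_data_time = commu_utils.average_reduce_value(cur_data_time)
avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
data_time.update(avg_data_time)
forward_time.update(avg_forward_time)
batch_time.update(avg_batch_time)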
hl123-123/yiyan-ppt
gradio_test.py
[ { "identifier": "yiyan_api", "path": "yiyan.py", "snippet": "def yiyan_api(message,access_token,use4=False):\n if use4:\n url = \"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro?access_token=\" + access_token\n else:\n url = \"https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions?access_token=\" + access_token\n payload = json.dumps({\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": message\n }\n ]\n })\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n try:\n result = json.loads(response.text)[\"result\"]\n except:\n print(response.text)\n # print(result)\n return result" }, { "identifier": "get_access_token", "path": "yiyan.py", "snippet": "def get_access_token():\n \"\"\"\n 使用 API Key,Secret Key 获取access_token,替换下列示例中的应用API Key、应用Secret Key\n \"\"\"\n API_Key = Config.API_Key\n Secret_Key = Config.Secret_Key\n url = \"https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={API_Key}&client_secret={Secret_Key}\".format(API_Key=API_Key,Secret_Key=Secret_Key)\n\n payload = json.dumps(\"\")\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n return response.json().get(\"access_token\")" }, { "identifier": "tree2ppt", "path": "mdtree/tree2ppt.py", "snippet": "class Tree2PPT:\nclass MarkdownCategory:\nclass MD2Slide:\nclass THEME_MD2Slide:\n def __init__(self, md_str1,theme_path,save_path=\"\"):\n def init_pptx(self,theme_path = \"../my_ppt_mode/1\"):\n def init_markdown(self, md_str):\n def traverse_tree(self, heading):\n def save_stream(self):\n def __init__(self, presentation, theme_path, title, content, *args,img_dict={}, **kwargs):\n def init_slide(self):\n def init_img(self):\n def init_font(self, **kwargs):\n def get_font(self, font: Font, category: str):\n def init_title(self):\n def init_content(self):\n def processing_md_str(self, md_str):\n def __init__(self, presentation, theme_path, title, *args, **kwargs):\n def init_slide(self):\n def init_font(self, **kwargs):\n def get_font(self, font: Font, category: str):\n def init_title(self):\n TITLE = \"#\"\n CONTENT = \"<p>\"" } ]
import gradio as gr import gradio as gr import os import time import random import structure_article import shutil from yiyan import yiyan_api, get_access_token from mdtree import tree2ppt from PIL import Image
953
# def image_mod(): # return Image.open("pptx_static/static/img.png") def save_knowledge_func(task_name,knowledge_content,mode,sub_num): time1= time.time() sub_num = int(sub_num) rand_seed = str(random.randint(0,10000000000000000000000000000000000000000000000000000)) character_a = "你是一个精通各方面知识的人" struct_article = structure_article.StructureArticle(api_type="yiyan",main_idea_knowledge=knowledge_content,max_sub_idea_num=sub_num,min_sub_idea_num=sub_num) content = struct_article.generate_final_summary(task_name,character_a) # md_content = read_md_file("./"+task_name+".md") if len(os.listdir("./myppt"))>100: shutil.rmtree("./myppt") os.makedirs("./myppt") save_path = "./myppt/test" + rand_seed + ".pptx"
# def image_mod(): # return Image.open("pptx_static/static/img.png") def save_knowledge_func(task_name,knowledge_content,mode,sub_num): time1= time.time() sub_num = int(sub_num) rand_seed = str(random.randint(0,10000000000000000000000000000000000000000000000000000)) character_a = "你是一个精通各方面知识的人" struct_article = structure_article.StructureArticle(api_type="yiyan",main_idea_knowledge=knowledge_content,max_sub_idea_num=sub_num,min_sub_idea_num=sub_num) content = struct_article.generate_final_summary(task_name,character_a) # md_content = read_md_file("./"+task_name+".md") if len(os.listdir("./myppt"))>100: shutil.rmtree("./myppt") os.makedirs("./myppt") save_path = "./myppt/test" + rand_seed + ".pptx"
tree2ppt.Tree2PPT(content,"./my_ppt_mode/"+str(int(mode)),save_path=save_path)
2
2023-10-29 15:10:06+00:00
2k
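`Tree2PPT.__init__` in the context takes `(md_str1, theme_path, save_path="")`, so a minimal standalone use of the class would look like this; the markdown content and paths are illustrative only:

md = "# Demo deck\n## First slide\nSome bullet text"
tree2ppt.Tree2PPT(md, "./my_ppt_mode/1", save_path="./myppt/demo.pptx")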
thoddnn/open-datagen
opendatagen/anonymizer.py
[ { "identifier": "OpenAIChatModel", "path": "opendatagen/model.py", "snippet": "class OpenAIChatModel(BaseModel):\n\n name:str = \"gpt-3.5-turbo-1106\"\n system_prompt:Optional[str] = \"No verbose.\"\n max_tokens:Optional[int] = 256\n temperature:Optional[List[float]] = [1]\n json_mode:Optional[bool] = False \n seed:Optional[int] = None \n tools:Optional[list] = None \n top_p:Optional[int] = 1 \n stop:Optional[str] = None \n presence_penalty: Optional[float] = 0\n frequency_penalty: Optional[float] = 0 \n client:Optional[Type[OpenAI]] = None \n logprobs:Optional[bool] = False \n confidence_score:Optional[Dict] = {} \n \n def __init__(self, **data):\n super().__init__(**data)\n \n self.client = OpenAI()\n self.client.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n \n @retry(retry=retry_if_result(is_retryable_answer), stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=60))\n def ask(self, messages) -> str:\n \n param = {\n\n \"model\":self.name,\n \"temperature\": random.choice(self.temperature),\n \"messages\": messages,\n \"logprobs\": self.logprobs\n\n }\n\n if self.tools:\n param[\"functions\"] = self.tools\n \n if self.max_tokens:\n param[\"max_tokens\"] = self.max_tokens\n\n if self.seed:\n param[\"seed\"] = self.seed\n \n if self.max_tokens:\n param[\"max_tokens\"] = self.max_tokens\n\n if self.json_mode:\n param[\"response_format\"] = {\"type\": \"json_object\"}\n\n if self.seed:\n param[\"seed\"] = self.seed\n\n completion = self.client.chat.completions.create(**param)\n\n if self.logprobs:\n self.confidence_score = get_confidence_score(completion=completion)\n\n answer = completion.choices[0].message.content\n \n return answer" }, { "identifier": "ModelName", "path": "opendatagen/model.py", "snippet": "class ModelName(Enum):\n GPT_35_TURBO_INSTRUCT = \"gpt-3.5-turbo-instruct\"\n TEXT_DAVINCI_INSTRUCT = \"text-davinci-003\"\n GPT_35_TURBO_CHAT = \"gpt-3.5-turbo-1106\"\n GPT_35_TURBO_16K_CHAT = \"gpt-3.5-turbo-16k\"\n GPT_4_CHAT = \"gpt-4\"\n GPT_4_TURBO_CHAT = \"gpt-4-1106-preview\"\n TEXT_EMBEDDING_ADA = \"text-embedding-ada-002\"\n SMARTCHUNK = \"SmartChunk-0.1-Mistral-7B\"\n MISTRAL_7B = \"Mistral-7B-v0.1\"\n LLAMA_7B = \"Llama-2-7b-chat-hf\"\n LLAMA_13B = \"Llama-2-13b-chat-hf\"\n LLAMA_70B = \"Llama-2-70b-chat-hf\"" }, { "identifier": "load_file", "path": "opendatagen/utils.py", "snippet": "def load_file(path:str):\n # Adjust the path based on this module's location\n absolute_path = os.path.join(os.path.dirname(__file__), path)\n\n with open(absolute_path, 'r') as file:\n content = file.read()\n\n return content" } ]
import re
import spacy
from opendatagen.model import OpenAIChatModel, ModelName
from opendatagen.utils import load_file
1,575
class Anonymizer:
    NER_PLACEHOLDER = {
        "PERSON": "{person}",
        "ORG": "{organization}",
        "GPE": "{location}",
        "DATE": "{date}",
        "TIME": "{time}",
        "NORP": "{group}",
        "FAC": "{facility}",
        "LOC": "{location}",
        "PRODUCT": "{product}",
        "EVENT": "{event}",
        "WORK_OF_ART": "{artwork}",
        "LAW": "{law}",
        "LANGUAGE": "{language}",
        "MONEY": "{money}",
        "PERCENT": "{percentage}",
        "ORDINAL": "{ordinal}",
        "CARDINAL": "{number}",
        # Add more if needed
    }

    REGEX_PATTERN = {
        "{phone_number}": r"\+?\d{1,4}?[-.\s]?\(?\d{1,3}?\)?[-.\s]?\d{1,4}[-.\s]?\d{1,4}[-.\s]?\d{1,9}",
        "{email}": r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b",
        "{credit_card_pattern}": r"\d{4}[-\s]?\d{4}[-\s]?\d{4}[-\s]?\d{4}",
        "{address_pattern}": r"\d{1,5}\s\w+(\s\w+)*,\s\w+,\s\w+(\s\w+)*",
        "{date_pattern}": r"(\d{4}[-/]\d{1,2}[-/]\d{1,2})|(\d{1,2}[-/]\d{1,2}[-/]\d{4})",
        "{time_pattern}": r"(?:[01]\d|2[0-3]):[0-5]\d",
        "{ipv4_pattern}": r"\b(?:\d{1,3}\.){3}\d{1,3}\b",
        "{url_pattern}": r"https?://(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)",
        "{ssn_pattern}": r"\d{3}-\d{2}-\d{4}",
        "{license_plate_pattern}": r"[A-Z0-9]{2,}-[A-Z0-9]{2,}",
        "{zip_code_pattern}": r"\d{5}(-\d{4})?",
        "{vin_pattern}": r"[A-HJ-NPR-Z0-9]{17}",
        "{iban_pattern}": r"[A-Z]{2}\d{2}[A-Z0-9]{1,30}",
        "{driver_license_pattern}": r"[A-Z]{1,2}-\d{4,9}",
    }
def __init__(self, completion_model:OpenAIChatModel):
0
2023-10-27 17:38:37+00:00
2k
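The regex half of the Anonymizer above works without spaCy. A self-contained sketch reusing two of its patterns verbatim; mask_pii and PATTERNS are illustrative names, not part of the repo:

import re

PATTERNS = {
    "{email}": r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b",
    "{ssn_pattern}": r"\d{3}-\d{2}-\d{4}",
}

def mask_pii(text: str) -> str:
    """Replace every regex match with its placeholder token."""
    for placeholder, pattern in PATTERNS.items():
        text = re.sub(pattern, placeholder, text)
    return text

print(mask_pii("Reach me at jane@example.com, SSN 123-45-6789."))
# -> Reach me at {email}, SSN {ssn_pattern}.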
HAMNET-AI/PDFTriage
src/routers.py
[ { "identifier": "fetch_figure", "path": "src/triage.py", "snippet": "def fetch_figure(query):\n query_prompt = f\"What contents mentioned in the figure of this pdf\"\n path = query_engine.query(query_prompt).metadata['json_path_response_str'].replace(\"&&\", \"&\")\n jsonpath_expression = parse(path)\n matches = jsonpath_expression.find(data)\n result = [match.value for match in matches]\n figure_indexs = get_num(query, type=\"figure\")\n print(figure_indexs)\n figure_indexs = ast.literal_eval(figure_indexs)\n content = [f'figure{i}:{result[i]}' for i in figure_indexs]\n prompt = f\"Please answer a question based on something in the pdf\\n, this is the question{query}\\n, The contents of figures, mentioned in the question are listed in text as follows {content}\"\n response = llm.complete(prompt)\n print(response)" }, { "identifier": "fetch_pages", "path": "src/triage.py", "snippet": "def fetch_pages(query):\n # print(\"pages\")\n query_prompt = f\"What contents to the number of pages mentioned in this question : {query}\"\n path = query_engine.query(query_prompt).metadata['json_path_response_str'].replace(\"&&\", \"&\")\n # path = \"$.data[?(@.page >= 5 & @.page <= 7)].boxes[*].text\"\n # .replace(\"&&\", \"&\")\n jsonpath_expression = parse(path)\n matches = jsonpath_expression.find(data)\n content = [match.value for match in matches]\n prompt = f\"Please answer a question based on something in the pdf\\n, this is the question{query}\\n, The contents of the pages mentioned in the question are listed in text as follows {content}\"\n response = llm.complete(prompt)\n print(response)" }, { "identifier": "fetch_sections", "path": "src/triage.py", "snippet": "def fetch_sections(query):\n print(\"Fetching sections\")" }, { "identifier": "fetch_table", "path": "src/triage.py", "snippet": "def fetch_table(query):\n query_prompt = f\"What contents mentioned in the table of this pdf\"\n path = query_engine.query(query_prompt).metadata['json_path_response_str'].replace(\"&&\", \"&\")\n jsonpath_expression = parse(path)\n matches = jsonpath_expression.find(data)\n result = [match.value for match in matches]\n # print(\"table\")\n table_indexs = get_num(query,type=\"table\")\n table_indexs = ast.literal_eval(table_indexs)\n # content = [result[i] for i in table_indexs]\n content = [f'table{i}:{result[i]}' for i in table_indexs]\n prompt = f\"Please answer a question based on something in the pdf\\n, this is the question{query}\\n, The contents of tables, mentioned in the question are listed in text as follows {content}\"\n response = llm.complete(prompt)\n print(response)" }, { "identifier": "retrieve", "path": "src/triage.py", "snippet": "def retrieve():\n print(\"retrieve\")" } ]
from llama_index.tools import ToolMetadata
from llama_index.selectors.llm_selectors import LLMSingleSelector
from .triage import fetch_figure, fetch_pages, fetch_sections, fetch_table, retrieve
884
def router(query):
    choices = [
        ToolMetadata(description="Get the text contained in the pages listed", name="fetch_pages"),
        ToolMetadata(description="Get the text contained in the section listed", name="fetch_sections"),
        ToolMetadata(description="Get the text contained in the figure caption listed", name="fetch_figure"),
        ToolMetadata(description="Get the text contained in the table caption listed", name="fetch_table"),
        ToolMetadata(description="Issue a natural language query over the document, and fetch relevant chunks.", name="retrieve"),
    ]

    # choices as a list of strings
    # choices = ["fetch_pages", "fetch_sections", "fetch_figure", "fetch_table", "retrieve"]

    selector = LLMSingleSelector.from_defaults()
    result = selector.select(choices, query=query).selections
    flag = result[0].index
    print(flag)
    if flag == 0:
content = fetch_pages(query=query)
1
2023-10-30 14:36:23+00:00
2k
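The router above delegates choice-making to LLMSingleSelector and dispatches on the returned index. A standard-library sketch of the same selector-then-dispatch shape; fake_select is a hypothetical keyword heuristic standing in for the LLM call:

HANDLERS = ["fetch_pages", "fetch_sections", "fetch_figure", "fetch_table", "retrieve"]

def fake_select(query: str) -> int:
    """Crude keyword routing; the real code asks an LLM via LLMSingleSelector."""
    for i, name in enumerate(HANDLERS[:-1]):
        keyword = name.split("_")[-1].rstrip("s")   # page / section / figure / table
        if keyword in query.lower():
            return i
    return len(HANDLERS) - 1                        # default: retrieve

def route(query: str) -> str:
    return HANDLERS[fake_select(query)]

assert route("what does table 2 show?") == "fetch_table"
assert route("summarize the paper") == "retrieve"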
zhanggang001/HEDNet
pcdet/models/backbones_3d/vfe/dynamic_voxel_vfe.py
[ { "identifier": "VFETemplate", "path": "pcdet/models/backbones_3d/vfe/vfe_template.py", "snippet": "class VFETemplate(nn.Module):\n def __init__(self, model_cfg, **kwargs):\n super().__init__()\n self.model_cfg = model_cfg\n\n def get_output_feature_dim(self):\n raise NotImplementedError\n\n def forward(self, **kwargs):\n \"\"\"\n Args:\n **kwargs:\n\n Returns:\n batch_dict:\n ...\n vfe_features: (num_voxels, C)\n \"\"\"\n raise NotImplementedError" }, { "identifier": "PFNLayerV2", "path": "pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py", "snippet": "class PFNLayerV2(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n use_norm=True,\n last_layer=False):\n super().__init__()\n \n self.last_vfe = last_layer\n self.use_norm = use_norm\n if not self.last_vfe:\n out_channels = out_channels // 2\n\n if self.use_norm:\n self.linear = nn.Linear(in_channels, out_channels, bias=False)\n self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01)\n else:\n self.linear = nn.Linear(in_channels, out_channels, bias=True)\n \n self.relu = nn.ReLU()\n\n def forward(self, inputs, unq_inv):\n\n x = self.linear(inputs)\n x = self.norm(x) if self.use_norm else x\n x = self.relu(x)\n x_max = torch_scatter.scatter_max(x, unq_inv, dim=0)[0]\n\n if self.last_vfe:\n return x_max\n else:\n x_concatenated = torch.cat([x, x_max[unq_inv, :]], dim=1)\n return x_concatenated" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter

from .vfe_template import VFETemplate
from .dynamic_pillar_vfe import PFNLayerV2
746
try:
    import torch_scatter
except Exception as e:
    # In case someone doesn't want to use the dynamic pillar VFE and hasn't installed torch_scatter
    pass


class DynamicVoxelVFE(VFETemplate):
    def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs):
        super().__init__(model_cfg=model_cfg)

        self.use_norm = self.model_cfg.USE_NORM
        self.with_distance = self.model_cfg.WITH_DISTANCE
        self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ
        num_point_features += 6 if self.use_absolute_xyz else 3
        if self.with_distance:
            num_point_features += 1

        self.num_filters = self.model_cfg.NUM_FILTERS
        assert len(self.num_filters) > 0
        num_filters = [num_point_features] + list(self.num_filters)

        pfn_layers = []
        for i in range(len(num_filters) - 1):
            in_filters = num_filters[i]
            out_filters = num_filters[i + 1]
            pfn_layers.append(
PFNLayerV2(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2))
1
2023-10-25 02:57:35+00:00
2k
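PFNLayerV2 in the context above relies on torch_scatter.scatter_max to pool point features per voxel. A sketch of the same pooling with plain PyTorch (assumes torch >= 1.12 for Tensor.scatter_reduce; it omits the argmax output torch_scatter also returns):

import torch

points = torch.tensor([[1.0, 5.0], [3.0, 2.0], [0.0, 9.0]])  # 3 points, 2 features
voxel_id = torch.tensor([0, 0, 1])                            # point -> voxel index
num_voxels = 2

# Start from -inf so empty slots never win the max.
pooled = torch.full((num_voxels, points.shape[1]), float("-inf"))
pooled = pooled.scatter_reduce(
    0, voxel_id.unsqueeze(-1).expand_as(points), points, reduce="amax"
)
print(pooled)  # tensor([[3., 5.], [0., 9.]])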
deepsearch-ai/deepsearch
deepsearchai/tests/sources/test_local.py
[ { "identifier": "MEDIA_TYPE", "path": "deepsearchai/enums.py", "snippet": "class MEDIA_TYPE(Enum):\n UNKNOWN = -1\n IMAGE = 1\n TEXT = 2\n AUDIO = 3\n VIDEO = 4" }, { "identifier": "DataSource", "path": "deepsearchai/sources/data_source.py", "snippet": "class DataSource(Enum):\n LOCAL = 1\n S3 = 2\n YOUTUBE = 3" }, { "identifier": "LocalDataSource", "path": "deepsearchai/sources/local.py", "snippet": "class LocalDataSource(BaseSource):\n def __init__(self):\n super().__init__()\n\n def add_data(\n self,\n source: str,\n embedding_models_config: EmbeddingModelsConfig,\n vector_database: BaseVectorDatabase,\n ) -> None:\n # Recursively iterate over all the files and subdirectories in the current directory\n existing_document_identifiers = {}\n file_paths = self._get_all_file_path(source)\n for file in file_paths:\n media_type = get_mime_type(file)\n embedding_models = embedding_models_config.get_embedding_model(media_type)\n for embedding_model in embedding_models:\n if media_type not in existing_document_identifiers:\n existing_document_identifiers[\n media_type\n ] = vector_database.get_existing_document_ids(\n {\"document_id\": file_paths},\n embedding_model.get_collection_name(media_type),\n )\n\n if file in existing_document_identifiers[media_type]:\n \"{} already exists, skipping...\".format(file)\n continue\n if media_type == MEDIA_TYPE.IMAGE:\n try:\n data = Image.open(file)\n except FileNotFoundError:\n print(\"The supplied file does not exist {}\".format(file))\n continue\n except UnidentifiedImageError:\n print(\"The supplied file is not an image {}\".format(file))\n continue\n except Exception as e:\n print(\"Error while reading file {}\".format(file))\n print(e)\n continue\n\n elif media_type == MEDIA_TYPE.AUDIO:\n data = file\n else:\n print(\"Unsupported media type {}\".format(file))\n continue\n vector_database.add(\n data, DataSource.LOCAL, file, source, media_type, embedding_model\n )\n\n def _get_all_file_path(self, directory):\n if os.path.isfile(directory):\n return [directory]\n\n file_paths = []\n for root, _, files in os.walk(directory):\n for file in files:\n file_path = os.path.join(root, file)\n file_paths.append(file_path)\n return file_paths" } ]
import unittest
import mock.mock
from unittest import mock
from unittest.mock import patch

from deepsearchai.enums import MEDIA_TYPE
from deepsearchai.sources.data_source import DataSource
from deepsearchai.sources.local import LocalDataSource
862
class LocalDataSourceTest(unittest.TestCase):
    def setUp(self):
        self.local_data_source = LocalDataSource()

    @patch("os.walk")
    @patch("PIL.Image.open")
    def test_add_data_image_directory_with_no_existing_files(
        self, mock_image_file, mock_listdir
    ):
        embedding_models_config = mock.Mock()
        embedding_model = mock.Mock()
        embedding_models_config.get_embedding_model.return_value = [embedding_model]

        # Create a mock image file
        image_data = mock.Mock()
        mock_listdir.return_value = [
            ("test_directory", "", ["image1.jpg", "image2.png"])
        ]
        mock_image_file.return_value = image_data

        # Create a mock vector database
        vector_database = mock.Mock()
        vector_database.get_existing_document_ids.return_value = []

        # Add local datasource data for a local directory
        directory = "test_directory"
        self.local_data_source.add_data(
            directory, embedding_models_config, vector_database
        )

        assert vector_database.add.mock_calls == [
            mock.call(
                image_data,
DataSource.LOCAL,
1
2023-10-27 06:46:22+00:00
2k
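The test above stubs os.walk and PIL.Image.open with unittest.mock. A stripped-down, runnable sketch of the os.walk patching idiom; list_files and WalkPatchTest are illustrative names:

import os
import unittest
from unittest.mock import patch

def list_files(root: str):
    return [os.path.join(d, f) for d, _, files in os.walk(root) for f in files]

class WalkPatchTest(unittest.TestCase):
    @patch("os.walk")
    def test_list_files(self, mock_walk):
        # Yield the same (dirpath, dirnames, filenames) tuples os.walk would.
        mock_walk.return_value = [("root", [], ["a.jpg", "b.png"])]
        expected = [os.path.join("root", "a.jpg"), os.path.join("root", "b.png")]
        self.assertEqual(list_files("root"), expected)

unittest.main(argv=["x"], exit=False)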
jerpint/RAGTheDocs
app.py
[ { "identifier": "embed_documents", "path": "embed_docs.py", "snippet": "def embed_documents(homepage_url, save_directory, target_version=None):\n # adds https:// and trailing slash\n homepage_url = sanitize_url(homepage_url)\n\n # Crawl the website using scrapy\n run_spider(\n homepage_url, save_directory=save_directory, target_version=target_version\n )\n\n # # Convert the .html pages into chunks using Buster's SphinxParser\n # root_dir is the folder containing the scraped content e.g. crawled_outputs/buster.readthedocs.io/\n root_dir = os.path.join(save_directory, homepage_url.split(\"https://\")[1])\n df = get_all_documents(\n root_dir=root_dir,\n base_url=homepage_url,\n parser_cls=SphinxParser,\n min_section_length=100,\n max_section_length=1000,\n )\n df[\"source\"] = \"readthedocs\" # Add the source column\n\n # Initialize the DeepLake vector store\n vector_store_path = os.path.join(save_directory, \"deeplake_store\")\n dm = DeepLakeDocumentsManager(\n vector_store_path=vector_store_path,\n overwrite=True,\n required_columns=[\"url\", \"content\", \"source\", \"title\"],\n )\n\n # Add all embeddings to the vector store\n dm.batch_add(\n df=df,\n batch_size=3000,\n min_time_interval=60,\n num_workers=32,\n )" }, { "identifier": "setup_buster", "path": "cfg.py", "snippet": "def setup_buster(buster_cfg: BusterConfig):\n \"\"\"initialize buster with a buster_cfg class\"\"\"\n retriever: Retriever = DeepLakeRetriever(**buster_cfg.retriever_cfg)\n tokenizer = GPTTokenizer(**buster_cfg.tokenizer_cfg)\n document_answerer: DocumentAnswerer = DocumentAnswerer(\n completer=ChatGPTCompleter(**buster_cfg.completion_cfg),\n documents_formatter=DocumentsFormatterJSON(\n tokenizer=tokenizer, **buster_cfg.documents_formatter_cfg\n ),\n prompt_formatter=PromptFormatter(\n tokenizer=tokenizer, **buster_cfg.prompt_formatter_cfg\n ),\n **buster_cfg.documents_answerer_cfg,\n )\n validator: Validator = QuestionAnswerValidator(**buster_cfg.validator_cfg)\n buster: Buster = Buster(\n retriever=retriever, document_answerer=document_answerer, validator=validator\n )\n return buster" } ]
import os
import gradio as gr
import pandas as pd
import cfg
from typing import Optional, Tuple
from buster.completers import Completion
from embed_docs import embed_documents
from cfg import setup_buster
922
# from embed_docs import embed_rtd_website
# from rtd_scraper.scrape_rtd import scrape_rtd

# Typehint for chatbot history
ChatHistory = list[list[Optional[str], Optional[str]]]

# Because this is a one-click deploy app, we will be relying on env. variables being set
openai_api_key = os.getenv("OPENAI_API_KEY")  # Mandatory for app to work
readthedocs_url = os.getenv("READTHEDOCS_URL")  # Mandatory for app to work as intended
readthedocs_version = os.getenv("READTHEDOCS_VERSION")

if openai_api_key is None:
    print(
        "Warning: No OPENAI_API_KEY detected. Set it with 'export OPENAI_API_KEY=sk-...'."
    )

if readthedocs_url is None:
    raise ValueError(
        "No READTHEDOCS_URL detected. Set it with e.g. 'export READTHEDOCS_URL=https://orion.readthedocs.io/'"
    )

if readthedocs_version is None:
    print(
        """
        Warning: No READTHEDOCS_VERSION detected.
        If multiple versions of the docs exist, they will all be scraped.
        Set it with e.g. 'export READTHEDOCS_VERSION=en/stable'
        """
    )

# Override to put it anywhere
save_directory = "outputs/"

# scrape and embed content from readthedocs website
# You only need to embed the first time the app runs, comment it out to skip
embed_documents(
0
2023-10-31 03:36:43+00:00
2k
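The block above implements required-versus-optional environment variables by hand. A small standard-library sketch of the same policy; require_env and optional_env are illustrative helpers, not part of the repo:

import os
from typing import Optional

def require_env(name: str, hint: str) -> str:
    """Fail fast on a missing mandatory variable, mirroring the checks above."""
    value = os.getenv(name)
    if value is None:
        raise ValueError(f"No {name} detected. Set it with e.g. '{hint}'")
    return value

def optional_env(name: str, warning: str) -> Optional[str]:
    """Warn but continue when a variable is merely recommended."""
    value = os.getenv(name)
    if value is None:
        print(f"Warning: {warning}")
    return value

version = optional_env("READTHEDOCS_VERSION", "no READTHEDOCS_VERSION detected; all versions will be scraped")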
Paulo-Lopes-Estevao/ci-generator
cigen/core/github/nodejs_action.py
[ { "identifier": "Steps", "path": "cigen/core/github/github_action.py", "snippet": "class Steps:\n def __init__(self, steps: list[dict]) -> None:\n self.steps = steps\n\n def to_dict(self) -> list[dict]:\n return self.steps\n\n def add(self, step: dict) -> None:\n self.steps.append(step)\n\n def add_at(self, step: dict, index: int) -> None:\n self.steps.insert(index, step)\n\n def add_all(self, steps: list[dict]) -> None:\n self.steps.extend(steps)\n\n def to_yaml(self):\n return yaml.dump(self.to_dict())" }, { "identifier": "Action", "path": "cigen/core/github/github_action.py", "snippet": "class Action:\n on: OnEvent\n steps: Steps\n\n def __init__(self, name, version, on, steps: Steps, env=None) -> None:\n self.name = name\n self.version = version\n self.on = on\n self.steps = steps\n self.env = env\n\n def base(self):\n return base_action(self.name, self.on, self.steps)\n\n def base_version_list(self):\n return base_version_list_action(self.name, self.on, self.steps, self.version)\n\n def base_to_yaml(self):\n return yaml.dump(self.base())\n\n def run(self):\n return self.base()\n\n def run_with_env(self):\n return {\n **self.base(),\n 'env': self.env\n }" } ]
from abc import ABC, abstractmethod

from cigen.core.github.github_action import Steps, Action
792
from __future__ import annotations


class NodejsActionBuilder(ABC):
    @property
    @abstractmethod
    def build(self) -> Action:
        pass

    @property
    @abstractmethod
    def build_steps(self) -> NodejsActionSteps:
        pass

    @abstractmethod
    def base(self) -> None:
        pass

    @abstractmethod
    def base_version_list(self) -> None:
        pass

    def add_steps(self, step):
        pass

    @abstractmethod
    def base_to_yaml(self) -> None:
        pass

    @abstractmethod
    def run(self) -> None:
        pass

    @abstractmethod
    def run_with_env(self) -> None:
        pass

    @abstractmethod
    def step_checkout(self) -> None:
        pass

    @abstractmethod
    def step_setup_node(self) -> None:
        pass

    @abstractmethod
    def step_setup_node_with_version_matrix(self) -> None:
        pass

    @abstractmethod
    def step_install_dependencies(self) -> None:
        pass

    @abstractmethod
    def step_build(self) -> None:
        pass

    @abstractmethod
    def step_test(self) -> None:
        pass

    @abstractmethod
    def step_publish(self) -> None:
        pass

    @abstractmethod
    def step_publish_with_tag(self) -> None:
        pass

    @abstractmethod
    def step_publish_with_access(self) -> None:
        pass

    @abstractmethod
    def step_security_scan(self) -> None:
        pass

    @abstractmethod
    def step_run_cache(self) -> None:
        pass


class NodejsActionBuilderImpl(NodejsActionBuilder):
    def __init__(self, name, version, on, env=None) -> None:
        self._steps = None
        self._build = None
        self.name = name
        self.version = version
        self.on = on
        self.env = env
self.step = Steps([])
0
2023-10-31 03:36:36+00:00
2k
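The record above defines an abstract builder with one abstract method per workflow step. A compact sketch of that builder shape; the names are illustrative, and unlike the repo's version this variant returns self so steps can be chained:

from abc import ABC, abstractmethod

class WorkflowBuilder(ABC):
    @abstractmethod
    def step_checkout(self) -> "WorkflowBuilder": ...

    @abstractmethod
    def step_test(self) -> "WorkflowBuilder": ...

class CIWorkflowBuilder(WorkflowBuilder):
    def __init__(self) -> None:
        self.steps = []

    def step_checkout(self) -> "CIWorkflowBuilder":
        self.steps.append({"uses": "actions/checkout@v4"})
        return self   # returning self lets callers chain steps

    def step_test(self) -> "CIWorkflowBuilder":
        self.steps.append({"run": "npm test"})
        return self

print(CIWorkflowBuilder().step_checkout().step_test().steps)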
TheCompAce/ShellSpeak
modules/llm.py
[ { "identifier": "get_token_count", "path": "modules/utils.py", "snippet": "def get_token_count(text, token_adjust=1):\n # Define the maximum length for a text chunk\n max_length = 1000000\n\n # Initialize the total token count\n total_token_count = 0\n\n # Split the text into chunks of up to max_length characters\n for start in range(0, len(text), max_length):\n # Get a chunk of text\n chunk = text[start:start + max_length]\n\n # Process the chunk with the NLP tool\n doc = nlp(chunk)\n\n # Update the total token count\n total_token_count += int(len(doc) * token_adjust)\n\n # Return the total token count\n return total_token_count" }, { "identifier": "ResponseCache", "path": "modules/responseCache.py", "snippet": "class ResponseCache:\n def __init__(self, cache_file=None):\n self.cache_file = os.path.abspath(cache_file) if cache_file else None\n self.cache = {} # In-memory cache\n if cache_file:\n self._init_db()\n\n def _init_db(self):\n with sqlite3.connect(self.cache_file) as conn:\n conn.execute('''CREATE TABLE IF NOT EXISTS cache\n (system_prompt TEXT, user_prompt TEXT, response TEXT, PRIMARY KEY(system_prompt, user_prompt))''')\n\n def get(self, system_prompt, user_prompt):\n if self.cache_file:\n print(self.cache_file)\n with sqlite3.connect(self.cache_file) as conn:\n cur = conn.execute('SELECT response FROM cache WHERE system_prompt=? AND user_prompt=?', (system_prompt, user_prompt))\n row = cur.fetchone()\n return row[0] if row else None\n return self.cache.get((system_prompt, user_prompt))\n\n def set(self, system_prompt, user_prompt, response):\n if self.cache_file:\n with sqlite3.connect(self.cache_file) as conn:\n conn.execute('INSERT OR REPLACE INTO cache VALUES (?, ?, ?)', (system_prompt, user_prompt, response))\n else:\n self.cache[(system_prompt, user_prompt)] = response" } ]
from enum import Enum
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from concurrent.futures import ThreadPoolExecutor
from modules.utils import get_token_count
from modules.responseCache import ResponseCache
import json
import sqlite3
import os
import torch
import transformers
import requests
import asyncio
1,430
transformers.logging.set_verbosity_error()

executor = ThreadPoolExecutor()


class ModelTypes(Enum):
    OpenAI = "OpenAI"
    OpenAI4 = "OpenAI4"
    Mistral = "Mistral"
    StableBeluga7B = "StableBeluga7B"
    Zephyr7bAlpha = "Zephyr7bAlpha"
    Zephyr7bBeta = "Zephyr7bBeta"
    Falcon7BInst = "Falcon7BInst"


class LLM:
    def __init__(self, model_type, use_cache=False, cache_file=None):
        self.ClearModel(model_type)
        self.use_cache = use_cache
        if use_cache:
            self.cache = ResponseCache(cache_file)

    def ClearModel(self, model_type):
        self.model = ModelTypes(model_type)
        self.modelObj = None
        self.tokenizerObj = None
        self.pipeObj = None

    def SetupModel(self):
        if self.model == ModelTypes.Mistral:
            return self._setup_mistral()
        elif self.model == ModelTypes.StableBeluga7B:
            return self._setup_beluga_7b()
        elif self.model == ModelTypes.Zephyr7bAlpha:
            return self._setup_zephyr_7b()
        elif self.model == ModelTypes.Zephyr7bBeta:
            return self._setup_zephyr_7bB()

    async def async_ask(llm, system_prompt, user_prompt, model_type=None, max_tokens=4096, return_type="text"):
        loop = asyncio.get_event_loop()
        response = await loop.run_in_executor(executor, llm.ask, system_prompt, user_prompt, model_type, max_tokens, return_type)
        return response

    def ask(self, system_prompt, user_prompt, model_type=None, max_tokens=4096, return_type="text"):
        if self.use_cache:
            cached_response = self.cache.get(system_prompt, user_prompt)
            if cached_response:
                return cached_response

        response = self._ask(system_prompt, user_prompt, model_type, max_tokens, return_type)

        if self.use_cache:
            self.cache.set(system_prompt, user_prompt, response)

        return response

    def _ask(self, system_prompt, user_prompt, model_type=None, max_tokens=4096, return_type="text"):
        if model_type is None:
            model_type = self.model
        elif model_type is not self.model:
            self.ClearModel(model_type)

        if model_type == ModelTypes.OpenAI:
            return self._ask_openai(system_prompt, user_prompt, max_tokens=16000, return_type=return_type)
        elif model_type == ModelTypes.OpenAI4:
            return self._ask_openai(system_prompt, user_prompt, model="gpt-4-1106-preview", max_tokens=140000, return_type=return_type)
        elif model_type == ModelTypes.Mistral:
            return self._ask_mistral(system_prompt, user_prompt)
        elif model_type == ModelTypes.StableBeluga7B:
            return self._ask_stable_beluga_7b(system_prompt, user_prompt)
        elif model_type == ModelTypes.Zephyr7bAlpha:
            return self._ask_zephyr_7b(system_prompt, user_prompt)
        elif model_type == ModelTypes.Zephyr7bBeta:
            return self._ask_zephyr_7bB(system_prompt, user_prompt)
        elif model_type == ModelTypes.Falcon7BInst:
            return self._ask_falcon_7b_instruct(system_prompt, user_prompt)

    def _ask_openai(self, system_prompt, user_prompt, model="gpt-3.5-turbo-1106", max_tokens=16000, return_type="text"):
        # Placeholder for actual OpenAI API request
        # Uncomment and complete the following code in your local environment
        api_key = os.environ.get("OPENAI_API_KEY", "your-default-openai-api-key-here")
        api_url = "https://api.openai.com/v1/chat/completions"

        token_ct = 0
token_ct = max_tokens - int(get_token_count(system_prompt + "\n" + user_prompt) + 20)
0
2023-10-31 23:35:19+00:00
2k
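ResponseCache in the context above memoizes completions in sqlite keyed by the prompt pair. A runnable sketch of the same idea using only the standard library; PromptCache is an illustrative name and defaults to an in-memory database:

import sqlite3

class PromptCache:
    """Sqlite memo keyed on (system, user) prompts, the same idea as ResponseCache."""
    def __init__(self, path: str = ":memory:") -> None:
        self.conn = sqlite3.connect(path)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS cache "
            "(system TEXT, user TEXT, response TEXT, PRIMARY KEY(system, user))"
        )

    def get(self, system: str, user: str):
        row = self.conn.execute(
            "SELECT response FROM cache WHERE system=? AND user=?", (system, user)
        ).fetchone()
        return row[0] if row else None

    def set(self, system: str, user: str, response: str) -> None:
        self.conn.execute(
            "INSERT OR REPLACE INTO cache VALUES (?, ?, ?)", (system, user, response)
        )
        self.conn.commit()

cache = PromptCache()
cache.set("No verbose.", "hi", "hello")
assert cache.get("No verbose.", "hi") == "hello"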
qym7/SparseDiff
sparse_diffusion/models/transconv_layer.py
[ { "identifier": "SparseXtoy", "path": "sparse_diffusion/models/layers.py", "snippet": "class SparseXtoy(nn.Module):\n def __init__(self, dx, dy):\n \"\"\"Map node features to global features\"\"\"\n super().__init__()\n self.lin = nn.Linear(4 * dx, dy)\n\n def forward(self, X, batch):\n \"\"\"X: N, dx.\"\"\"\n batch = batch.long()\n m = pool.global_mean_pool(X, batch)\n mi = -pool.global_max_pool(-X, batch)\n ma = pool.global_max_pool(X, batch)\n std = (X - m[batch]) * (X - m[batch])\n std = pool.global_mean_pool(std, batch)\n z = torch.hstack((m, mi, ma, std))\n out = self.lin(z)\n return out" }, { "identifier": "SparseEtoy", "path": "sparse_diffusion/models/layers.py", "snippet": "class SparseEtoy(nn.Module):\n def __init__(self, d, dy):\n \"\"\"Map edge features to global features.\"\"\"\n super().__init__()\n self.lin = nn.Linear(4 * d, dy)\n\n def forward(self, edge_index, edge_attr, batch, top_triu=False):\n \"\"\"E: M, de\n Features relative to the diagonal of E could potentially be added.\n \"\"\"\n batch = batch.long()\n if not top_triu:\n batchE = batch[edge_index[0]]\n m = pool.global_mean_pool(edge_attr, batchE)\n mi = -pool.global_max_pool(-edge_attr, batchE)\n ma = pool.global_max_pool(edge_attr, batchE)\n std = (edge_attr - m[batchE]) * (edge_attr - m[batchE])\n std = pool.global_mean_pool(std, batchE)\n z = torch.hstack((m, mi, ma, std))\n else:\n dy = edge_attr.shape[-1]\n batchE1 = batch[edge_index[0]]\n batchE2 = batch[edge_index[1]]\n batchE = torch.hstack([batchE1, batchE2])\n edge_attr_rep = edge_attr.repeat((2, 1))\n m = pool.global_mean_pool(edge_attr_rep, batchE)\n mi = -pool.global_max_pool(-edge_attr_rep, batchE)\n ma = pool.global_max_pool(edge_attr_rep, batchE)\n std = (edge_attr_rep - m[batchE]) * (edge_attr_rep - m[batchE])\n std = pool.global_mean_pool(std, batchE)\n\n len_m = len(m)\n z = torch.zeros((batch.max() + 1, 4 * dy)).to(edge_index.device)\n z[:len_m, :dy] = m\n z[:len_m, dy : 2 * dy] = mi\n z[:len_m, 2 * dy : 3 * dy] = ma\n z[:len_m, 3 * dy :] = std\n\n out = self.lin(z)\n return out" } ]
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Union
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptTensor, Size
from torch_geometric.utils import softmax

from sparse_diffusion.models.layers import SparseXtoy, SparseEtoy
1,359
class TransformerConv(MessagePassing):
    r"""The graph transformer operator from the `"Masked Label Prediction:
    Unified Message Passing Model for Semi-Supervised Classification"
    <https://arxiv.org/abs/2009.03509>`_ paper

    .. math::
        \mathbf{x}^{\prime}_i = \mathbf{W}_1 \mathbf{x}_i +
        \sum_{j \in \mathcal{N}(i)} \alpha_{i,j} \mathbf{W}_2 \mathbf{x}_{j},

    where the attention coefficients :math:`\alpha_{i,j}` are computed via
    multi-head dot product attention:

    .. math::
        \alpha_{i,j} = \textrm{softmax} \left(
        \frac{(\mathbf{W}_3\mathbf{x}_i)^{\top} (\mathbf{W}_4\mathbf{x}_j)}
        {\sqrt{d}} \right)
    """

    _alpha: OptTensor

    def __init__(
        self,
        dx: int,
        de: int,
        dy: int,
        heads: int = 1,
        concat: bool = True,
        dropout: float = 0.0,
        bias: bool = True,
        last_layer: bool = True,
        **kwargs,
    ):
        kwargs.setdefault("aggr", "add")
        super().__init__(node_dim=0, **kwargs)

        self.dx = dx
        self.de = de
        self.dy = dy
        self.df = int(dx / heads)
        self.heads = heads
        self.concat = concat
        self.dropout = dropout
        self.last_layer = last_layer

        self.lin_key = Linear(dx, heads * self.df)
        self.lin_query = Linear(dx, heads * self.df)
        self.lin_value = Linear(dx, heads * self.df)

        if concat:
            self.lin_skip = Linear(dx, heads * self.df, bias=bias)
        else:
            self.lin_skip = Linear(dx, self.df, bias=bias)

        # FiLM E to X: de = dx here as defined in lin_edge
        self.e_add = Linear(de, heads)
        self.e_mul = Linear(de, heads)

        # FiLM y to E
        self.y_e_mul = Linear(dy, de)
        self.y_e_add = Linear(dy, de)

        # FiLM y to X
        self.y_x_mul = Linear(dy, dx)
        self.y_x_add = Linear(dy, dx)

        # Process y
        if self.last_layer:
            self.y_y = Linear(dy, dy)
            self.x_y = SparseXtoy(dx, dy)
self.e_y = SparseEtoy(de, dy)
1
2023-10-30 12:12:16+00:00
2k
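The y_x_mul / y_x_add linears above implement FiLM conditioning of node features by a global vector. A sketch of the core operation in plain PyTorch; the (scale + 1) stabilization is a common convention and an assumption here, since the excerpt does not show the forward pass:

import torch
import torch.nn as nn

dy, dx, n_nodes = 16, 32, 5
y_x_mul = nn.Linear(dy, dx)   # produces the per-channel scale
y_x_add = nn.Linear(dy, dx)   # produces the per-channel shift

X = torch.randn(n_nodes, dx)  # node features
y = torch.randn(1, dy)        # one global vector for the graph
X_filmed = X * (y_x_mul(y) + 1) + y_x_add(y)   # broadcasts over nodes
print(X_filmed.shape)  # torch.Size([5, 32])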
ZhangLin-PKU/FedFTG
train.py
[ { "identifier": "util_dataset", "path": "utils/util_dataset.py", "snippet": "COLOR_MAP = ['red', 'green', 'blue', 'black', 'brown', 'purple', 'yellow', 'pink', 'cyan', 'gray']\r\nclass DatasetObject:\r\nclass Dataset(torch.utils.data.Dataset):\r\nclass DatasetFromDir(data.Dataset):\r\n def __init__(self, dataset, n_client, seed, rule, unbalanced_sgm=0, rule_arg='', data_path=''):\r\n def _get_data_info(self):\r\n def _load_data(self):\r\n def _split_data(self, clnt_data_list, trn_x, trn_y, rule, rule_arg, sgm):\r\n def _load_split_data(self, seed, rule, rule_arg, sgm):\r\n def __init__(self, data_x, data_y=True, train=False, dataset_name=''):\r\n def __len__(self):\r\n def __getitem__(self, idx):\r\ndef split_datasets(dataname, num_clients, num_class, seed, sgm, rule, alpha, data_path='./data', showfig=False):\r\n def __init__(self, img_root, img_list, label_list, transformer):\r\n def __getitem__(self, index):\r\n def __len__(self):\r\ndef show_statis(data_obj, num_clients, num_class, dataname, save_path):\r" }, { "identifier": "util_parser", "path": "utils/util_parser.py", "snippet": "MODEL_ARCH = ['resnet18']\r\nDATASET_NAME = ['CIFAR10', 'CIFAR100']\r\nRULE = ['iid', 'Dirichlet']\r\nMETHODS = ['FedAvg', 'FedProx', 'FedDyn', 'SCAFFOLD', 'MOON',\r\n 'FedFTG', 'FedProxGAN', 'FedDynGAN', 'SCAFFOLDGAN', 'MOONGAN']\r\ndef prepare_parser():\r" }, { "identifier": "model_choose_fn", "path": "models/model_choose_fn.py", "snippet": "def choose_model(model_name, **kwargs):\r\ndef choose_g_model(model_name, **kwargs):\r" }, { "identifier": "FedAvg", "path": "methods/FedAvg.py", "snippet": "def train_FedAvg(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedProx", "path": "methods/FedProx.py", "snippet": "def train_FedProx(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, mu, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "SCAFFOLD", "path": "methods/SCAFFOLD.py", "snippet": "def train_SCAFFOLD(data_obj, act_prob, learning_rate, batch_size, n_minibatch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1,\r\n global_learning_rate=1):\r" }, { "identifier": "MOON", "path": "methods/MOON.py", "snippet": "def train_MOON(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, sch_step, sch_gamma,\r\n save_period, mu, tau, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedDyn", "path": "methods/FedDyn.py", "snippet": "def train_FedDyn(data_obj, act_prob,\r\n learning_rate, batch_size, epoch, com_amount, print_per,\r\n weight_decay, model_func, init_model, alpha_coef,\r\n sch_step, sch_gamma, save_period,\r\n suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedFTG", "path": "methods/FedFTG.py", "snippet": "def train_FedFTG(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='',\r\n rand_seed=0, 
lr_decay_per_round=1):\r" }, { "identifier": "FedProxGAN", "path": "methods/FedProxGAN.py", "snippet": "def train_FedProxGAN(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, mu, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "SCAFFOLDGAN", "path": "methods/SCAFFOLDGAN.py", "snippet": "def train_SCAFFOLDGAN(data_obj, act_prob, learning_rate, batch_size, n_minibatch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1,\r\n global_learning_rate=1):\r" }, { "identifier": "MOONGAN", "path": "methods/MOONGAN.py", "snippet": "def train_MOONGAN(data_obj, act_prob, learning_rate, batch_size, epoch,\r\n com_amount, print_per, weight_decay,\r\n model_func, init_model, init_g_model, sch_step, sch_gamma,\r\n save_period, mu, tau, suffix='', trial=True, data_path='',\r\n rand_seed=0, lr_decay_per_round=1):\r" }, { "identifier": "FedDynGAN", "path": "methods/FedDynGAN.py", "snippet": "def train_FedDynGAN(data_obj, act_prob,\r\n learning_rate, batch_size, epoch, com_amount, print_per,\r\n weight_decay, model_func, init_model, init_g_model, alpha_coef,\r\n sch_step, sch_gamma, save_period,\r\n suffix='', trial=True, data_path='', rand_seed=0, lr_decay_per_round=1):\r" } ]
from utils import util_dataset, util_parser
from models import model_choose_fn
from methods import FedAvg, FedProx, SCAFFOLD, MOON, FedDyn
from methods import FedFTG, FedProxGAN, SCAFFOLDGAN, MOONGAN, FedDynGAN
import torch
import os
import random
import numpy as np
import matplotlib.pyplot as plt
1,537
def run(conf):
    print('Init-------------------------')
    root_path = os.getcwd()
    # print(root_path)
    if root_path.endswith('scripts'):
        root_path = os.path.dirname(root_path)
    conf['savepath'] = os.path.join(root_path, conf['savepath'].strip())
    print('Data and results save path is: ', conf['savepath'])

    ######################################################
    # Provide reproducibility
    torch.manual_seed(conf['seed'])
    random.seed(conf['seed'])
    np.random.seed(conf['seed'])

    in_channel = 3
    out_channel = 10

    ######################################################
    # Split the dataset
data_obj = util_dataset.DatasetObject(dataset=conf['dataset'],
0
2023-10-26 03:35:17+00:00
2k
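run() above seeds torch, random, and numpy for reproducibility. The same three calls wrapped as a reusable helper (assumes PyTorch and NumPy are installed; seed_everything is an illustrative name, not from the repo):

import random
import numpy as np
import torch

def seed_everything(seed: int) -> None:
    """Seed the three RNGs run() touches, in the same order."""
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

seed_everything(0)
print(torch.rand(1).item(), np.random.rand(), random.random())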
Shou-Hsu/Report.ai
summarize.py
[ { "identifier": "convert_json", "path": "utils.py", "snippet": "def convert_json(txt:str, item_list:list) -> str:\n txt = txt.replace('\\n', '').replace('#', '')\n\n output = dict()\n for i in range(len(item_list)):\n start = txt.lower().find(item_list[i].lower() + ':')\n\n if i != len(item_list) - 1: \n end = txt.lower().find(item_list[i+1].lower() + ':')\n else:\n end = len(txt)\n\n output[item_list[i]] = txt[start + len(item_list[i]) + 1 : end].strip()\n\n return output" }, { "identifier": "get_items", "path": "utils.py", "snippet": "def get_items(type:str):\n if type == 'individuel':\n with open('./template/individuel.txt') as f: lines = f.readlines()\n elif type == 'general':\n with open('./template/general.txt') as f: lines = f.readlines()\n else: raise ValueError('type must be \"individuel\" or \"general\"')\n\n item = ''.join(lines)\n item_list = ''.join(lines).split(',') \n item_list = [items.strip() for items in item_list]\n item_format = ''\n for i in item_list: item_format += f'{i}:'\n\n return item_list, item, item_format" } ]
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from utils import convert_json, get_items
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from tqdm import tqdm
from utils import llm
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.chains.llm import LLMChain
from langdetect import detect_langs
from utils import add_hyperlink, divide_audio
import json
import docx, datetime
669
class generate_summary():
    def __init__(self, file_name: str, original_language: str, translated_language: str, chunk_size: int, output_dir: str) -> None:
        self.file_name = file_name
        self.chunk_size = chunk_size
        self.original_language = original_language
        self.translated_language = translated_language
        self.output_dir = output_dir
        self.llm = llm

    def _get_general_summary(self, article_divided: dict) -> None:
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size // 2,
            chunk_overlap=0,
            length_function=len)

        # load transcript
        with open(f'./transcript/{self.file_name}.txt', 'r') as f:
            transcript = ''.join(f.readlines())
        split_text = text_splitter.split_text(transcript)
item_list, items, item_format = get_items('general')
1
2023-10-30 12:29:20+00:00
2k
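_get_general_summary above chunks the transcript with RecursiveCharacterTextSplitter at half the configured chunk size. A dependency-free sketch of the underlying fixed-size splitting, without the recursive separator fallback the langchain class adds; split_text here is an illustrative helper:

def split_text(text: str, chunk_size: int, overlap: int = 0) -> list:
    """Fixed-size character chunks with optional overlap."""
    step = chunk_size - overlap
    return [text[i:i + chunk_size] for i in range(0, len(text), step)]

chunks = split_text("a" * 25, chunk_size=10)
print([len(c) for c in chunks])  # [10, 10, 5]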
Thinksy-app/thinksy
app/review_ops.py
[ { "identifier": "SYSTEM_TEXT", "path": "app/env.py", "snippet": "SYSTEM_TEXT = os.environ.get(\"OPENAI_SYSTEM_TEXT\", DEFAULT_SYSTEM_TEXT)" }, { "identifier": "fetch_channel_messages", "path": "app/slack_ops.py", "snippet": "def fetch_channel_messages(\n client: WebClient,\n user: str,\n conversations: list[str],\n start_date: datetime,\n end_date: datetime,\n limit: int = 100\n):\n \"\"\"\n Fetches messages from a Slack channel\n\n Args:\n client (WebClient): The Slack WebClient instance.\n channel_id (str): The ID of the channel from which to fetch messages.\n limit (int, optional): The maximum number of messages to fetch. Defaults to 100.\n\n Returns:\n list: A list of message dictionaries.\n \"\"\"\n\n result = []\n for conversation_id in conversations:\n try:\n response = client.conversations_history(\n channel=conversation_id,\n limit=limit,\n user=user,\n latest=str(end_date.timestamp()) + \"00000\",\n oldest=str(start_date.timestamp()) + \"00000\",\n )\n messages = response[\"messages\"]\n\n for message in messages:\n if message.get(\"user\") ==user:\n text = message.get(\"text\", \"\")\n link = client.chat_getPermalink(\n channel=conversation_id,\n message_ts=message.get(\"ts\", \"\")\n )\n url = link.get(\"permalink\", \"\")\n result.append({\"text\": text, \"url\": url})\n\n\n except SlackApiError as exception:\n print(exception)\n\n return result" }, { "identifier": "filter_non_membership_and_join", "path": "app/slack_ops.py", "snippet": "def filter_non_membership_and_join(client, logger, selected_conversations: list[str]):\n \"\"\"\n Joins the channel that Thinksy is not already a member of\n\n Args:\n client (WebClient): The Slack WebClient instance.\n channel_id (str): The ID of the channel from which to fetch messages.\n limit (int, optional): The maximum number of messages to fetch. Defaults to 100.\n\n Returns:\n list: A list of message dictionaries.\n \"\"\"\n conversations_bot_is_not_in: list[str] = []\n\n for conversation in selected_conversations:\n try:\n response = client.conversations_info(channel=conversation)\n if not response[\"channel\"][\"is_member\"]:\n conversations_bot_is_not_in.append(conversation)\n except SlackApiError as e:\n logger.error(f\"Error fetching channel review: {e}\")\n\n for to_join_conversation in conversations_bot_is_not_in:\n try:\n response = client.conversations_join(\n channel=to_join_conversation,\n )\n except SlackApiError as e:\n logger.error(f\"Error showing warning: {e}\")" }, { "identifier": "make_synchronous_openai_call", "path": "app/openai_ops.py", "snippet": "def make_synchronous_openai_call(\n *,\n openai_api_key: str,\n model: str,\n temperature: float,\n messages,\n openai_api_type: str,\n openai_api_base: str,\n openai_api_version: str,\n openai_deployment_id: str,\n timeout_seconds: int,\n) -> OpenAIObject:\n return openai.ChatCompletion.create(\n api_key=openai_api_key,\n model=model,\n messages=messages,\n top_p=1,\n n=1,\n max_tokens=MAX_TOKENS,\n temperature=temperature,\n presence_penalty=0,\n frequency_penalty=0,\n logit_bias={},\n stream=False,\n api_type=openai_api_type,\n api_base=openai_api_base,\n api_version=openai_api_version,\n deployment_id=openai_deployment_id,\n request_timeout=timeout_seconds,\n )" } ]
from datetime import datetime

from slack_sdk import WebClient

from app.env import (
    SYSTEM_TEXT,
)
from app.slack_ops import fetch_channel_messages, filter_non_membership_and_join
from app.openai_ops import make_synchronous_openai_call
import json
import re
1,105
""" Business logic writing the reviews """ def generate_review(context, user: str, web_client: WebClient, selected_conversations, start_date, end_date, logger): """ Generates the review based on the user's criteria Parameters: user (str): The user ID from Slack slack_enc_team_id (str): The team ID from Slack Returns: dict: The payload for setting up review criteria """ filter_non_membership_and_join(web_client, logger, selected_conversations) start_date_num = datetime.strptime(start_date, "%Y-%m-%d") end_date_num = datetime.strptime(end_date, "%Y-%m-%d") slack_messages = fetch_channel_messages(web_client, user, selected_conversations, start_date_num, end_date_num) messages = [ { "role": "system",
""" Business logic writing the reviews """ def generate_review(context, user: str, web_client: WebClient, selected_conversations, start_date, end_date, logger): """ Generates the review based on the user's criteria Parameters: user (str): The user ID from Slack slack_enc_team_id (str): The team ID from Slack Returns: dict: The payload for setting up review criteria """ filter_non_membership_and_join(web_client, logger, selected_conversations) start_date_num = datetime.strptime(start_date, "%Y-%m-%d") end_date_num = datetime.strptime(end_date, "%Y-%m-%d") slack_messages = fetch_channel_messages(web_client, user, selected_conversations, start_date_num, end_date_num) messages = [ { "role": "system",
"content": SYSTEM_TEXT
0
2023-10-26 23:47:28+00:00
2k
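fetch_channel_messages in the context above converts datetime bounds to Slack's string timestamps, padding with '00000' the same way. A standard-library sketch of just that conversion; slack_window is an illustrative name, and naive datetimes use the local timezone:

from datetime import datetime

def slack_window(start_date: str, end_date: str):
    """YYYY-MM-DD bounds -> the oldest/latest strings conversations_history expects."""
    start = datetime.strptime(start_date, "%Y-%m-%d")
    end = datetime.strptime(end_date, "%Y-%m-%d")
    return str(start.timestamp()) + "00000", str(end.timestamp()) + "00000"

oldest, latest = slack_window("2023-10-01", "2023-10-31")
print(oldest, latest)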
CrystalWindSnake/nicegui-toolkit
__tests/test_componentStore.py
[ { "identifier": "ComponentStore", "path": "niceguiToolkit/layout/componentStore.py", "snippet": "class ComponentStore:\n def __init__(self) -> None:\n self.cpMapper: Dict[_TNiceguiComponentId, ComponentInfo] = {}\n self._styles_records: Set[_TNiceguiComponentId] = set()\n self._classes_records: Set[_TNiceguiComponentId] = set()\n\n def _set_componentInfo(\n self,\n component_type_name: str,\n component_id: _TNiceguiComponentId,\n code_info: astCore._T_source_code_info,\n ):\n cp_info = ComponentInfo(component_id, component_type_name, code_info)\n self.cpMapper[component_id] = cp_info\n\n if code_info.style.has:\n style_str = astCore.get_call_content(code_info, code_info.style)\n cp_info.stylesHistory.update(ng_vars.ui.element._parse_style(style_str))\n\n if code_info.classes.has:\n classes_str = astCore.get_call_content(code_info, code_info.classes)\n cp_info.classesHistory = ng_vars.ui.element._update_classes_list(\n cp_info.classesHistory, classes_str\n )\n\n def set_componentInfo(\n self, component: ng_vars.ui.element, code_info: astCore._T_source_code_info\n ):\n self._set_componentInfo(type(component).__name__, component.id, code_info)\n\n def _collect_component_info(\n self, component_info: ComponentInfo, styles: Dict[str, str], cleasses: List[str]\n ):\n component_info.styles = {**styles}\n component_info.classes = [c for c in cleasses]\n\n def collect_component_infos(self, client: ng_vars.Client):\n for id, info in self.cpMapper.items():\n ele = client.elements[id]\n self._collect_component_info(info, ele._style, ele._classes)\n\n def get_info(self, id: _TNiceguiComponentId):\n return self.cpMapper[id]\n\n def change_styles(self, id: _TNiceguiComponentId, styles: Dict[str, str]):\n self.get_info(id).stylesHistory.update(styles)\n self._styles_records.add(id)\n\n def remove_styles(self, id: _TNiceguiComponentId, style_names: List[str]):\n stylesHistory = self.get_info(id).stylesHistory\n for name in style_names:\n if name in stylesHistory:\n del stylesHistory[name]\n self._styles_records.add(id)\n\n def change_classes(self, id: _TNiceguiComponentId, classes: List[str]):\n self.get_info(id).classesHistory.extend(classes)\n self._classes_records.add(id)\n\n def create_changed_records(self):\n style_infos = (self.get_info(id) for id in self._styles_records)\n style_infos = [(info, info.create_style_code()) for info in style_infos]\n\n classes_infos = (self.get_info(id) for id in self._classes_records)\n classes_infos = [(info, info.create_classes_code()) for info in classes_infos]\n\n need_infos = chain(style_infos, classes_infos)\n\n for file_path, gp in groupby(\n sorted(need_infos, key=lambda m: m[0].sourceCodeInfo.entry_point.file),\n key=lambda m: m[0].sourceCodeInfo.entry_point.file,\n ):\n records = (item[1] for item in gp)\n code_lines = astCore.apply_code(file_path, records)\n\n code = \"\\n\".join(code_lines)\n yield _T_create_changed_records(file_path, code)\n\n def clear_records(self):\n self._styles_records.clear()\n self._classes_records.clear()\n\n def clear_all_data(self):\n self.cpMapper.clear()\n self.clear_records()" }, { "identifier": "astCore", "path": "niceguiToolkit/utils/astCore.py", "snippet": "class CallerStemNodeVisitor(ast.NodeVisitor):\nclass AttrStemNodeVisitor(ast.NodeVisitor):\nclass EntryPointCallerNodeVisitor(ast.NodeVisitor):\nclass _T_entry_point_position:\nclass _T_get_ast4file_return:\nclass _T_ast_info:\nclass _T_apply_code_record:\nclass _T_get_ast_infos:\nclass _T_entry_point_info:\nclass _T_source_code_info:\n def __init__(self, positions: 
_T_entry_point_position) -> None:\n def visit(self, node: AST) -> Any:\n def get_target(self, node: Any) -> AST:\n def __init__(self, attr: str) -> None:\n def check(self, node: AST):\n def visit(self, node: AST) -> Any:\n def get_target(self, node: Any) -> AST:\n def __init__(self, name: str, lineno: int) -> None:\n def visit(self, node: AST) -> Any:\n def get_target(self, node: Any) -> AST:\ndef get_entry_point_position(file: Path, func_name: str, lineno: int):\ndef _get_ast4file(file: Path):\ndef clear_ast_code_cache():\ndef _get_ast_info(entry_point: _T_entry_point_info, call_name: _T_call_name):\ndef get_call_content(source_code: _T_source_code_info, ast_info: _T_ast_info):\ndef _replace_str_by_position(content: str, replace: str, start: int, end: int):\ndef apply_code(source_code_file: Path, records: Iterable[_T_apply_code_record]):\ndef get_ast_infos(source_code: _T_source_code_info):\ndef get_frame_info_match_file(targets: List[Path]) -> Optional[_T_entry_point_info]:\ndef get_frame_info_exclude_dir(\n exclude_dirs: List[Path],\n) -> Optional[_T_entry_point_info]:\ndef get_source_info(entry_point_info: _T_entry_point_info) -> _T_source_code_info:\ndef _get_call_name(node: Union[ast.Name, ast.Attribute]):\ndef _try_exce_info2_entry_info(exec_info: Executing, file_path: Path):" }, { "identifier": "get_data_file", "path": "__tests/utils.py", "snippet": "def get_data_file(file: str):\n return DATA_ROOT / file" } ]
from niceguiToolkit.layout.componentStore import ComponentStore from niceguiToolkit.utils import astCore from .utils import get_data_file
1,464
def test_create_new_style_call(): mock_code_file = get_data_file("code1.py") exp_file = get_data_file("code1_exp.txt") store = ComponentStore()
def test_create_new_style_call(): mock_code_file = get_data_file("code1.py") exp_file = get_data_file("code1_exp.txt") store = ComponentStore()
entry_info = astCore._T_entry_point_info(
1
2023-10-27 13:50:03+00:00
2k
EnVision-Research/Defect_Spectrum
models/stylegan/mapper.py
[ { "identifier": "EqualLinear", "path": "models/stylegan/modules.py", "snippet": "class EqualLinear(nn.Module):\r\n def __init__(\r\n self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None\r\n ):\r\n super().__init__()\r\n\r\n self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))\r\n\r\n if bias:\r\n self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))\r\n\r\n else:\r\n self.bias = None\r\n\r\n self.activation = activation\r\n\r\n self.scale = (1 / math.sqrt(in_dim)) * lr_mul\r\n self.lr_mul = lr_mul\r\n\r\n def forward(self, input):\r\n if self.activation:\r\n out = F.linear(input, self.weight * self.scale)\r\n out = fused_leaky_relu(out, self.bias * self.lr_mul)\r\n\r\n else:\r\n out = F.linear(\r\n input, self.weight * self.scale, bias=self.bias * self.lr_mul\r\n )\r\n\r\n return out\r\n\r\n def __repr__(self):\r\n return (\r\n f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'\r\n )\r" }, { "identifier": "PixelNorm", "path": "models/stylegan/modules.py", "snippet": "class PixelNorm(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n def forward(self, input):\r\n return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)\r" } ]
from abc import abstractmethod from torch import nn from models.stylegan.modules import EqualLinear, PixelNorm import torch
1,149
STYLESPACE_DIMENSIONS = [512 for _ in range(15)] + [256, 256, 256] + [128, 128, 128] + [64, 64, 64] + [32, 32] class ConcatSquashLinear(nn.Module): def __init__(self, dim_in, dim_out, dim_ctx): super(ConcatSquashLinear, self).__init__() self._layer = EqualLinear(dim_in, dim_out, lr_mul=0.01, activation='fused_lrelu') self._hyper_bias = EqualLinear(dim_ctx, dim_out, lr_mul=0.01, activation='fused_lrelu') self._hyper_gate = EqualLinear(dim_ctx, dim_out, lr_mul=0.01, activation='fused_lrelu') def forward(self, x, ctx): gate = torch.sigmoid(self._hyper_gate(ctx)) bias = self._hyper_bias(ctx) # if x.dim() == 3: # gate = gate.unsqueeze(1) # bias = bias.unsqueeze(1) ret = self._layer(x) * gate + bias return ret class TimeStyleSpaceMapper(nn.Module): def __init__(self, dim_ctx): super(TimeStyleSpaceMapper, self).__init__() for c, c_dim in enumerate(STYLESPACE_DIMENSIONS): setattr(self, f"mapper_{c}", ConcatSquashLinear(dim_in=c_dim, dim_out=c_dim, dim_ctx=dim_ctx+3)) def forward(self, x, beta, context): batch_size = x.size(0) beta = beta.view(batch_size, 1, 1) # (B, 1, 1) context = context.view(batch_size, 1, -1) # (B, 1, F) time_emb = torch.cat([beta, torch.sin(beta), torch.cos(beta)], dim=-1) # (B, 1, 3) ctx_emb = torch.cat([time_emb, context], dim=-1) # (B, 1, F+3) out = [] for c, c_dim in enumerate(STYLESPACE_DIMENSIONS): curr_mapper = getattr(self, f"mapper_{c}") x_c = x[:, :, sum(STYLESPACE_DIMENSIONS[:c]):sum(STYLESPACE_DIMENSIONS[:c])+c_dim] # (B, 1, 512*15 + 256*3 + 128*3 + 64*3 + 32*2) x_c_res = curr_mapper(x_c, ctx_emb).view(x_c.shape) out.append(x_c_res) return torch.cat(out, dim=-1) class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) else: x = layer(x) return x class Mapper(nn.Module): def __init__(self, opts, latent_dim=512): super(Mapper, self).__init__() self.opts = opts
STYLESPACE_DIMENSIONS = [512 for _ in range(15)] + [256, 256, 256] + [128, 128, 128] + [64, 64, 64] + [32, 32] class ConcatSquashLinear(nn.Module): def __init__(self, dim_in, dim_out, dim_ctx): super(ConcatSquashLinear, self).__init__() self._layer = EqualLinear(dim_in, dim_out, lr_mul=0.01, activation='fused_lrelu') self._hyper_bias = EqualLinear(dim_ctx, dim_out, lr_mul=0.01, activation='fused_lrelu') self._hyper_gate = EqualLinear(dim_ctx, dim_out, lr_mul=0.01, activation='fused_lrelu') def forward(self, x, ctx): gate = torch.sigmoid(self._hyper_gate(ctx)) bias = self._hyper_bias(ctx) # if x.dim() == 3: # gate = gate.unsqueeze(1) # bias = bias.unsqueeze(1) ret = self._layer(x) * gate + bias return ret class TimeStyleSpaceMapper(nn.Module): def __init__(self, dim_ctx): super(TimeStyleSpaceMapper, self).__init__() for c, c_dim in enumerate(STYLESPACE_DIMENSIONS): setattr(self, f"mapper_{c}", ConcatSquashLinear(dim_in=c_dim, dim_out=c_dim, dim_ctx=dim_ctx+3)) def forward(self, x, beta, context): batch_size = x.size(0) beta = beta.view(batch_size, 1, 1) # (B, 1, 1) context = context.view(batch_size, 1, -1) # (B, 1, F) time_emb = torch.cat([beta, torch.sin(beta), torch.cos(beta)], dim=-1) # (B, 1, 3) ctx_emb = torch.cat([time_emb, context], dim=-1) # (B, 1, F+3) out = [] for c, c_dim in enumerate(STYLESPACE_DIMENSIONS): curr_mapper = getattr(self, f"mapper_{c}") x_c = x[:, :, sum(STYLESPACE_DIMENSIONS[:c]):sum(STYLESPACE_DIMENSIONS[:c])+c_dim] # (B, 1, 512*15 + 256*3 + 128*3 + 64*3 + 32*2) x_c_res = curr_mapper(x_c, ctx_emb).view(x_c.shape) out.append(x_c_res) return torch.cat(out, dim=-1) class TimestepBlock(nn.Module): """ Any module where forward() takes timestep embeddings as a second argument. """ @abstractmethod def forward(self, x, emb): """ Apply the module to `x` given `emb` timestep embeddings. """ class TimestepEmbedSequential(nn.Sequential, TimestepBlock): """ A sequential module that passes timestep embeddings to the children that support it as an extra input. """ def forward(self, x, emb): for layer in self: if isinstance(layer, TimestepBlock): x = layer(x, emb) else: x = layer(x) return x class Mapper(nn.Module): def __init__(self, opts, latent_dim=512): super(Mapper, self).__init__() self.opts = opts
layers = [PixelNorm()]
1
2023-10-26 10:28:26+00:00
2k
ORI-Muchim/BEGANSing
main.py
[ { "identifier": "update_text_file_in_yaml", "path": "main_util.py", "snippet": "def update_text_file_in_yaml(yaml_path):\n yaml = YAML()\n yaml.preserve_quotes = True\n try:\n with open(yaml_path, 'r', encoding='utf-8') as file:\n data = yaml.load(file)\n\n current_text_file_path = data.get('text_file')\n if current_text_file_path is None:\n print('text_file key not found in YAML file.')\n return\n\n directory, file_name_with_extension = os.path.split(current_text_file_path)\n file_name, extension = os.path.splitext(file_name_with_extension)\n\n print('Current text file base name:', file_name)\n \n new_file_base_name = sys.argv[2]\n new_text_file_path = os.path.join(directory, new_file_base_name + extension)\n data['text_file'] = new_text_file_path\n \n with open(yaml_path, 'w', encoding='utf-8') as file:\n yaml.dump(data, file)\n \n print('text_file has been updated to:', new_text_file_path)\n \n except Exception as e:\n print('Error:', str(e))" }, { "identifier": "find_index_files", "path": "main_util.py", "snippet": "def find_index_files(directory):\n\n pattern = os.path.join(directory, 'added*.index')\n files = glob.glob(pattern)\n \n return files" }, { "identifier": "get_model", "path": "get_models.py", "snippet": "def get_model():\n base_dir = './RVC'\n model_urls = {\n 'pretrained_v2/f0D48k.pth': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/f0D48k.pth',\n 'pretrained_v2/f0G48k.pth': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/f0G48k.pth',\n 'hubert_base.pt': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/hubert_base.pt',\n 'rmvpe.pt': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/rmvpe.pt',\n 'checkpoint/default/latest_D.pt': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/latest_D.pt',\n 'checkpoint/default/latest_G.pt': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/latest_G.pt',\n 'checkpoint/default/default_train.yml': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/default_train.yml',\n 'hifi_gan/default/do_02500000': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/do_02500000',\n 'hifi_gan/default/g_02500000': 'https://github.com/ORI-Muchim/BEGANSing/releases/download/v1.0/g_02500000',\n }\n\n for filename, url in model_urls.items():\n file_path = os.path.join(base_dir, filename)\n if not os.path.isfile(file_path):\n print(f\"Downloading {filename}...\")\n download_file(url, file_path)\n print(f\"Saved {filename}.\\n\")\n else:\n print(f'Skipping Download... {filename} exists.')" } ]
import os import sys import shutil import argparse from main_util import update_text_file_in_yaml, find_index_files from get_models import get_model
1,003
if len(sys.argv) < 4: print("Usage: python main.py <model_name> <song_name> <f0_up_key> [--audiosr]") sys.exit(1) # Init model_name = sys.argv[1] song_name = sys.argv[2] f0_up_key = int(sys.argv[3]) # transpose value input_path = f"../samples/latest_G_{song_name}.wav" output_path = f"../samples/latest_G_{song_name}.wav" model_path = f"./weights/{model_name}.pth" device = "cuda:0" f0_method = "rmvpe" # pm or harvest or crepe or rmvpe parser = argparse.ArgumentParser() parser.add_argument('--audiosr', action='store_true', help='Enable audio processing') args = parser.parse_args(sys.argv[4:]) yaml_path = "./config/default_infer.yml" update_text_file_in_yaml(yaml_path) # Download Necessary Models / Files
if len(sys.argv) < 4: print("Usage: python main.py <model_name> <song_name> <f0_up_key> [--audiosr]") sys.exit(1) # Init model_name = sys.argv[1] song_name = sys.argv[2] f0_up_key = int(sys.argv[3]) # transpose value input_path = f"../samples/latest_G_{song_name}.wav" output_path = f"../samples/latest_G_{song_name}.wav" model_path = f"./weights/{model_name}.pth" device = "cuda:0" f0_method = "rmvpe" # pm or harvest or crepe or rmvpe parser = argparse.ArgumentParser() parser.add_argument('--audiosr', action='store_true', help='Enable audio processing') args = parser.parse_args(sys.argv[4:]) yaml_path = "./config/default_infer.yml" update_text_file_in_yaml(yaml_path) # Download Necessary Models / Files
get_model()
2
2023-10-29 09:32:19+00:00
2k
Charl-AI/stochastic-caching
run_benchmark.py
[ { "identifier": "DummyDataset", "path": "benchmark/dataset.py", "snippet": "class DummyDataset(Dataset):\n def __init__(self, data_dir: str, cache_limit_gib: int):\n \"\"\"PyTorch dataset for dummy data.\n No cache is used if cache_limit_gib is 0.\"\"\"\n self.data_dir = data_dir\n self.cache_limit_gib = cache_limit_gib\n self.transforms = get_transforms()\n self.augmentations = get_augmentations()\n\n save_dummy_data(data_dir)\n\n if cache_limit_gib != 0:\n self.cache = SharedCache(\n cache_limit_gib, DATASET_LEN, CACHED_DATA_DIMS, dtype=torch.uint8\n )\n\n def _get_img(self, idx) -> torch.Tensor:\n \"\"\"Reads dummy data from disk to a uint8 torch tensor.\"\"\"\n img_path = os.path.join(self.data_dir, f\"{idx}.jpg\")\n img = Image.open(img_path).convert(\"RGB\")\n img = self.transforms(img)\n return img\n\n def __len__(self):\n return DATASET_LEN\n\n def __getitem__(self, idx) -> torch.Tensor:\n # caching disabled\n if self.cache_limit_gib == 0:\n return self.augmentations(self._get_img(idx))\n\n # try to read the image from cache\n img = self.cache.get_slot(idx)\n # otherwise, read from disk and try to cache\n if img is None:\n img = self._get_img(idx) # uint8 tensor\n self.cache.set_slot(idx, img)\n\n return self.augmentations(img)" }, { "identifier": "train", "path": "benchmark/trainer.py", "snippet": "def train(loader: DataLoader, num_epochs: int) -> list[float]:\n # using a tiny model because we do not want to be compute bound when\n # benchmarking the dataloading\n model = mobilenet_v3_small()\n model.to(\"cuda\")\n prediction_head = nn.Linear(1000, 10)\n prediction_head.to(\"cuda\")\n model.train()\n params = list(model.parameters()) + list(prediction_head.parameters())\n optim = torch.optim.Adam(params, lr=1e-3)\n\n times = []\n for epoch in range(num_epochs):\n torch.cuda.synchronize()\n epoch_start = time.time()\n for batch in tqdm(loader, desc=f\"Epoch {epoch}\"):\n x = batch.to(\"cuda\")\n embeddings = model(x)\n logits = prediction_head(embeddings)\n # just random labels\n y = torch.randint(0, 10, (len(x),), device=\"cuda\")\n loss = nn.CrossEntropyLoss()(logits, y)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n torch.cuda.synchronize() # wait for all computations to finish\n epoch_end = time.time()\n epoch_time = epoch_end - epoch_start\n times.append(epoch_time)\n print(f\"Epoch {epoch} took {epoch_time:.3f}s\")\n return times" } ]
import argparse import os import pandas as pd import torch from torch.utils.data import DataLoader from benchmark.dataset import DummyDataset from benchmark.trainer import train
889
parser = argparse.ArgumentParser() parser.add_argument("--seed", type=int, default=42) parser.add_argument("--data-dir", type=str, default="/data2/dummy_data") parser.add_argument("--cache-limit-gib", type=int, default=0) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--num-workers", type=int, default=8) parser.add_argument("--pin-memory", type=bool, default=True) parser.add_argument("--output-dir", type=str, default="outputs/") NUM_EPOCHS = 2 def main(args): # first epoch fills cache, second epoch uses cache torch.manual_seed(args.seed)
parser = argparse.ArgumentParser() parser.add_argument("--seed", type=int, default=42) parser.add_argument("--data-dir", type=str, default="/data2/dummy_data") parser.add_argument("--cache-limit-gib", type=int, default=0) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--num-workers", type=int, default=8) parser.add_argument("--pin-memory", type=bool, default=True) parser.add_argument("--output-dir", type=str, default="outputs/") NUM_EPOCHS = 2 def main(args): # first epoch fills cache, second epoch uses cache torch.manual_seed(args.seed)
dataset = DummyDataset(args.data_dir, args.cache_limit_gib)
0
2023-10-27 09:33:43+00:00
2k
hugoycj/light-hloc
lighthloc/pipeline.py
[ { "identifier": "extract_features", "path": "lighthloc/extract_features.py", "snippet": "def resize_image(image, size, interp):\n def __init__(self, root, conf, paths=None):\n def __getitem__(self, idx):\n def __len__(self):\ndef main(conf: Dict,\n image_dir: Path,\n export_dir: Optional[Path] = None,\n as_half: bool = True,\n image_list: Optional[Union[Path, List[str]]] = None,\n feature_path: Optional[Path] = None,\n overwrite: bool = False) -> Path:\nclass ImageDataset(torch.utils.data.Dataset):" }, { "identifier": "match_features", "path": "lighthloc/match_features.py", "snippet": "class WorkQueue():\nclass FeaturePairsDataset(torch.utils.data.Dataset):\n def __init__(self, work_fn, num_threads=1):\n def join(self):\n def thread_fn(self, work_fn):\n def put(self, data):\n def __init__(self, pairs, feature_path_q, feature_path_r):\n def __getitem__(self, idx):\n def __len__(self):\ndef writer_fn(inp, match_path):\ndef main(conf: Dict,\n pairs: Path, features: Union[Path, str],\n export_dir: Optional[Path] = None,\n matches: Optional[Path] = None,\n features_ref: Optional[Path] = None,\n overwrite: bool = False) -> Path:\ndef find_unique_new_pairs(pairs_all: List[Tuple[str]], match_path: Path = None):\ndef match_from_paths(conf: Dict,\n pairs_path: Path,\n match_path: Path,\n feature_path_q: Path,\n feature_path_ref: Path,\n overwrite: bool = False) -> Path:" }, { "identifier": "reconstruction", "path": "lighthloc/reconstruction.py", "snippet": "def create_empty_db(database_path: Path):\ndef import_images(image_dir: Path,\n database_path: Path,\n camera_mode: pycolmap.CameraMode,\n image_list: Optional[List[str]] = None,\n options: Optional[Dict[str, Any]] = None):\ndef get_image_ids(database_path: Path) -> Dict[str, int]:\ndef run_reconstruction(sfm_dir: Path,\n database_path: Path,\n image_dir: Path,\n verbose: bool = False,\n options: Optional[Dict[str, Any]] = None,\n ) -> pycolmap.Reconstruction:\ndef main(sfm_dir: Path,\n image_dir: Path,\n pairs: Path,\n features: Path,\n matches: Path,\n camera_mode: pycolmap.CameraMode = pycolmap.CameraMode.SINGLE,\n verbose: bool = False,\n skip_geometric_verification: bool = False,\n min_match_score: Optional[float] = None,\n image_list: Optional[List[str]] = None,\n image_options: Optional[Dict[str, Any]] = None,\n mapper_options: Optional[Dict[str, Any]] = None,\n ) -> pycolmap.Reconstruction:" }, { "identifier": "pairs_from_retrieval", "path": "lighthloc/associators/pairs_from_retrieval.py", "snippet": "def parse_names(prefix, names, names_all):\ndef get_descriptors(names, path, name2idx=None, key='global_descriptor'):\ndef pairs_from_score_matrix(scores: torch.Tensor,\n invalid: np.array,\n num_select: int,\n min_score: Optional[float] = None):\ndef main(descriptors, output, num_matched,\n query_prefix=None, query_list=None,\n db_prefix=None, db_list=None, db_model=None, db_descriptors=None):" }, { "identifier": "pairs_from_exhaustive", "path": "lighthloc/associators/pairs_from_exhaustive.py", "snippet": "def main(\n output: Path,\n image_list: Optional[Union[Path, List[str]]] = None,\n features: Optional[Path] = None,\n ref_list: Optional[Union[Path, List[str]]] = None,\n ref_features: Optional[Path] = None):" }, { "identifier": "pairs_from_sequance", "path": "lighthloc/associators/pairs_from_sequance.py", "snippet": "def main(\n output: Path,\n image_list: Optional[Union[Path, List[str]]] = None,\n features: Optional[Path] = None,\n overlap: Optional[int] = 5,\n quadratic_overlap: bool = True):\n N = len(names_q)" } ]
from lighthloc import extract_features, match_features, reconstruction from lighthloc.associators import pairs_from_retrieval, pairs_from_exhaustive, pairs_from_sequance from pathlib import Path import click
1,377
# To install hloc, see: https://github.com/cvg/Hierarchical-retrivalization mapper_confs = { 'default' : {}, 'fast' : {'ba_global_max_num_iterations': 20, "ba_global_max_refinements":1, "ba_global_points_freq":200000} } @click.command() @click.option('--data', type=str, help='Path to data directory') @click.option('--match-type', default='retrival', help='Type of matching to perform (default: retrival)', type=click.Choice(['exhaustive', 'sequential', 'retrival'])) @click.option('--feature-type', default='superpoint_inloc', help='Type of feature extraction (default: superpoint_inloc)', type=click.Choice(['superpoint_inloc', 'superpoint_aachen'])) @click.option('--matcher-type', default='lightglue', help='Type of feature matching (default: lightglue)', type=click.Choice(['lightglue', 'lightglue_trt', 'superglue'])) @click.option('--mapper-type', default='default', help='Type of mapper (default: default)', type=click.Choice(['default', 'fast'])) def main(data, match_type, feature_type, matcher_type, mapper_type): images = Path(data) / 'images/' outputs = Path(data) sfm_pairs = outputs / 'pairs-sfm.txt' loc_pairs = outputs / 'pairs-loc.txt' sfm_dir = outputs / 'sparse' / '0' features = outputs / 'features.h5' matches = outputs / 'matches.h5' feature_conf = extract_features.confs[feature_type]
# To install hloc, see: https://github.com/cvg/Hierarchical-retrivalization mapper_confs = { 'default' : {}, 'fast' : {'ba_global_max_num_iterations': 20, "ba_global_max_refinements":1, "ba_global_points_freq":200000} } @click.command() @click.option('--data', type=str, help='Path to data directory') @click.option('--match-type', default='retrival', help='Type of matching to perform (default: retrival)', type=click.Choice(['exhaustive', 'sequential', 'retrival'])) @click.option('--feature-type', default='superpoint_inloc', help='Type of feature extraction (default: superpoint_inloc)', type=click.Choice(['superpoint_inloc', 'superpoint_aachen'])) @click.option('--matcher-type', default='lightglue', help='Type of feature matching (default: lightglue)', type=click.Choice(['lightglue', 'lightglue_trt', 'superglue'])) @click.option('--mapper-type', default='default', help='Type of mapper (default: default)', type=click.Choice(['default', 'fast'])) def main(data, match_type, feature_type, matcher_type, mapper_type): images = Path(data) / 'images/' outputs = Path(data) sfm_pairs = outputs / 'pairs-sfm.txt' loc_pairs = outputs / 'pairs-loc.txt' sfm_dir = outputs / 'sparse' / '0' features = outputs / 'features.h5' matches = outputs / 'matches.h5' feature_conf = extract_features.confs[feature_type]
matcher_conf = match_features.confs[matcher_type]
1
2023-10-27 01:20:50+00:00
2k
KUNLP/XAI_EvidenceExtraction
src/functions/utils.py
[ { "identifier": "SquadV1Processor", "path": "src/functions/processor_sent.py", "snippet": "class SquadV1Processor(SquadProcessor):\r\n train_file = \"train-v1.1.json\"\r\n dev_file = \"dev-v1.1.json\"\r" }, { "identifier": "squad_convert_examples_to_features", "path": "src/functions/processor_sent.py", "snippet": "def squad_convert_examples_to_features(\r\n examples,\r\n tokenizer,\r\n max_seq_length,\r\n doc_stride,\r\n max_query_length,\r\n is_training,\r\n return_dataset=False,\r\n threads=1,\r\n tqdm_enabled=True,\r\n):\r\n \"\"\"\r\n Converts a list of examples into a list of features that can be directly given as input to a model.\r\n It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs.\r\n\r\n Args:\r\n examples: list of :class:`~transformers.data.processors.squad.SquadExample`\r\n tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`\r\n max_seq_length: The maximum sequence length of the inputs.\r\n doc_stride: The stride used when the context is too large and is split across several features.\r\n max_query_length: The maximum length of the query.\r\n is_training: whether to create features for model evaluation or model training.\r\n return_dataset: Default False. Either 'pt' or 'tf'.\r\n if 'pt': returns a torch.data.TensorDataset,\r\n if 'tf': returns a tf.data.Dataset\r\n threads: multiple processing threadsa-smi\r\n\r\n\r\n Returns:\r\n list of :class:`~transformers.data.processors.squad.SquadFeatures`\r\n\r\n Example::\r\n\r\n processor = SquadV2Processor()\r\n examples = processor.get_dev_examples(data_dir)\r\n\r\n features = squad_convert_examples_to_features(\r\n examples=examples,\r\n tokenizer=tokenizer,\r\n max_seq_length=args.max_seq_length,\r\n doc_stride=args.doc_stride,\r\n max_query_length=args.max_query_length,\r\n is_training=not evaluate,\r\n )\r\n \"\"\"\r\n\r\n # Defining helper methods\r\n features = []\r\n threads = min(threads, cpu_count())\r\n with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:\r\n annotate_ = partial(\r\n squad_convert_example_to_features,\r\n max_seq_length=max_seq_length,\r\n doc_stride=doc_stride,\r\n max_query_length=max_query_length,\r\n is_training=is_training,\r\n )\r\n features = list(\r\n tqdm(\r\n p.imap(annotate_, examples, chunksize=32),\r\n total=len(examples),\r\n desc=\"convert squad examples to features\",\r\n disable=not tqdm_enabled,\r\n )\r\n )\r\n refine_examples = []\r\n new_features = []\r\n unique_id = 1000000000\r\n\r\n example_index = 0\r\n for example_features in tqdm(\r\n features, total=len(features), desc=\"add example index and unique id\", disable=not tqdm_enabled\r\n ):\r\n example, example_features = example_features\r\n\r\n if not example_features:\r\n continue\r\n refine_examples.append(example)\r\n\r\n new_feature = []\r\n for example_feature in example_features:\r\n example_feature.example_index = example_index\r\n example_feature.unique_id = unique_id\r\n new_feature.append(example_feature)\r\n unique_id += 1\r\n example_index += 1\r\n\r\n new_features.append(new_feature)\r\n features = new_features\r\n del new_features\r\n global max_sent_num\r\n print(max_sent_num)\r\n if return_dataset == \"pt\":\r\n if not is_torch_available():\r\n raise RuntimeError(\"PyTorch must be installed to return a PyTorch dataset.\")\r\n return refine_examples, features\r" } ]
import logging import random import torch import numpy as np import os from src.functions.processor_sent import ( SquadV1Processor, squad_convert_examples_to_features )
1,277
def init_logger(): logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if not args.no_cuda and torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed) # function for converting a tensor into a list def to_list(tensor): return tensor.detach().cpu().tolist() # function for loading the dataset def load_examples(args, tokenizer, evaluate=False, output_examples=False, do_predict=False, input_dict=None): ''' :param args: hyperparameters :param tokenizer: tokenizer used for tokenization :param evaluate: True for evaluation or open test :param output_examples: True for evaluation or open test / if True, returns examples together with features :param do_predict: True for open test :param input_dict: dictionary of documents and questions given as input at open test time :return: examples : list storing each data item as raw text, regardless of max_length features : list of source texts split and tokenized according to max_length dataset : input ids split according to max_length and converted into tensors that are used directly for training ''' input_dir = args.data_dir print("Creating features from dataset file at {}".format(input_dir)) # declare the processor
def init_logger(): logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if not args.no_cuda and torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed) # function for converting a tensor into a list def to_list(tensor): return tensor.detach().cpu().tolist() # function for loading the dataset def load_examples(args, tokenizer, evaluate=False, output_examples=False, do_predict=False, input_dict=None): ''' :param args: hyperparameters :param tokenizer: tokenizer used for tokenization :param evaluate: True for evaluation or open test :param output_examples: True for evaluation or open test / if True, returns examples together with features :param do_predict: True for open test :param input_dict: dictionary of documents and questions given as input at open test time :return: examples : list storing each data item as raw text, regardless of max_length features : list of source texts split and tokenized according to max_length dataset : input ids split according to max_length and converted into tensors that are used directly for training ''' input_dir = args.data_dir print("Creating features from dataset file at {}".format(input_dir)) # declare the processor
processor = SquadV1Processor()
0
2023-10-25 07:03:47+00:00
2k
joenghl/HYPO
hypo/algo/hypo_bc.py
[ { "identifier": "PPOPolicy", "path": "hypo/network/policy.py", "snippet": "class PPOPolicy(nn.Module):\n\n def __init__(self, state_shape, action_shape, hidden_units=(64, 64),\n hidden_activation=nn.Tanh()):\n super().__init__()\n\n self.net = build_mlp(\n input_dim=state_shape[0],\n output_dim=action_shape[0],\n hidden_units=hidden_units,\n hidden_activation=hidden_activation\n )\n self.log_stds = nn.Parameter(torch.zeros(1, action_shape[0]))\n\n def forward(self, states):\n return torch.tanh(self.net(states))\n\n def sample(self, states):\n dist = self.get_dist(states)\n action = dist.sample()\n log_pi = dist.log_prob(action)\n return action, log_pi\n\n def evaluate_log_pi(self, states, actions):\n raise NotImplementedError\n\n def get_dist(self, states):\n mean = self.forward(states)\n log_stds = self.log_stds.expand_as(mean)\n std = torch.exp(log_stds)\n dist = Normal(mean, std)\n return dist\n\n def sample_with_dist(self, states):\n dist = self.get_dist(states)\n action = dist.sample()\n log_pi = dist.log_prob(action)\n return action, log_pi, dist.loc, dist.scale" }, { "identifier": "Algorithm", "path": "hypo/algo/base.py", "snippet": "class Algorithm(ABC):\n\n def __init__(self, state_shape, action_shape, device, seed, logger, gamma):\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n self.actor = None\n self.learning_steps = 0\n self.state_shape = state_shape\n self.action_shape = action_shape\n self.device = device\n self.gamma = gamma\n self.logger = logger\n\n def explore(self, state):\n state = torch.tensor(state, dtype=torch.float, device=self.device)\n with torch.no_grad():\n action, log_pi = self.actor.sample(state.unsqueeze_(0))\n return action.cpu().numpy()[0], log_pi.cpu().numpy()[0]\n\n def exploit(self, state):\n state = torch.tensor(state, dtype=torch.float, device=self.device)\n with torch.no_grad():\n action = self.actor(state.unsqueeze_(0))\n return action.cpu().numpy()[0]\n\n @abstractmethod\n def is_update(self, step):\n pass\n\n @abstractmethod\n def update(self, step):\n pass\n\n @abstractmethod\n def save_models(self, save_dir):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)" } ]
from torch import nn from torch.optim import Adam from hypo.network import PPOPolicy from .base import Algorithm import torch
768
class HBC(Algorithm): def __init__(self, buffer_exp, state_shape, action_shape, device, seed, logger, gamma=0.995, log_interval=1e3, lr_actor=3e-4, batch_size=64, units_actor=(64, 64), **kwargs): super().__init__(state_shape, action_shape, device, seed, logger, gamma) self.buffer_exp = buffer_exp self.batch_size = batch_size
class HBC(Algorithm): def __init__(self, buffer_exp, state_shape, action_shape, device, seed, logger, gamma=0.995, log_interval=1e3, lr_actor=3e-4, batch_size=64, units_actor=(64, 64), **kwargs): super().__init__(state_shape, action_shape, device, seed, logger, gamma) self.buffer_exp = buffer_exp self.batch_size = batch_size
self.actor = PPOPolicy(
0
2023-10-27 10:37:44+00:00
2k
jmcruvellier/little_monkey
custom_components/little_monkey/sensor.py
[ { "identifier": "EcojokoEntity", "path": "custom_components/little_monkey/entity.py", "snippet": "class EcojokoEntity(CoordinatorEntity):\n \"\"\"EcojokoEntity class.\"\"\"\n\n _attr_attribution = ATTRIBUTION\n\n def __init__(self, coordinator, device_name, firmware_version):\n \"\"\"Initialize the main device entity.\"\"\"\n super().__init__(coordinator)\n self._device_name = device_name\n self._firmware_version = firmware_version\n self._child_entities = []\n\n @property\n def name(self):\n \"\"\"Return the name of the Ecojoko device entity.\"\"\"\n return f\"{self._device_name}\"\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID for the Ecojoko device entity.\"\"\"\n return f\"{DOMAIN}_{self._device_name}\"\n # return f\"{DOMAIN}_main_device_{self._device_name}\"\n\n @property\n def state(self):\n \"\"\"Return the state of the main device.\"\"\"\n return self._firmware_version\n\n @property\n def device_info(self) -> DeviceInfo:\n \"\"\"Return device information for the main device.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": MANUFACTURER,\n \"model\": MODEL,\n \"sw_version\": VERSION,\n \"hw_version\": self._firmware_version,\n }\n\n @property\n def child_entities(self):\n \"\"\"Return a list of child entities linked to the main device.\"\"\"\n return self._child_entities\n\n def add_child_entity(self, child_entity):\n \"\"\"Add a child entity to the main device.\"\"\"\n self._child_entities.append(child_entity)" }, { "identifier": "EcojokoSensor", "path": "custom_components/little_monkey/entity.py", "snippet": "class EcojokoSensor(CoordinatorEntity, SensorEntity):\n \"\"\"Representation of a my_device sensor.\"\"\"\n\n def __init__(self, main_device, sensor_name, state_class, device_class, unit_of_measurement, icon):\n \"\"\"Initialize the sensor.\"\"\"\n super().__init__(main_device.coordinator)\n self._main_device = main_device\n self._sensor_name = sensor_name\n self._state_class = state_class\n self._device_class = device_class\n self._unit_of_measurement = unit_of_measurement\n self._icon = icon\n self._attr_translation_key = sensor_name\n self._attr_has_entity_name = True\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return f\"{self._main_device.name} - {self._main_device.coordinator.tranfile[self._sensor_name]}\"\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID for the sensor.\"\"\"\n return f\"{self._main_device.unique_id}_{self._sensor_name}\"\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self.coordinator.data.get(self._sensor_name)\n\n @property\n def state_class(self):\n \"\"\"Return the state class of the sensor.\"\"\"\n return self._state_class\n\n @property\n def device_class(self):\n \"\"\"Return the device class of the sensor.\"\"\"\n return self._device_class\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement.\"\"\"\n return self._unit_of_measurement\n\n @property\n def icon(self):\n \"\"\"Return the icon of the sensor.\"\"\"\n return self._icon\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n return {\n \"device_name\": self._main_device.name,\n \"firmware_version\": self._main_device._firmware_version,\n }\n\n # def update(self):\n # \"\"\"Update the sensor data.\"\"\"\n # # Add code here to update sensor data (e.g., read temperature from the device)\n # # For simplicity, we'll set a dummy value\n # 
self.coordinator.data[self._sensor_name] = 27.0 # Replace with actual sensor data" }, { "identifier": "DOMAIN", "path": "custom_components/little_monkey/const.py", "snippet": "DOMAIN = \"little_monkey\"" }, { "identifier": "CONF_USE_HCHP_FEATURE", "path": "custom_components/little_monkey/const.py", "snippet": "CONF_USE_HCHP_FEATURE = \"use_hchp_feature\"" }, { "identifier": "CONF_USE_TEMPO_FEATURE", "path": "custom_components/little_monkey/const.py", "snippet": "CONF_USE_TEMPO_FEATURE = \"use_tempo_feature\"" }, { "identifier": "CONF_USE_TEMPHUM_FEATURE", "path": "custom_components/little_monkey/const.py", "snippet": "CONF_USE_TEMPHUM_FEATURE = \"use_temphum_feature\"" }, { "identifier": "CONF_USE_PROD_FEATURE", "path": "custom_components/little_monkey/const.py", "snippet": "CONF_USE_PROD_FEATURE = \"use_prod_feature\"" } ]
from homeassistant.components.sensor import ( SensorStateClass, SensorDeviceClass, ) from homeassistant.const import UnitOfPower, UnitOfEnergy, UnitOfTemperature, PERCENTAGE, CONF_NAME from custom_components.little_monkey.entity import EcojokoEntity, EcojokoSensor from .const import ( DOMAIN, CONF_USE_HCHP_FEATURE, CONF_USE_TEMPO_FEATURE, CONF_USE_TEMPHUM_FEATURE, CONF_USE_PROD_FEATURE )
1,325
"""Sensor platform for mon_ecojoko.""" from __future__ import annotations async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the custom component sensors.""" # Fetch data or configure your sensors here coordinator = hass.data[DOMAIN][config_entry.entry_id] # Create the main device entity firmware = coordinator.data["gateway_firmware_version"] main_device = EcojokoEntity(coordinator, config_entry.data.get(CONF_NAME), firmware) # Create child entities and link them to the main device # Real time sensor
"""Sensor platform for mon_ecojoko.""" from __future__ import annotations async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the custom component sensors.""" # Fetch data or configure your sensors here coordinator = hass.data[DOMAIN][config_entry.entry_id] # Create the main device entity firmware = coordinator.data["gateway_firmware_version"] main_device = EcojokoEntity(coordinator, config_entry.data.get(CONF_NAME), firmware) # Create child entities and link them to the main device # Real time sensor
main_device.add_child_entity(EcojokoSensor(
1
2023-10-29 21:03:13+00:00
2k
stanleylsx/text_embedding
engines/predict.py
[ { "identifier": "configure", "path": "config.py", "snippet": "" }, { "identifier": "Model", "path": "engines/model.py", "snippet": "class Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.model_type = configure['model_type']\n self.emb_type = configure['emb_type']\n max_sequence_length = configure['max_sequence_length']\n config_kwargs = {\n 'max_position_embeddings': max_sequence_length,\n 'ignore_mismatched_sizes': True\n }\n if self.model_type == 'XLMRoberta':\n self.model = XLMRobertaModel.from_pretrained(configure['hf_tag'], **config_kwargs)\n elif self.model_type == 'RoFormer':\n self.tokenizer = RoFormerModel.from_pretrained(configure['hf_tag'], **config_kwargs)\n elif self.model_type == 'Bert':\n self.model = BertModel.from_pretrained(configure['hf_tag'], **config_kwargs)\n else:\n raise ValueError('model_type must be in [XLMRoberta, RoFormer, Bert]')\n\n if configure['hierarchical_position']:\n # 创建分层的position embedding\n hierarchical_embedding = hierarchical_position(self.model)\n # 新的position embedding 嵌入到现有的model中\n self.model.embeddings.position_embeddings = hierarchical_embedding\n\n def forward(self, input_ids):\n attention_mask = torch.where(input_ids > 0, 1, 0)\n model_output = self.model(input_ids, attention_mask=attention_mask)\n if self.emb_type == 'last-avg':\n token_embeddings = model_output[0]\n input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)\n sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n vectors = sum_embeddings / sum_mask\n elif self.emb_type == 'cls':\n vectors = model_output.last_hidden_state[:, 0]\n elif self.emb_type == 'pooler':\n vectors = model_output.pooler_output\n vectors = torch.nn.functional.normalize(vectors, 2.0, dim=1)\n return vectors" }, { "identifier": "MyModel", "path": "engines/utils/metrics.py", "snippet": "class MyModel():\n def __init__(self, data_manage, model, device):\n self.model = model\n self.data_manage = data_manage\n self.device = device\n\n @torch.inference_mode()\n def encode(self, sentences, batch_size, **kwargs):\n vectors = []\n sentences = [sentences[i: i + batch_size] for i in range(0, len(sentences), batch_size)]\n for sentence in sentences:\n input_ids = self.data_manage.batch_tokenize(sentence).to(self.device)\n vector = self.model(input_ids)\n vector = vector.detach().cpu().tolist()\n vectors.extend(vector)\n return vectors" } ]
from config import configure from engines.model import Model from engines.utils.metrics import MyModel from torch.utils.data import DataLoader from mteb import MTEB import pandas as pd import torch import os
837
# -*- coding: utf-8 -*- # @Time : 2023/10/27 22:05 # @Author : lishouxian # @Email : [email protected] # @File : predict.py # @Software: VSCode class Predictor: def __init__(self, data_manage, device, logger): self.logger = logger self.data_manage = data_manage self.device = device self.checkpoints_dir = configure['checkpoints_dir'] self.model_name = configure['model_name']
# -*- coding: utf-8 -*- # @Time : 2023/10/27 22:05 # @Author : lishouxian # @Email : [email protected] # @File : predict.py # @Software: VSCode class Predictor: def __init__(self, data_manage, device, logger): self.logger = logger self.data_manage = data_manage self.device = device self.checkpoints_dir = configure['checkpoints_dir'] self.model_name = configure['model_name']
self.model = Model().to(device)
1
2023-10-27 07:47:02+00:00
2k
akekic/causal-component-analysis
data_generator/mixing_function.py
[ { "identifier": "leaky_tanh", "path": "data_generator/utils.py", "snippet": "def leaky_tanh(x: Tensor, alpha: float = 1.0, beta: float = 0.1) -> Tensor:\n return torch.tanh(alpha * x) + beta * x" }, { "identifier": "sample_invertible_matrix", "path": "data_generator/utils.py", "snippet": "def sample_invertible_matrix(n: int) -> Tensor:\n matrix = torch.rand((n, n))\n while torch.abs(torch.det(matrix)) < 0.1:\n matrix = torch.randn((n, n))\n return matrix" } ]
from abc import ABC from pathlib import Path from torch import Tensor from .utils import leaky_tanh, sample_invertible_matrix import pandas as pd import torch
1,048
""" def __init__(self, latent_dim: int, observation_dim: int) -> None: self.latent_dim = latent_dim self.observation_dim = observation_dim def __call__(self, v: Tensor) -> Tensor: """ Apply the mixing function to the latent variables. Parameters ---------- v: Tensor, shape (num_samples, latent_dim) Latent variables. Returns ------- x: Tensor, shape (num_samples, observation_dim) Observed variables. """ raise NotImplementedError() def save_coeffs(self, path: Path) -> None: """ Save the coefficients of the mixing function to disk. Parameters ---------- path: Path Path to save the coefficients to. """ raise NotImplementedError() def unmixing_jacobian(self, v: Tensor) -> Tensor: """ Compute the jacobian of the inverse mixing function using autograd and the inverse function theorem. Parameters ---------- v: Tensor, shape (num_samples, latent_dim) Latent variables. Returns ------- unmixing_jacobian: Tensor, shape (num_samples, observation_dim, latent_dim) Jacobian of the inverse mixing function. References ---------- https://en.wikipedia.org/wiki/Inverse_function_theorem https://discuss.pytorch.org/t/computing-batch-jacobian-efficiently/80771/7 """ func = self.__call__ inputs = v mixing_jacobian = torch.vmap(torch.func.jacrev(func))(inputs) unmixing_jacobian = torch.inverse(mixing_jacobian) return unmixing_jacobian class LinearMixing(MixingFunction): """ Linear mixing function. The coefficients are sampled from a uniform distribution. Parameters ---------- latent_dim: int Dimension of the latent space. observation_dim: int Dimension of the observation space. """ def __init__(self, latent_dim: int, observation_dim: int) -> None: super().__init__(latent_dim, observation_dim) self.coeffs = torch.rand((latent_dim, observation_dim)) def __call__(self, v: Tensor) -> Tensor: return torch.matmul(v, self.coeffs.to(v.device)) def save_coeffs(self, path: Path) -> None: # save matrix coefficients torch.save(self.coeffs, path / "matrix.pt") matrix_np = self.coeffs.numpy() # convert to Numpy array df = pd.DataFrame(matrix_np) # convert to a dataframe df.to_csv(path / "matrix.csv", index=False) # save as csv class NonlinearMixing(MixingFunction): """ Nonlinear mixing function. The function is composed of a number of invertible matrices and leaky-tanh nonlinearities. I.e. we apply a random neural network to the latent variables. Parameters ---------- latent_dim: int Dimension of the latent space. observation_dim: int Dimension of the observation space. n_nonlinearities: int Number of layers (i.e. invertible maps and nonlinearities) in the mixing function. Default: 1. """ def __init__( self, latent_dim: int, observation_dim: int, n_nonlinearities: int = 1 ) -> None: super().__init__(latent_dim, observation_dim) assert latent_dim == observation_dim self.coefs = torch.rand((latent_dim, observation_dim)) self.n_nonlinearities = n_nonlinearities matrices = [] for i in range(n_nonlinearities): matrices.append(sample_invertible_matrix(observation_dim)) self.matrices = matrices nonlinearities = [] for i in range(n_nonlinearities):
class MixingFunction(ABC): """ Base class for mixing functions. The mixing function is the function that maps from the latent space to the observation space. Parameters ---------- latent_dim: int Dimension of the latent space. observation_dim: int Dimension of the observation space. """ def __init__(self, latent_dim: int, observation_dim: int) -> None: self.latent_dim = latent_dim self.observation_dim = observation_dim def __call__(self, v: Tensor) -> Tensor: """ Apply the mixing function to the latent variables. Parameters ---------- v: Tensor, shape (num_samples, latent_dim) Latent variables. Returns ------- x: Tensor, shape (num_samples, observation_dim) Observed variables. """ raise NotImplementedError() def save_coeffs(self, path: Path) -> None: """ Save the coefficients of the mixing function to disk. Parameters ---------- path: Path Path to save the coefficients to. """ raise NotImplementedError() def unmixing_jacobian(self, v: Tensor) -> Tensor: """ Compute the jacobian of the inverse mixing function using autograd and the inverse function theorem. Parameters ---------- v: Tensor, shape (num_samples, latent_dim) Latent variables. Returns ------- unmixing_jacobian: Tensor, shape (num_samples, observation_dim, latent_dim) Jacobian of the inverse mixing function. References ---------- https://en.wikipedia.org/wiki/Inverse_function_theorem https://discuss.pytorch.org/t/computing-batch-jacobian-efficiently/80771/7 """ func = self.__call__ inputs = v mixing_jacobian = torch.vmap(torch.func.jacrev(func))(inputs) unmixing_jacobian = torch.inverse(mixing_jacobian) return unmixing_jacobian class LinearMixing(MixingFunction): """ Linear mixing function. The coefficients are sampled from a uniform distribution. Parameters ---------- latent_dim: int Dimension of the latent space. observation_dim: int Dimension of the observation space. """ def __init__(self, latent_dim: int, observation_dim: int) -> None: super().__init__(latent_dim, observation_dim) self.coeffs = torch.rand((latent_dim, observation_dim)) def __call__(self, v: Tensor) -> Tensor: return torch.matmul(v, self.coeffs.to(v.device)) def save_coeffs(self, path: Path) -> None: # save matrix coefficients torch.save(self.coeffs, path / "matrix.pt") matrix_np = self.coeffs.numpy() # convert to Numpy array df = pd.DataFrame(matrix_np) # convert to a dataframe df.to_csv(path / "matrix.csv", index=False) # save as csv class NonlinearMixing(MixingFunction): """ Nonlinear mixing function. The function is composed of a number of invertible matrices and leaky-tanh nonlinearities. I.e. we apply a random neural network to the latent variables. Parameters ---------- latent_dim: int Dimension of the latent space. observation_dim: int Dimension of the observation space. n_nonlinearities: int Number of layers (i.e. invertible maps and nonlinearities) in the mixing function. Default: 1. """ def __init__( self, latent_dim: int, observation_dim: int, n_nonlinearities: int = 1 ) -> None: super().__init__(latent_dim, observation_dim) assert latent_dim == observation_dim self.coefs = torch.rand((latent_dim, observation_dim)) self.n_nonlinearities = n_nonlinearities matrices = [] for i in range(n_nonlinearities): matrices.append(sample_invertible_matrix(observation_dim)) self.matrices = matrices nonlinearities = [] for i in range(n_nonlinearities):
nonlinearities.append(leaky_tanh)
0
2023-10-25 09:25:26+00:00
2k
facebookresearch/verde
src/generate/export.py
[ { "identifier": "to_cuda", "path": "src/utils.py", "snippet": "def to_cuda(*args):\n \"\"\"\n Move tensors to CUDA.\n \"\"\"\n if not CUDA:\n return args\n return [None if x is None else x.cuda() for x in args]" }, { "identifier": "timeout", "path": "src/utils.py", "snippet": "def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\n\n def decorator(func):\n\n def _handle_timeout(repeat_id, signum, frame):\n signal.signal(signal.SIGALRM, partial(_handle_timeout, repeat_id + 1))\n signal.alarm(seconds)\n raise TimeoutError(error_message)\n\n def wrapper(*args, **kwargs):\n old_signal = signal.signal(signal.SIGALRM, partial(_handle_timeout, 0))\n old_time_left = signal.alarm(seconds)\n assert type(old_time_left) is int and old_time_left >= 0\n if 0 < old_time_left < seconds: # do not exceed previous timer\n signal.alarm(old_time_left)\n start_time = time.time()\n try:\n result = func(*args, **kwargs)\n finally:\n if old_time_left == 0:\n signal.alarm(0)\n else:\n sub = time.time() - start_time\n signal.signal(signal.SIGALRM, old_signal)\n signal.alarm(max(0, math.ceil(old_time_left - sub)))\n return result\n\n return wraps(func)(wrapper)\n\n return decorator" }, { "identifier": "TimeoutError", "path": "src/utils.py", "snippet": "class TimeoutError(BaseException):\n pass" } ]
import os import io import sys import ast import time import numpy as np import torch from logging import getLogger from collections import OrderedDict from torch import nn from ..utils import to_cuda, timeout, TimeoutError
1,067
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = getLogger() class Generator(object): def __init__(self, params, gen): """ Initialize trainer. """ # params self.params = params self.gen = gen self.print_cycle = 500 if params.step == "Ab" else 1 # epoch / iteration size self.epoch_size = params.epoch_size assert self.epoch_size > 0 # training statistics self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.timeout_count = 0 self.total_count = 0 self.stats = { "processed_e": 0 } self.last_time = time.time() # file handler to export data export_path_prefix = os.path.join(params.dump_path, "data.prefix") if params.step == 'Ab': export_path_prefix = os.path.join(params.dump_path, "test.prefix") self.file_handler_prefix = io.open(export_path_prefix, mode="a", encoding="utf-8") logger.info(f"Data will be stored in: {export_path_prefix} ...") def iter(self): """ End of iteration. """ self.n_iter += 1 self.n_total_iter += 1 self.print_stats() def print_stats(self): """ Print statistics about the training. """ if self.n_total_iter % self.print_cycle != 0: return s_iter = "%7i - " % self.n_total_iter s_stat = " || ".join( [ "{}: {:7.4f}".format(k.upper().replace("_", "-"), np.mean(v)) for k, v in self.stats.items() if type(v) is list and len(v) > 0 ] ) for k in self.stats.keys(): if type(self.stats[k]) is list: del self.stats[k][:] # processing speed new_time = time.time() diff = new_time - self.last_time s_speed = "{:7.2f} equations/s - ".format( self.stats["processed_e"] * 1.0 / diff, ) self.stats["processed_e"] = 0 self.last_time = new_time # log speed + stats logger.info(s_iter + s_speed + s_stat) def end_epoch(self): """ End the epoch. """ if self.params.step == 'Ab': np.save(os.path.join(self.params.dump_path, 'diff.npy'), self.gen.diff) logger.info(f'Saved diff at {os.path.join(self.params.dump_path, "diff.npy")}') def export_data(self): """ Export data to the disk. """ while True: try: sample = self.gen.generate() break
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = getLogger() class Generator(object): def __init__(self, params, gen): """ Initialize trainer. """ # params self.params = params self.gen = gen self.print_cycle = 500 if params.step == "Ab" else 1 # epoch / iteration size self.epoch_size = params.epoch_size assert self.epoch_size > 0 # training statistics self.epoch = 0 self.n_iter = 0 self.n_total_iter = 0 self.timeout_count = 0 self.total_count = 0 self.stats = { "processed_e": 0 } self.last_time = time.time() # file handler to export data export_path_prefix = os.path.join(params.dump_path, "data.prefix") if params.step == 'Ab': export_path_prefix = os.path.join(params.dump_path, "test.prefix") self.file_handler_prefix = io.open(export_path_prefix, mode="a", encoding="utf-8") logger.info(f"Data will be stored in: {export_path_prefix} ...") def iter(self): """ End of iteration. """ self.n_iter += 1 self.n_total_iter += 1 self.print_stats() def print_stats(self): """ Print statistics about the training. """ if self.n_total_iter % self.print_cycle != 0: return s_iter = "%7i - " % self.n_total_iter s_stat = " || ".join( [ "{}: {:7.4f}".format(k.upper().replace("_", "-"), np.mean(v)) for k, v in self.stats.items() if type(v) is list and len(v) > 0 ] ) for k in self.stats.keys(): if type(self.stats[k]) is list: del self.stats[k][:] # processing speed new_time = time.time() diff = new_time - self.last_time s_speed = "{:7.2f} equations/s - ".format( self.stats["processed_e"] * 1.0 / diff, ) self.stats["processed_e"] = 0 self.last_time = new_time # log speed + stats logger.info(s_iter + s_speed + s_stat) def end_epoch(self): """ End the epoch. """ if self.params.step == 'Ab': np.save(os.path.join(self.params.dump_path, 'diff.npy'), self.gen.diff) logger.info(f'Saved diff at {os.path.join(self.params.dump_path, "diff.npy")}') def export_data(self): """ Export data to the disk. """ while True: try: sample = self.gen.generate() break
except TimeoutError:
2
2023-10-30 17:53:57+00:00
2k
Paiman-Rasoli/flatway
src/tests/test_flatten.py
[ { "identifier": "mock_list_with_deep_one", "path": "src/tests/fixtures.py", "snippet": "@pytest.fixture\ndef mock_list_with_deep_one():\n return [1, 2, 3, 4, 5, 6, 7, 8, [9, 10, 11], 12]" }, { "identifier": "mock_list_with_deep_five", "path": "src/tests/fixtures.py", "snippet": "@pytest.fixture\ndef mock_list_with_deep_five():\n return [1, 2, 3, 4, 5, 6, 7, 8, [\n 9,\n 10,\n 11,\n [\n 12, 13,\n [\n 14, 15,\n [16, 17, [\n 18,\n 19,\n 20\n ]]\n ]\n ]\n ]]" }, { "identifier": "mock_tuple_with_deep_one", "path": "src/tests/fixtures.py", "snippet": "@pytest.fixture\ndef mock_tuple_with_deep_one():\n return (1, 2, 3, 4, 5, 6, 7, 8,\n (9, 10, 11),\n 12)" }, { "identifier": "mock_tuple_with_deep_five", "path": "src/tests/fixtures.py", "snippet": "@pytest.fixture\ndef mock_tuple_with_deep_five():\n return (1, 2, 3, 4, 5, 6, 7, 8, (\n 9,\n 10,\n (\n 11, 12,\n (\n 13, 14,\n (15, 16, (\n 17,\n 18,\n 19,\n 20\n ))\n )\n )\n ))" }, { "identifier": "mock_dictionary_deep_one", "path": "src/tests/fixtures.py", "snippet": "@pytest.fixture\ndef mock_dictionary_deep_one():\n return {\"name\": \"Jhon\", \"languages\": {\"en\": True, \"per\": False}, \"age\": 20}" }, { "identifier": "mock_dictionary_deep_three", "path": "src/tests/fixtures.py", "snippet": "@pytest.fixture\ndef mock_dictionary_deep_three():\n return {\n \"name\": \"Jhon\",\n \"languages\": {\n \"en\": True, \"per\": False, \"info\": {\n \"newChild\": True,\n \"deep\": {\n \"height\": 100,\n \"ids\": [1, 2, 3, 4, 5]\n }\n }\n }\n , \"age\": 20}" } ]
from flatway.flatten import flatten, flattenDict from .fixtures import (mock_list_with_deep_one, mock_list_with_deep_five, mock_tuple_with_deep_one, mock_tuple_with_deep_five, mock_dictionary_deep_one, mock_dictionary_deep_three)
646
def test_flatten_of_list_with_deep_one(mock_list_with_deep_one): result = flatten(mock_list_with_deep_one) expect = [x for x in range(1, 13)] assert result == expect assert isinstance(result, list)
def test_flatten_of_list_with_deep_one(mock_list_with_deep_one): result = flatten(mock_list_with_deep_one) expect = [x for x in range(1, 13)] assert result == expect assert isinstance(result, list)
def test_flatten_of_list_with_deep_five(mock_list_with_deep_five):
1
2023-10-25 20:47:36+00:00
2k
Muhammadali-Akbarov/aiogram-bot-template
aiogram_bot_template/db/db_api/storages/postgres/storage.py
[ { "identifier": "MultipleQueryResults", "path": "aiogram_bot_template/db/db_api/storages/basestorage/storage.py", "snippet": "class MultipleQueryResults:\n def __init__(self, results: list[typing.Mapping[str, Any]]):\n self._data: list[dict[str, Any]] = [{**i} for i in results]\n\n @property\n def data(self) -> list[dict[str, Any]]:\n return self._data\n\n def convert(self, model: type[T]) -> list[T]:\n return [model(**i) for i in self._data]" }, { "identifier": "RawConnection", "path": "aiogram_bot_template/db/db_api/storages/basestorage/storage.py", "snippet": "class RawConnection:\n async def _fetch(\n self,\n sql: str,\n params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None,\n con: Optional[Any] = None,\n ) -> MultipleQueryResults:\n raise NotImplementedError\n\n async def _fetchrow(\n self,\n sql: str,\n params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None,\n con: Optional[Any] = None,\n ) -> SingleQueryResult:\n raise NotImplementedError\n\n async def _execute(\n self,\n sql: str,\n params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None,\n con: Optional[Any] = None,\n ) -> None:\n raise NotImplementedError" }, { "identifier": "SingleQueryResult", "path": "aiogram_bot_template/db/db_api/storages/basestorage/storage.py", "snippet": "class SingleQueryResult:\n def __init__(self, result: Optional[typing.Mapping[str, Any]]):\n self._data = {**result} if result else None\n\n @property\n def data(self) -> Optional[dict[str, Any]]:\n return self._data\n\n def convert(self, model: type[T]) -> Optional[T]:\n return model(**self.data) if self._data else None" } ]
import time import asyncpg import structlog from typing import Any, Optional, TypeVar from ..basestorage.storage import MultipleQueryResults, RawConnection, SingleQueryResult
901
T = TypeVar("T") class PostgresConnection(RawConnection): def __init__( self, connection_poll: asyncpg.Pool, logger: structlog.typing.FilteringBoundLogger, ): self._pool = connection_poll self._logger = logger async def _fetch( self, sql: str, params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None, con: Optional[asyncpg.Connection] = None, ) -> MultipleQueryResults: st = time.monotonic() request_logger = self._logger.bind(sql=sql, params=params) request_logger.debug("Making query to DB") try: if con is None: async with self._pool.acquire() as con: if params is not None: raw_result = await con.fetch(sql, *params) else: raw_result = await con.fetch(sql) else: if params is not None: raw_result = await con.fetch(sql, *params) else: raw_result = await con.fetch(sql) except Exception as e: # change to appropriate error handling request_logger = request_logger.bind(error=e) request_logger.error(f"Error while making query: {e}") raise e else: results = [i for i in raw_result] finally: request_logger.debug( "Finished query to DB", spent_time_ms=(time.monotonic() - st) * 1000 ) return MultipleQueryResults(results) async def _fetchrow( self, sql: str, params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None, con: Optional[asyncpg.Connection] = None,
T = TypeVar("T") class PostgresConnection(RawConnection): def __init__( self, connection_poll: asyncpg.Pool, logger: structlog.typing.FilteringBoundLogger, ): self._pool = connection_poll self._logger = logger async def _fetch( self, sql: str, params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None, con: Optional[asyncpg.Connection] = None, ) -> MultipleQueryResults: st = time.monotonic() request_logger = self._logger.bind(sql=sql, params=params) request_logger.debug("Making query to DB") try: if con is None: async with self._pool.acquire() as con: if params is not None: raw_result = await con.fetch(sql, *params) else: raw_result = await con.fetch(sql) else: if params is not None: raw_result = await con.fetch(sql, *params) else: raw_result = await con.fetch(sql) except Exception as e: # change to appropriate error handling request_logger = request_logger.bind(error=e) request_logger.error(f"Error while making query: {e}") raise e else: results = [i for i in raw_result] finally: request_logger.debug( "Finished query to DB", spent_time_ms=(time.monotonic() - st) * 1000 ) return MultipleQueryResults(results) async def _fetchrow( self, sql: str, params: Optional[tuple[Any, ...] | list[tuple[Any, ...]]] = None, con: Optional[asyncpg.Connection] = None,
) -> SingleQueryResult:
2
2023-10-28 19:44:58+00:00
2k
Doubling-Open-Source/git_calculator
src/calculators/throughput_calculator.py
[ { "identifier": "git_log", "path": "src/git_ir.py", "snippet": "def git_log():\n def to_obj(line):\n parts = line.split('|', 5)\n parts[3] = parts[3].split() # Multiple parents\n return git_obj.commit(*parts)\n res = [\n to_obj(line)\n for line in git_run('log','--all','--reflog',r'--format=%ct|%H|%T|%P|%ae|%an').stdout.splitlines()\n ]\n git_obj.link_children()\n git_sha.calibrate_min()\n return res" }, { "identifier": "format_git_logs_as_string", "path": "src/git_ir.py", "snippet": "def format_git_logs_as_string(log_entries):\n \"\"\"\n Formats a list of git log entries into a structured string.\n\n Args:\n log_entries (list of str): Each string is a git log entry in the format \"child SHA < parent SHA author email\".\n\n Returns:\n str: A formatted string representing the commit chain.\n \"\"\"\n formatted_output = \"Commit Chain:\\n\"\n for entry in log_entries:\n formatted_output += entry+\"\\n\"\n return formatted_output" } ]
from datetime import datetime from src.git_ir import git_log, format_git_logs_as_string from collections import defaultdict from io import StringIO from subprocess import run as sp_run import logging
880
# Logging configuration logging.basicConfig( level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) def extract_commits_and_authors(logs): """ Extract commits and their authors from git logs. Args: logs (list): List of commit logs. Returns: dict: Dictionary with months as keys and tuples (set of authors, commit count) as values. """ data_by_month = defaultdict(lambda: (set(), 0)) for commit in logs: author_email = commit._author[0] commit_date = datetime.fromtimestamp(commit._when) month_key = f"{commit_date.year}-{commit_date.month}" authors_set, commit_count = data_by_month[month_key] authors_set.add(author_email) data_by_month[month_key] = (authors_set, commit_count + 1) return data_by_month def calculate_throughput(data_by_month): """ Calculate the number of commits per active unique developer per month. Args: data_by_month (dict): Dictionary with months as keys and tuples (set of authors, commit count) as values. Returns: dict: Dictionary with months as keys and throughput (commits per unique developer) as values. """ throughput_stats = {} for month, (authors, commit_count) in data_by_month.items(): if authors: throughput_stats[month] = commit_count / len(authors) else: throughput_stats[month] = 0 return throughput_stats def throughput_stats_to_string(throughput_stats): """ Convert throughput statistics to a CSV-formatted string. Args: throughput_stats (dict): Dictionary with months as keys and throughput values as values. Returns: str: CSV-formatted string. """ buf = StringIO() print("Month,Commits Per Unique Developer", file=buf) for month, throughput in sorted(throughput_stats.items()): print(f"{month},{throughput:.2f}", file=buf) return buf.getvalue() def write_throughput_stats_to_file(throughput_stats, fname='throughput_by_month.csv'): """ Write the throughput statistics to a file. Args: throughput_stats (dict): Dictionary with months as keys and throughput values as values. fname (str): Filename for the output. """ stats_string = throughput_stats_to_string(throughput_stats) with open(fname, 'wt') as fout: fout.write(stats_string) if fname.endswith('.csv'): sp_run(['open', fname]) def monthly_throughput_analysis(): """ Main function to calculate and write monthly throughput statistics. """ logs = git_log()
# Logging configuration logging.basicConfig( level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) def extract_commits_and_authors(logs): """ Extract commits and their authors from git logs. Args: logs (list): List of commit logs. Returns: dict: Dictionary with months as keys and tuples (set of authors, commit count) as values. """ data_by_month = defaultdict(lambda: (set(), 0)) for commit in logs: author_email = commit._author[0] commit_date = datetime.fromtimestamp(commit._when) month_key = f"{commit_date.year}-{commit_date.month}" authors_set, commit_count = data_by_month[month_key] authors_set.add(author_email) data_by_month[month_key] = (authors_set, commit_count + 1) return data_by_month def calculate_throughput(data_by_month): """ Calculate the number of commits per active unique developer per month. Args: data_by_month (dict): Dictionary with months as keys and tuples (set of authors, commit count) as values. Returns: dict: Dictionary with months as keys and throughput (commits per unique developer) as values. """ throughput_stats = {} for month, (authors, commit_count) in data_by_month.items(): if authors: throughput_stats[month] = commit_count / len(authors) else: throughput_stats[month] = 0 return throughput_stats def throughput_stats_to_string(throughput_stats): """ Convert throughput statistics to a CSV-formatted string. Args: throughput_stats (dict): Dictionary with months as keys and throughput values as values. Returns: str: CSV-formatted string. """ buf = StringIO() print("Month,Commits Per Unique Developer", file=buf) for month, throughput in sorted(throughput_stats.items()): print(f"{month},{throughput:.2f}", file=buf) return buf.getvalue() def write_throughput_stats_to_file(throughput_stats, fname='throughput_by_month.csv'): """ Write the throughput statistics to a file. Args: throughput_stats (dict): Dictionary with months as keys and throughput values as values. fname (str): Filename for the output. """ stats_string = throughput_stats_to_string(throughput_stats) with open(fname, 'wt') as fout: fout.write(stats_string) if fname.endswith('.csv'): sp_run(['open', fname]) def monthly_throughput_analysis(): """ Main function to calculate and write monthly throughput statistics. """ logs = git_log()
logging.debug('Logs: %s', format_git_logs_as_string(logs))
1
2023-10-28 13:43:03+00:00
2k
sisl/SceneInformer
sceneinformer/model/encoder.py
[ { "identifier": "MLPPointEncoder", "path": "sceneinformer/model/utils.py", "snippet": "class MLPPointEncoder(nn.Module):\n def __init__(self, config):\n super(MLPPointEncoder, self).__init__()\n self.config = config\n in_dim = config['in_dim'] * 11\n out_dim = config['out_dim']\n hidden_dim = config['hidden_dim']\n self.n_layers = config['n_hidden_layers']\n self.fc_in = nn.Linear(in_dim, hidden_dim)\n hidden_layers = []\n for i in range(self.n_layers):\n hidden_layers.append(nn.Linear(hidden_dim, hidden_dim))\n hidden_layers.append(nn.ReLU())\n self.fc_hidden = nn.Sequential(*hidden_layers)\n self.fc_out = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n B, D, T = x.shape\n \n x = x.reshape(B, D*T)\n x = F.relu(self.fc_in(x))\n if self.n_layers > 0:\n x = self.fc_hidden(x)\n x = self.fc_out(x)\n return x" }, { "identifier": "PointEncoder", "path": "sceneinformer/model/utils.py", "snippet": "class PointEncoder(nn.Module):\n def __init__(self, config):\n super(PointEncoder, self).__init__()\n self.config = config\n in_dim = config['in_dim']\n out_dim = config['out_dim']\n self.conv1 = nn.Conv1d(in_dim, 64, 1)\n self.conv2 = nn.Conv1d(64, 128, 1)\n self.conv3 = nn.Conv1d(128, 1024, 1)\n self.fc1 = nn.Linear(1024, out_dim)\n self.fc2 = nn.Linear(out_dim, out_dim)\n self.enable_temporal_encoding = config['temporal_encoding']\n if self.enable_temporal_encoding:\n self.temporal_encoding = nn.Parameter(torch.randn(1, in_dim, 11))\n\n self.bn1 = nn.BatchNorm1d(64)\n self.bn2 = nn.BatchNorm1d(128)\n self.bn3 = nn.BatchNorm1d(1024)\n self.bn4 = nn.BatchNorm1d(out_dim)\n\n def forward(self, x):\n if self.enable_temporal_encoding:\n x = x + self.temporal_encoding\n\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n\n x = torch.max(x, 2, keepdim=True)[0]\n\n x = x.view(-1, 1024)\n x = F.relu(self.bn4(self.fc1(x)))\n x = self.fc2(x)\n\n x = x.view(-1, self.config['out_dim'])\n return x" }, { "identifier": "count_parameters", "path": "sceneinformer/model/utils.py", "snippet": "def count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)" } ]
import torch import torch.nn as nn import torch.nn.functional as F import lightning.pytorch as pl from sceneinformer.model.utils import MLPPointEncoder, PointEncoder, count_parameters
861
class Encoder(pl.LightningModule): def __init__(self, config: dict) -> None: super(Encoder, self).__init__() self.config = config self.hidden_dim = config['d_model'] if 'point_enc' in config.keys(): if config['point_enc'] == 'mlp': self.veh_encoder = MLPPointEncoder(config['vehicle_encoder']) self.ped_encoder = MLPPointEncoder(config['pedestrian_encoder']) self.bike_encoder = MLPPointEncoder(config['bike_encoder']) elif config['point_enc'] == 'pointnet':
class Encoder(pl.LightningModule): def __init__(self, config: dict) -> None: super(Encoder, self).__init__() self.config = config self.hidden_dim = config['d_model'] if 'point_enc' in config.keys(): if config['point_enc'] == 'mlp': self.veh_encoder = MLPPointEncoder(config['vehicle_encoder']) self.ped_encoder = MLPPointEncoder(config['pedestrian_encoder']) self.bike_encoder = MLPPointEncoder(config['bike_encoder']) elif config['point_enc'] == 'pointnet':
self.veh_encoder = PointEncoder(config['vehicle_encoder'])
1
2023-10-31 08:08:26+00:00
2k
LFhase/GALA
drugood/models/algorithms/groupdro.py
[ { "identifier": "BaseAlgorithm", "path": "drugood/models/algorithms/base.py", "snippet": "class BaseAlgorithm(BaseModule, metaclass=ABCMeta):\n def __init__(self, init_cfg=None):\n super(BaseAlgorithm, self).__init__(init_cfg)\n\n @abstractmethod\n def forward_train(self, input, group, **kwargs):\n \"\"\"Placeholder for Forward function for training.\"\"\"\n pass\n\n @abstractmethod\n def simple_test(self, input, group, **kwargs):\n \"\"\"Placeholder for single case test.\"\"\"\n pass\n\n def forward_test(self, input, group, **kwargs):\n return self.simple_test(input, group, **kwargs)\n\n def forward(self, input, group, return_loss=True, **kwargs):\n if return_loss:\n return self.forward_train(input, group, **kwargs)\n else:\n return self.forward_test(input, group, **kwargs)\n\n def train_step(self, data_batch, optimizer):\n losses = self(**data_batch)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n num_samples=self.get_batch_num(data_batch))\n\n return outputs\n\n def _parse_losses(self, losses):\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n elif isinstance(loss_value, dict):\n for name, value in loss_value.items():\n log_vars[name] = value\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars\n\n def get_batch_num(self, batch):\n if isinstance(batch[\"input\"], torch.Tensor):\n return len(batch[\"input\"].data)\n elif isinstance(batch[\"input\"], torch_geometric.data.Data):\n return batch[\"input\"].num_graphs\n elif isinstance(batch['input'], DGLGraph):\n return batch['input'].batch_size\n elif isinstance(batch['input'], transformers.BatchEncoding):\n return len(batch['input'])\n else:\n raise NotImplementedError" }, { "identifier": "MODELS", "path": "drugood/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_tasker", "path": "drugood/models/builder.py", "snippet": "def build_tasker(cfg):\n return TASKERS.build(cfg)" } ]
import torch import torch_scatter from drugood.models.algorithms.base import BaseAlgorithm from ..builder import MODELS, build_tasker
970
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. @MODELS.register_module() class GroupDRO(BaseAlgorithm): """ Group distributionally robust optimization. Original paper: @inproceedings{sagawa2019distributionally, title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization}, author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy}, booktitle={International Conference on Learning Representations}, year={2019} } The GroupDRO implementation below is adapted from Wilds's implementation: https://github.com/p-lambda/wilds/blob/a7a452c80cad311cf0aabfd59af8348cba1b9861/examples/algorithms/groupDRO.py """ def __init__(self, tasker, num_groups=44930, group_dro_step_size=0.01, ): super().__init__()
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. @MODELS.register_module() class GroupDRO(BaseAlgorithm): """ Group distributionally robust optimization. Original paper: @inproceedings{sagawa2019distributionally, title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization}, author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy}, booktitle={International Conference on Learning Representations}, year={2019} } The GroupDRO implementation below is adapted from Wilds's implementation: https://github.com/p-lambda/wilds/blob/a7a452c80cad311cf0aabfd59af8348cba1b9861/examples/algorithms/groupDRO.py """ def __init__(self, tasker, num_groups=44930, group_dro_step_size=0.01, ): super().__init__()
self.tasker = build_tasker(tasker)
2
2023-10-30 16:57:56+00:00
2k
Graph-and-Geometric-Learning/D4Explainer
main.py
[ { "identifier": "feature_dict", "path": "constants.py", "snippet": "" }, { "identifier": "get_datasets", "path": "utils/dataset.py", "snippet": "def get_datasets(name, root=\"data/\"):\n \"\"\"\n Get preloaded datasets by name\n :param name: name of the dataset\n :param root: root path of the dataset\n :return: train_dataset, test_dataset, val_dataset\n \"\"\"\n if name == \"mutag\":\n folder = os.path.join(root, \"MUTAG\")\n train_dataset = Mutagenicity(folder, mode=\"training\")\n test_dataset = Mutagenicity(folder, mode=\"testing\")\n val_dataset = Mutagenicity(folder, mode=\"evaluation\")\n elif name == \"NCI1\":\n folder = os.path.join(root, \"NCI1\")\n train_dataset = NCI1(folder, mode=\"training\")\n test_dataset = NCI1(folder, mode=\"testing\")\n val_dataset = NCI1(folder, mode=\"evaluation\")\n elif name == \"ba3\":\n folder = os.path.join(root, \"BA3\")\n train_dataset = BA3Motif(folder, mode=\"training\")\n test_dataset = BA3Motif(folder, mode=\"testing\")\n val_dataset = BA3Motif(folder, mode=\"evaluation\")\n elif name == \"BA_shapes\":\n folder = os.path.join(root)\n test_dataset = SynGraphDataset(folder, mode=\"testing\", name=\"BA_shapes\")\n val_dataset = SynGraphDataset(folder, mode=\"evaluating\", name=\"BA_shapes\")\n train_dataset = SynGraphDataset(folder, mode=\"training\", name=\"BA_shapes\")\n elif name == \"Tree_Cycle\":\n folder = os.path.join(root)\n test_dataset = SynGraphDataset(folder, mode=\"testing\", name=\"Tree_Cycle\")\n val_dataset = SynGraphDataset(folder, mode=\"evaluating\", name=\"Tree_Cycle\")\n train_dataset = SynGraphDataset(folder, mode=\"training\", name=\"Tree_Cycle\")\n elif name == \"Tree_Grids\":\n folder = os.path.join(root)\n test_dataset = SynGraphDataset(folder, mode=\"testing\", name=\"Tree_Grids\")\n val_dataset = SynGraphDataset(folder, mode=\"evaluating\", name=\"Tree_Grids\")\n train_dataset = SynGraphDataset(folder, mode=\"training\", name=\"Tree_Grids\")\n elif name == \"bbbp\":\n folder = os.path.join(root, \"bbbp\")\n dataset = bbbp(folder)\n test_dataset = dataset[:200]\n val_dataset = dataset[200:400]\n train_dataset = dataset[400:]\n elif name == \"cornell\":\n folder = os.path.join(root)\n test_dataset = WebDataset(folder, mode=\"testing\", name=name)\n val_dataset = WebDataset(folder, mode=\"evaluating\", name=name)\n train_dataset = WebDataset(folder, mode=\"training\", name=name)\n else:\n raise ValueError\n return train_dataset, val_dataset, test_dataset" } ]
import argparse import torch from torch_geometric.loader import DataLoader from constants import feature_dict, task_type, dataset_choices from explainers import * from gnns import * from utils.dataset import get_datasets
1,234
def parse_args(): parser = argparse.ArgumentParser(description="Train explainers") parser.add_argument("--cuda", type=int, default=0, help="GPU device.") parser.add_argument("--root", type=str, default="results/", help="Result directory.") parser.add_argument("--dataset", type=str, default="Tree_Cycle", choices=dataset_choices) parser.add_argument("--verbose", type=int, default=10) parser.add_argument("--gnn_type", type=str, default="gcn") parser.add_argument("--task", type=str, default="nc") parser.add_argument("--train_batchsize", type=int, default=32) parser.add_argument("--test_batchsize", type=int, default=32) parser.add_argument("--sigma_length", type=int, default=10) parser.add_argument("--epoch", type=int, default=800) parser.add_argument("--feature_in", type=int) parser.add_argument("--data_size", type=int, default=-1) parser.add_argument("--threshold", type=float, default=0.5) parser.add_argument("--alpha_cf", type=float, default=0.5) parser.add_argument("--dropout", type=float, default=0.001) parser.add_argument("--learning_rate", type=float, default=1e-3) parser.add_argument("--lr_decay", type=float, default=0.999) parser.add_argument("--weight_decay", type=float, default=0) parser.add_argument("--prob_low", type=float, default=0.0) parser.add_argument("--prob_high", type=float, default=0.4) parser.add_argument("--sparsity_level", type=float, default=2.5) parser.add_argument("--normalization", type=str, default="instance") parser.add_argument("--num_layers", type=int, default=6) parser.add_argument("--layers_per_conv", type=int, default=1) parser.add_argument("--n_hidden", type=int, default=64) parser.add_argument("--cat_output", type=bool, default=True) parser.add_argument("--residual", type=bool, default=False) parser.add_argument("--noise_mlp", type=bool, default=True) parser.add_argument("--simplified", type=bool, default=False) return parser.parse_args() args = parse_args() args.noise_list = None args.device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available() else "cpu") args.feature_in = feature_dict[args.dataset]
def parse_args(): parser = argparse.ArgumentParser(description="Train explainers") parser.add_argument("--cuda", type=int, default=0, help="GPU device.") parser.add_argument("--root", type=str, default="results/", help="Result directory.") parser.add_argument("--dataset", type=str, default="Tree_Cycle", choices=dataset_choices) parser.add_argument("--verbose", type=int, default=10) parser.add_argument("--gnn_type", type=str, default="gcn") parser.add_argument("--task", type=str, default="nc") parser.add_argument("--train_batchsize", type=int, default=32) parser.add_argument("--test_batchsize", type=int, default=32) parser.add_argument("--sigma_length", type=int, default=10) parser.add_argument("--epoch", type=int, default=800) parser.add_argument("--feature_in", type=int) parser.add_argument("--data_size", type=int, default=-1) parser.add_argument("--threshold", type=float, default=0.5) parser.add_argument("--alpha_cf", type=float, default=0.5) parser.add_argument("--dropout", type=float, default=0.001) parser.add_argument("--learning_rate", type=float, default=1e-3) parser.add_argument("--lr_decay", type=float, default=0.999) parser.add_argument("--weight_decay", type=float, default=0) parser.add_argument("--prob_low", type=float, default=0.0) parser.add_argument("--prob_high", type=float, default=0.4) parser.add_argument("--sparsity_level", type=float, default=2.5) parser.add_argument("--normalization", type=str, default="instance") parser.add_argument("--num_layers", type=int, default=6) parser.add_argument("--layers_per_conv", type=int, default=1) parser.add_argument("--n_hidden", type=int, default=64) parser.add_argument("--cat_output", type=bool, default=True) parser.add_argument("--residual", type=bool, default=False) parser.add_argument("--noise_mlp", type=bool, default=True) parser.add_argument("--simplified", type=bool, default=False) return parser.parse_args() args = parse_args() args.noise_list = None args.device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available() else "cpu") args.feature_in = feature_dict[args.dataset]
args.task = task_type[args.dataset]
0
2023-10-28 19:58:40+00:00
2k
p4p1/havoc-reporter
reporter.py
[ { "identifier": "html_panel_mitre", "path": "html_source/mitre.py", "snippet": "" }, { "identifier": "html_panel_vulns", "path": "html_source/vulnerabilities.py", "snippet": "" }, { "identifier": "network_vulns", "path": "vulns/network_vulnerabilities.py", "snippet": "" }, { "identifier": "active_directory_vulns", "path": "vulns/active_directory.py", "snippet": "" }, { "identifier": "windows_privesc_vulns", "path": "vulns/windows_privesc.py", "snippet": "" } ]
import havocui import webbrowser import os, sys, html, json from html_source.mitre import html_panel_mitre from html_source.vulnerabilities import html_panel_vulns from mitre.tactics import * from vulns.network_vulnerabilities import network_vulns from vulns.active_directory import active_directory_vulns from vulns.windows_privesc import windows_privesc_vulns
1,298
#!/usr/bin/env python # -*- coding: utf-8 -*- # Made by papi # Created on: Wen 25 Oct 2023 # reporter.py # Description: # A havoc extention to provide examples for different vulnerabilities that can # be tested on the infected networks and on the infected machines. # Usage: # To use this script save it on your machine and add it to the script manager of Havoc # inside of: Scripts > Scripts Manager > Load Script config = { "install_path": "" } # if no config file found create one if not os.path.exists(os.path.expanduser("~/") + ".config/havoc-reporter/config.json"): if not os.path.exists(os.path.expanduser("~/") + ".config/havoc-reporter/"): os.mkdir(os.path.expanduser("~/") + ".config/havoc-reporter") with open(os.path.expanduser("~/") + ".config/havoc-reporter/config.json", "w") as outfile: json.dump(config, outfile) else: # use config file with open(os.path.expanduser("~/") + ".config/havoc-reporter/config.json") as outfile: config = json.load(outfile) # specify here the path of the install script... while not os.path.exists(config["install_path"]): new_path = havocui.inputdialog("specify the path of the install", "The path of where this script is installed is wrong please provide with the correct path:") config["install_path"] = new_path.decode('utf-8') with open(os.path.expanduser("~/") + ".config/havoc-reporter/config.json", "w") as outfile: json.dump(config, outfile) sys.path.append(config["install_path"]) tree_display_vulns = None tree_display_mitre = None settings_widget = None net_titles = [item["title"] for item in network_vulns] ad_titles = [item["title"] for item in active_directory_vulns] winpriv_titles = [item["title"] for item in windows_privesc_vulns] # MITRE ATT&CK techniques reconnaissance_titles = [item["technique"] for item in reconnaissance] resource_development_titles = [item["technique"] for item in resource_development] initial_access_titles = [item["technique"] for item in initial_access] execution_titles = [item["technique"] for item in execution] persistence_titles = [item["technique"] for item in persistence] privilege_escalation_titles = [item["technique"] for item in privilege_escalation] defense_evasion_titles = [item["technique"] for item in defense_evasion] credential_access_titles = [item["technique"] for item in credential_access] discovery_titles = [item["technique"] for item in discovery] lateral_movement_titles = [item["technique"] for item in lateral_movement] collection_titles = [item["technique"] for item in collection] command_and_control_titles = [item["technique"] for item in command_and_control] exfiltration_titles = [item["technique"] for item in exfiltration] impact_titles = [item["technique"] for item in impact] # Function to set the HTML of the page def select_tree_vulns(data): global tree_display_vulns title = "" desc = "" image = "" mitre = "" external = [] command = "" if data in net_titles: title = network_vulns[net_titles.index(data)]["title"] desc = network_vulns[net_titles.index(data)]["desc"] image = network_vulns[net_titles.index(data)]["image"] mitre = network_vulns[net_titles.index(data)]["mitre"] external = network_vulns[net_titles.index(data)]["external"] command = network_vulns[net_titles.index(data)]["command"] elif data in ad_titles: title = active_directory_vulns[ad_titles.index(data)]["title"] desc = active_directory_vulns[ad_titles.index(data)]["desc"] image = active_directory_vulns[ad_titles.index(data)]["image"] mitre = active_directory_vulns[ad_titles.index(data)]["mitre"] external = active_directory_vulns[ad_titles.index(data)]["external"] command = active_directory_vulns[ad_titles.index(data)]["command"] elif data in winpriv_titles: title = windows_privesc_vulns[winpriv_titles.index(data)]["title"] desc = windows_privesc_vulns[winpriv_titles.index(data)]["desc"] image = windows_privesc_vulns[winpriv_titles.index(data)]["image"] mitre = windows_privesc_vulns[winpriv_titles.index(data)]["mitre"] external = windows_privesc_vulns[winpriv_titles.index(data)]["external"] command = windows_privesc_vulns[winpriv_titles.index(data)]["command"] if title != "": external_data = "" for obj in external: external_data = external_data + "<li><a style=\"color:#e100ff\" href=\"%s\">%s</a></li>" % (obj["link"], obj["title"])
#!/usr/bin/env python # -*- coding: utf-8 -*- # Made by papi # Created on: Wen 25 Oct 2023 # reporter.py # Description: # A havoc extention to provide examples for different vulnerabilities that can # be tested on the infected networks and on the infected machines. # Usage: # To use this script save it on your machine and add it to the script manager of Havoc # inside of: Scripts > Scripts Manager > Load Script config = { "install_path": "" } # if no config file found create one if not os.path.exists(os.path.expanduser("~/") + ".config/havoc-reporter/config.json"): if not os.path.exists(os.path.expanduser("~/") + ".config/havoc-reporter/"): os.mkdir(os.path.expanduser("~/") + ".config/havoc-reporter") with open(os.path.expanduser("~/") + ".config/havoc-reporter/config.json", "w") as outfile: json.dump(config, outfile) else: # use config file with open(os.path.expanduser("~/") + ".config/havoc-reporter/config.json") as outfile: config = json.load(outfile) # specify here the path of the install script... while not os.path.exists(config["install_path"]): new_path = havocui.inputdialog("specify the path of the install", "The path of where this script is installed is wrong please provide with the correct path:") config["install_path"] = new_path.decode('utf-8') with open(os.path.expanduser("~/") + ".config/havoc-reporter/config.json", "w") as outfile: json.dump(config, outfile) sys.path.append(config["install_path"]) tree_display_vulns = None tree_display_mitre = None settings_widget = None net_titles = [item["title"] for item in network_vulns] ad_titles = [item["title"] for item in active_directory_vulns] winpriv_titles = [item["title"] for item in windows_privesc_vulns] # MITRE ATT&CK techniques reconnaissance_titles = [item["technique"] for item in reconnaissance] resource_development_titles = [item["technique"] for item in resource_development] initial_access_titles = [item["technique"] for item in initial_access] execution_titles = [item["technique"] for item in execution] persistence_titles = [item["technique"] for item in persistence] privilege_escalation_titles = [item["technique"] for item in privilege_escalation] defense_evasion_titles = [item["technique"] for item in defense_evasion] credential_access_titles = [item["technique"] for item in credential_access] discovery_titles = [item["technique"] for item in discovery] lateral_movement_titles = [item["technique"] for item in lateral_movement] collection_titles = [item["technique"] for item in collection] command_and_control_titles = [item["technique"] for item in command_and_control] exfiltration_titles = [item["technique"] for item in exfiltration] impact_titles = [item["technique"] for item in impact] # Function to set the HTML of the page def select_tree_vulns(data): global tree_display_vulns title = "" desc = "" image = "" mitre = "" external = [] command = "" if data in net_titles: title = network_vulns[net_titles.index(data)]["title"] desc = network_vulns[net_titles.index(data)]["desc"] image = network_vulns[net_titles.index(data)]["image"] mitre = network_vulns[net_titles.index(data)]["mitre"] external = network_vulns[net_titles.index(data)]["external"] command = network_vulns[net_titles.index(data)]["command"] elif data in ad_titles: title = active_directory_vulns[ad_titles.index(data)]["title"] desc = active_directory_vulns[ad_titles.index(data)]["desc"] image = active_directory_vulns[ad_titles.index(data)]["image"] mitre = active_directory_vulns[ad_titles.index(data)]["mitre"] external = active_directory_vulns[ad_titles.index(data)]["external"] command = active_directory_vulns[ad_titles.index(data)]["command"] elif data in winpriv_titles: title = windows_privesc_vulns[winpriv_titles.index(data)]["title"] desc = windows_privesc_vulns[winpriv_titles.index(data)]["desc"] image = windows_privesc_vulns[winpriv_titles.index(data)]["image"] mitre = windows_privesc_vulns[winpriv_titles.index(data)]["mitre"] external = windows_privesc_vulns[winpriv_titles.index(data)]["external"] command = windows_privesc_vulns[winpriv_titles.index(data)]["command"] if title != "": external_data = "" for obj in external: external_data = external_data + "<li><a style=\"color:#e100ff\" href=\"%s\">%s</a></li>" % (obj["link"], obj["title"])
tree_display_vulns.setPanel(html_panel_vulns % (title, image, mitre, desc, html.escape(command), external_data))
1
2023-10-25 10:39:20+00:00
2k
amazon-science/adaptive-in-context-learning
MetaICL/utils/download.py
[ { "identifier": "all_settings", "path": "MetaICL/utils/utils.py", "snippet": "def get_checkpoint_id(key):\ndef download_file(_id, dest):" }, { "identifier": "download_file", "path": "MetaICL/utils/utils.py", "snippet": "def download_file(_id, dest):\n if os.path.exists(dest):\n print (\"[Already exists] Skipping\", dest)\n print (\"If you want to download the file in another location, please specify a different path\")\n return\n\n if \"/\" in dest:\n dest_dir = \"/\".join(dest.split(\"/\")[:-1])\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n else:\n dest_dir = \".\"\n\n if _id.startswith(\"https://\"):\n command = \"\"\"wget -O %s %s\"\"\" % (dest, _id)\n else:\n command = \"\"\"wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=%s' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\\\1\\\\n/p')&id=%s\" -O %s && rm -rf /tmp/cookies.txt\"\"\" % (_id, _id, dest)\n\n ret_code = subprocess.run([command], shell=True)\n if ret_code.returncode != 0:\n print(\"Download {} ... [Failed]\".format(dest))\n else:\n print(\"Download {} ... [Success]\".format(dest))\n\n if dest.endswith(\".zip\"):\n command = \"\"\"unzip %s -d %s && rm %s\"\"\" % (dest, dest_dir, dest)\n\n ret_code = subprocess.run([command], shell=True)\n if ret_code.returncode != 0:\n print(\"Unzip {} ... [Failed]\".format(dest))\n else:\n print(\"Unzip {} ... [Success]\".format(dest))" }, { "identifier": "get_checkpoint_id", "path": "MetaICL/utils/utils.py", "snippet": "def get_checkpoint_id(key):\n\n if key in all_methods:\n setting = \"hr_to_lr\"\n method = key\n elif key in [method + \"-inst\" for method in all_methods] or \\\n key in [method + \"-instruction\" for method in all_methods]:\n setting = \"hr_to_lr_inst_all\"\n method = \"-\".join(key.split(\"-\")[:-1])\n elif key in [\"%s/%s\" % (method, setting) for method in all_methods for setting in all_settings]:\n method, setting = key.split(\"/\")\n else:\n return None\n return method, setting, os.path.join(checkpoint_dir, method, setting, \"model.pt\")" } ]
import os import json import argparse import subprocess from .utils import all_settings, all_methods from .utils import download_file, get_checkpoint_id
970
''' script for downloading preprocessed data and trained checkpoints ''' def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--checkpoints", default=False, action="store_true") parser.add_argument("--demo_data", default=False, action="store_true") parser.add_argument("--target_only", default=False, action="store_true") parser.add_argument("--inst", default=False, action="store_true") parser.add_argument("--setting", default="all", type=str, choices=["all"]+all_settings) parser.add_argument("--method", default="all", type=str, choices=["all"]+all_methods) parser.add_argument("--data_dir", type=str, default="data") parser.add_argument("--checkpoint_dir", type=str, default="checkpoints") args = parser.parse_args() return args def main(args): if args.demo_data: download_file("15grQwt3B1tALtUCGtaDI_rwC28LL8wSj", os.path.join(args.data_dir, "financial_phrasebank", "financial_phrasebank_16_100_train.jsonl")) if args.checkpoints: if args.setting=="all": settings = all_settings else: settings = [args.setting] if args.method=="all": methods = all_methods else: methods = [args.method] for method in methods: for setting in settings:
''' script for downloading preprocessed data and trained checkpoints ''' def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--checkpoints", default=False, action="store_true") parser.add_argument("--demo_data", default=False, action="store_true") parser.add_argument("--target_only", default=False, action="store_true") parser.add_argument("--inst", default=False, action="store_true") parser.add_argument("--setting", default="all", type=str, choices=["all"]+all_settings) parser.add_argument("--method", default="all", type=str, choices=["all"]+all_methods) parser.add_argument("--data_dir", type=str, default="data") parser.add_argument("--checkpoint_dir", type=str, default="checkpoints") args = parser.parse_args() return args def main(args): if args.demo_data: download_file("15grQwt3B1tALtUCGtaDI_rwC28LL8wSj", os.path.join(args.data_dir, "financial_phrasebank", "financial_phrasebank_16_100_train.jsonl")) if args.checkpoints: if args.setting=="all": settings = all_settings else: settings = [args.setting] if args.method=="all": methods = all_methods else: methods = [args.method] for method in methods: for setting in settings:
_, _, _id = get_checkpoint_id(method + "/" + setting)
2
2023-10-30 16:34:21+00:00
2k
endo-yuki-t/MAG
ldm/models/diffusion/ddim.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "denoising_step", "path": "ldm/diffusion_utils.py", "snippet": "def denoising_step(xt, c, t, t_next, *,\n model,\n b,\n eta=0.0,\n unconditional_guidance_scale=1., \n unconditional_conditioning=None,\n att_mask = None\n ):\n \n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n et = model.apply_model(xt, t, c, att_mask=att_mask)\n elif att_mask is not None:\n x_in = xt\n t_in = t\n c_in = unconditional_conditioning\n et_uncond = model.apply_model(x_in, t_in, c_in)\n c_in = c\n et = model.apply_model(x_in, t_in, c_in, att_mask=att_mask)\n et = et_uncond + unconditional_guidance_scale * (et - et_uncond)\n else:\n x_in = torch.cat([xt] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n att_mask = None if att_mask is None else torch.cat([att_mask, att_mask])\n et_uncond, et = model.apply_model(x_in, t_in, c_in, att_mask=att_mask).chunk(2)\n et = et_uncond + unconditional_guidance_scale * (et - et_uncond)\n \n # Compute the next x\n at = extract((1.0 - b).cumprod(dim=0), t, xt.shape)\n \n if t_next.sum() == -t_next.shape[0]:\n at_next = torch.ones_like(at)\n else:\n at_next = extract((1.0 - b).cumprod(dim=0), t_next, xt.shape)\n \n xt_next = torch.zeros_like(xt)\n \n x0_t = (xt - et * (1 - at).sqrt()) / at.sqrt()\n if eta == 0:\n xt_next = at_next.sqrt() * x0_t + (1 - at_next).sqrt() * et\n else:\n c1 = eta * ((1 - at / (at_next)) * (1 - at_next) / (1 - at)).sqrt()\n c2 = ((1 - at_next) - c1 ** 2).sqrt()\n xt_next = at_next.sqrt() * x0_t + c2 * et + c1 * torch.randn_like(xt)\n \n return xt_next, x0_t" } ]
import torch import cv2 import matplotlib.pyplot as plt import numpy as np import math from tqdm import tqdm from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor from ldm.diffusion_utils import denoising_step from einops import rearrange, repeat
1,445
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
1
2023-10-27 06:56:37+00:00
2k
LibreTranslate/LexiLang
lexilang/utils.py
[ { "identifier": "get_supported_languages", "path": "lexilang/languages.py", "snippet": "def get_supported_languages():\n return {\n 'afrikaans': 'af', \n 'albanian': 'sq', \n 'arabic': 'ar', \n 'bengali': 'bn', \n 'bulgarian': 'bg', \n 'catalan': 'ca', \n 'chinese': 'zh', \n 'czech': 'cs', \n 'danish': 'da', \n 'dutch': 'nl', \n 'english': 'en', \n 'esperanto': 'eo', \n 'estonian': 'et', \n 'finnish': 'fi', \n 'french': 'fr', \n 'german': 'de', \n 'greek': 'el', \n 'hebrew': 'he', \n 'hindi': 'hi', \n 'hungarian': 'hu', \n 'indonesian': 'id', \n 'italian': 'it', \n 'japanese': 'ja', \n 'kazakh': 'kk', \n 'korean': 'ko', \n 'latvian': 'lv', \n 'lithuanian': 'lt', \n 'macedonian': 'mk', \n 'norwegian': 'nb', \n 'polish': 'pl', \n 'portuguese': 'pt', \n 'romanian': 'ro', \n 'russian': 'ru', \n 'serbian': 'sr', \n 'slovak': 'sk', \n 'slovenian': 'sl', \n 'spanish': 'es', \n 'swedish': 'sv', \n 'thai': 'th', \n 'turkish': 'tr', \n 'ukrainian': 'uk', \n 'vietnamese': 'vi', \n 'farsi': 'fa'\n }" }, { "identifier": "tokenize", "path": "lexilang/languages.py", "snippet": "def tokenize(code, text):\n if code in ['zh', 'ko', 'ja']:\n return list(text)\n else:\n return text.split(\" \")" } ]
import os import pickle from .languages import get_supported_languages, tokenize
647
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) def compile_data(): print("Compiling database...") words = {} langs = get_supported_languages() for name in langs: code = langs[name] with open(os.path.join(root_dir, "dictionaries", f"{name}.txt"), "r", encoding="utf-8") as f: lines = [l.strip().lower() for l in f.read().split("\n")] for l in lines:
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) def compile_data(): print("Compiling database...") words = {} langs = get_supported_languages() for name in langs: code = langs[name] with open(os.path.join(root_dir, "dictionaries", f"{name}.txt"), "r", encoding="utf-8") as f: lines = [l.strip().lower() for l in f.read().split("\n")] for l in lines:
tokens = tokenize(code, l.strip())
1
2023-10-30 13:43:19+00:00
2k
alexeichhorn/typegpt
typegpt/parser.py
[ { "identifier": "LLMOutputFieldMissing", "path": "typegpt/exceptions.py", "snippet": "class LLMOutputFieldMissing(LLMParseException):\n ..." }, { "identifier": "LLMOutputFieldWrongType", "path": "typegpt/exceptions.py", "snippet": "class LLMOutputFieldWrongType(LLMParseException):\n ..." }, { "identifier": "LLMArrayOutputInfo", "path": "typegpt/fields.py", "snippet": "class LLMArrayOutputInfo(Generic[T]):\n instruction: Callable[[ExamplePosition], str]\n min_count: int\n max_count: int | None\n multiline: bool" }, { "identifier": "LLMFieldInfo", "path": "typegpt/fields.py", "snippet": "class LLMFieldInfo(Generic[T]):\n key: str\n name: str\n type_: type[T]\n info: LLMOutputInfo[T] | LLMArrayOutputInfo[T] | LLMArrayElementOutputInfo[T]" }, { "identifier": "LLMOutputInfo", "path": "typegpt/fields.py", "snippet": "class LLMOutputInfo(Generic[T]):\n instruction: str\n default: T | _NoDefaultType\n required: bool\n multiline: bool" }, { "identifier": "LLMArrayElementOutputInfo", "path": "typegpt/fields.py", "snippet": "class LLMArrayElementOutputInfo(Generic[T]):\n instruction: Callable[[ExamplePosition], str]\n default: T | _NoDefaultType\n required: bool\n multiline: bool" }, { "identifier": "symmetric_strip", "path": "typegpt/utils/utils.py", "snippet": "def symmetric_strip(content: str, chars: list[str]) -> str:\n \"\"\"Strips the given chars from the beginning and end of the string, but only if they are present on both sides\"\"\"\n result = content\n did_strip = True\n while did_strip:\n did_strip = False\n\n for char in chars:\n if result.startswith(char) and result.endswith(char):\n result = result[len(char) : -len(char)]\n did_strip = True\n\n return result" } ]
import re from typing import TYPE_CHECKING, Generic, TypeVar from .exceptions import LLMOutputFieldMissing, LLMOutputFieldWrongType from .fields import LLMArrayOutputInfo, LLMFieldInfo, LLMOutputInfo, LLMArrayElementOutputInfo from .utils.utils import symmetric_strip from .utils.type_checker import if_response_type, is_response_type, is_array_element_list_type, if_array_element_list_type from .utils.type_checker import if_response_type, if_array_element_list_type from .base import BaseLLMResponse, BaseLLMArrayElement
781
from __future__ import annotations _Output = TypeVar("_Output", bound="BaseLLMResponse | BaseLLMArrayElement") class Parser(Generic[_Output]): def __init__(self, output_type: type[_Output]): self.output_type = output_type self.fields = self.output_type.__fields__.values() def _regex_for_field(self, field: LLMFieldInfo) -> str: other_fields = [f for f in self.fields if f.key != field.key] other_field_names = ["\n" + f.name for f in other_fields] excluded_lookahead = other_field_names if not field.info.multiline and not is_response_type(field.type_) and not is_array_element_list_type(field.type_): excluded_lookahead.append("\n") # also add current field if it's an array if isinstance(field.info, LLMArrayOutputInfo) or is_response_type(field.type_): excluded_lookahead.append("\n" + field.name) exclusion_cases_regex = "|".join(excluded_lookahead) if exclusion_cases_regex: exclusion_cases_regex = f"(?!{exclusion_cases_regex})"
from __future__ import annotations _Output = TypeVar("_Output", bound="BaseLLMResponse | BaseLLMArrayElement") class Parser(Generic[_Output]): def __init__(self, output_type: type[_Output]): self.output_type = output_type self.fields = self.output_type.__fields__.values() def _regex_for_field(self, field: LLMFieldInfo) -> str: other_fields = [f for f in self.fields if f.key != field.key] other_field_names = ["\n" + f.name for f in other_fields] excluded_lookahead = other_field_names if not field.info.multiline and not is_response_type(field.type_) and not is_array_element_list_type(field.type_): excluded_lookahead.append("\n") # also add current field if it's an array if isinstance(field.info, LLMArrayOutputInfo) or is_response_type(field.type_): excluded_lookahead.append("\n" + field.name) exclusion_cases_regex = "|".join(excluded_lookahead) if exclusion_cases_regex: exclusion_cases_regex = f"(?!{exclusion_cases_regex})"
if isinstance(field.info, LLMOutputInfo) or isinstance(field.info, LLMArrayElementOutputInfo):
4
2023-10-25 22:17:27+00:00
2k
andriioreshk1118/python-storage-main
tests/system/test_transfer_manager.py
[ { "identifier": "transfer_manager", "path": "google/cloud/storage/transfer_manager.py", "snippet": "TM_DEFAULT_CHUNK_SIZE = 32 * 1024 * 1024\nDEFAULT_MAX_WORKERS = 8\nMAX_CRC32C_ZERO_ARRAY_SIZE = 4 * 1024 * 1024\nMETADATA_HEADER_TRANSLATION = {\n \"cacheControl\": \"Cache-Control\",\n \"contentDisposition\": \"Content-Disposition\",\n \"contentEncoding\": \"Content-Encoding\",\n \"contentLanguage\": \"Content-Language\",\n \"customTime\": \"x-goog-custom-time\",\n \"storageClass\": \"x-goog-storage-class\",\n}\nPROCESS = \"process\"\nTHREAD = \"thread\"\nDOWNLOAD_CRC32C_MISMATCH_TEMPLATE = \"\"\"\\\nChecksum mismatch while downloading:\n\n {}\n\nThe object metadata indicated a crc32c checksum of:\n\n {}\n\nbut the actual crc32c checksum of the downloaded contents was:\n\n {}\n\"\"\"\ndef _deprecate_threads_param(func):\n def convert_threads_or_raise(*args, **kwargs):\ndef upload_many(\n file_blob_pairs,\n skip_if_exists=False,\n upload_kwargs=None,\n threads=None,\n deadline=None,\n raise_exception=False,\n worker_type=PROCESS,\n max_workers=DEFAULT_MAX_WORKERS,\n):\ndef download_many(\n blob_file_pairs,\n download_kwargs=None,\n threads=None,\n deadline=None,\n raise_exception=False,\n worker_type=PROCESS,\n max_workers=DEFAULT_MAX_WORKERS,\n *,\n skip_if_exists=False,\n):\ndef upload_many_from_filenames(\n bucket,\n filenames,\n source_directory=\"\",\n blob_name_prefix=\"\",\n skip_if_exists=False,\n blob_constructor_kwargs=None,\n upload_kwargs=None,\n threads=None,\n deadline=None,\n raise_exception=False,\n worker_type=PROCESS,\n max_workers=DEFAULT_MAX_WORKERS,\n *,\n additional_blob_attributes=None,\n):\ndef download_many_to_path(\n bucket,\n blob_names,\n destination_directory=\"\",\n blob_name_prefix=\"\",\n download_kwargs=None,\n threads=None,\n deadline=None,\n create_directories=True,\n raise_exception=False,\n worker_type=PROCESS,\n max_workers=DEFAULT_MAX_WORKERS,\n *,\n skip_if_exists=False,\n):\ndef download_chunks_concurrently(\n blob,\n filename,\n chunk_size=TM_DEFAULT_CHUNK_SIZE,\n download_kwargs=None,\n deadline=None,\n worker_type=PROCESS,\n max_workers=DEFAULT_MAX_WORKERS,\n *,\n crc32c_checksum=True,\n):\ndef upload_chunks_concurrently(\n filename,\n blob,\n content_type=None,\n chunk_size=TM_DEFAULT_CHUNK_SIZE,\n deadline=None,\n worker_type=PROCESS,\n max_workers=DEFAULT_MAX_WORKERS,\n *,\n checksum=\"md5\",\n timeout=_DEFAULT_TIMEOUT,\n retry=DEFAULT_RETRY,\n):\ndef _upload_part(\n maybe_pickled_client,\n url,\n upload_id,\n filename,\n start,\n end,\n part_number,\n checksum,\n headers,\n retry,\n):\ndef _headers_from_metadata(metadata):\ndef _download_and_write_chunk_in_place(\n maybe_pickled_blob, filename, start, end, download_kwargs, crc32c_checksum\n):\n def __init__(self, filename, start_position, crc32c_enabled):\n def write(self, chunk):\n def crc(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, tb):\ndef _call_method_on_maybe_pickled_blob(\n maybe_pickled_blob, method_name, *args, **kwargs\n):\ndef _reduce_client(cl):\ndef _pickle_client(obj):\ndef _get_pool_class_and_requirements(worker_type):\ndef _digest_ordered_checksum_and_size_pairs(checksum_and_size_pairs):\n def __new__(cls, id, *args, **kwargs):\nclass _ChecksummingSparseFileWrapper:\nclass _LazyClient:" }, { "identifier": "_base64_md5hash", "path": "google/cloud/storage/_helpers.py", "snippet": "def _base64_md5hash(buffer_object):\n \"\"\"Get MD5 hash of bytes (as base64).\n\n :type buffer_object: bytes buffer\n :param buffer_object: Buffer containing bytes used to compute an MD5\n hash (as base64).\n\n :rtype: str\n :returns: A base64 encoded digest of the MD5 hash.\n \"\"\"\n hash_obj = md5()\n _write_buffer_to_hash(buffer_object, hash_obj)\n digest_bytes = hash_obj.digest()\n return base64.b64encode(digest_bytes)" } ]
import tempfile import os import pytest import datetime import gzip from google.cloud.storage import transfer_manager from google.cloud.storage._helpers import _base64_md5hash from google.api_core import exceptions from google.cloud._helpers import UTC
1,412
# coding=utf-8 # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DEADLINE = 30 encryption_key = "b23ff11bba187db8c37077e6af3b25b8" def _check_blob_hash(blob, info): md5_hash = blob.md5_hash if not isinstance(md5_hash, bytes): md5_hash = md5_hash.encode("utf-8") assert md5_hash == info["hash"] def test_upload_many(shared_bucket, file_data, blobs_to_delete): FILE_BLOB_PAIRS = [ (file_data["simple"]["path"], shared_bucket.blob("simple1")), (file_data["simple"]["path"], shared_bucket.blob("simple2")), ]
# coding=utf-8 # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DEADLINE = 30 encryption_key = "b23ff11bba187db8c37077e6af3b25b8" def _check_blob_hash(blob, info): md5_hash = blob.md5_hash if not isinstance(md5_hash, bytes): md5_hash = md5_hash.encode("utf-8") assert md5_hash == info["hash"] def test_upload_many(shared_bucket, file_data, blobs_to_delete): FILE_BLOB_PAIRS = [ (file_data["simple"]["path"], shared_bucket.blob("simple1")), (file_data["simple"]["path"], shared_bucket.blob("simple2")), ]
results = transfer_manager.upload_many(
0
2023-10-31 10:36:21+00:00
2k
TopGuru777/badsecrets
badsecrets/modules/aspnet_viewstate.py
[ { "identifier": "unpad", "path": "badsecrets/helpers.py", "snippet": "def unpad(s):\n return s[: -ord(s[len(s) - 1 :])]" }, { "identifier": "sp800_108_derivekey", "path": "badsecrets/helpers.py", "snippet": "def sp800_108_derivekey(key, label, context, keyLengthInBits):\n lblcnt = 0 if label is None else len(label)\n ctxcnt = 0 if context is None else len(context)\n buffer = b\"\\x00\" * (4 + lblcnt + 1 + ctxcnt + 4)\n if lblcnt != 0:\n buffer = buffer[:4] + label + buffer[4 + lblcnt :]\n if ctxcnt != 0:\n buffer = buffer[: 5 + lblcnt] + context + buffer[5 + lblcnt + ctxcnt :]\n buffer = buffer[: 5 + lblcnt + ctxcnt] + _writeuint(keyLengthInBits) + buffer[5 + lblcnt + ctxcnt + 4 :]\n v = int(keyLengthInBits / 8)\n res = b\"\\x00\" * v\n num = 1\n while v > 0:\n buffer = _writeuint(num) + buffer[4:]\n h = hmac.new(key, buffer, hashlib.sha512)\n hash = h.digest()\n cnt = min(v, len(hash))\n res = hash[:cnt] + res[cnt:]\n v -= cnt\n num += 1\n return res" }, { "identifier": "sp800_108_get_key_derivation_parameters", "path": "badsecrets/helpers.py", "snippet": "def sp800_108_get_key_derivation_parameters(primary_purpose, specific_purposes):\n derived_key_label = primary_purpose.encode(\"utf-8\")\n derived_key_context = b\"\".join([write_vlq_string(purpose) for purpose in specific_purposes])\n return derived_key_label, derived_key_context" }, { "identifier": "BadsecretsBase", "path": "badsecrets/base.py", "snippet": "class BadsecretsBase:\n def __init__(self, custom_resource=None, **kwargs):\n def check_secret(self, secret):\n def attempt_decompress(value):\n def get_description(self):\n def get_product_from_carve(self, regex_search):\n def get_hashcat_commands(self, s):\n def load_resources(self, resource_list):\n def carve_to_check_secret(self, s, **kwargs):\n def carve_regex(self):\n def carve(self, body=None, cookies=None, headers=None, requests_response=None, **kwargs):\n def identify(self, product):\n def search_dict(d, query):\ndef hashcat_all_modules(product, detecting_module=None, *args):\ndef check_all_modules(*args, **kwargs):\ndef carve_all_modules(**kwargs):" } ]
import re import hmac import struct import base64 import hashlib import binascii from Crypto.Cipher import AES from Crypto.Cipher import DES from Crypto.Cipher import DES3 from viewstate import ViewState from contextlib import suppress from urllib.parse import urlsplit, urlparse from badsecrets.helpers import unpad, sp800_108_derivekey, sp800_108_get_key_derivation_parameters from viewstate.exceptions import ViewStateException from badsecrets.base import BadsecretsBase, generic_base64_regex
1,372
class ASPNET_Viewstate(BadsecretsBase): check_secret_args = 3 identify_regex = generic_base64_regex description = {"product": "ASP.NET Viewstate", "secret": "ASP.NET MachineKey", "severity": "CRITICAL"} def carve_regex(self): return re.compile( r"<input.+__VIEWSTATE\"\svalue=\"(.+)\"[\S\s]+<input.+__VIEWSTATEGENERATOR\"\svalue=\"(\w+)\"" ) def carve_to_check_secret(self, s, url=None): if len(s.groups()) == 2: r = self.check_secret(s.groups()[0], s.groups()[1], url) return r @staticmethod def valid_preamble(sourcebytes): if sourcebytes[0:2] == b"\xff\x01": return True return False def viewstate_decrypt(self, ekey_bytes, hash_alg, viewstate_B64, url, mode): viewstate_bytes = base64.b64decode(viewstate_B64) vs_size = len(viewstate_bytes) dec_algos = set() hash_size = self.hash_sizes[hash_alg] if (vs_size - hash_size) % AES.block_size == 0: dec_algos.add("AES") if (vs_size - hash_size) % DES.block_size == 0: dec_algos.add("DES") dec_algos.add("3DES") for dec_algo in list(dec_algos): with suppress(ValueError): if dec_algo == "AES": block_size = AES.block_size iv = viewstate_bytes[0:block_size] if mode == "DOTNET45" and url: s = Simulate_dotnet45_kdf_context_parameters(url) label, context = sp800_108_get_key_derivation_parameters( "WebForms.HiddenFieldPageStatePersister.ClientState", s.get_specific_purposes() ) ekey_bytes = sp800_108_derivekey(ekey_bytes, label, context, (len(ekey_bytes) * 8)) cipher = AES.new(ekey_bytes, AES.MODE_CBC, iv) blockpadlen_raw = len(ekey_bytes) % AES.block_size if blockpadlen_raw == 0: blockpadlen = block_size else: blockpadlen = blockpadlen_raw elif dec_algo == "3DES": block_size = DES3.block_size iv = viewstate_bytes[0:block_size] cipher = DES3.new(ekey_bytes[:24], DES3.MODE_CBC, iv) blockpadlen = 16 elif dec_algo == "DES": block_size = DES.block_size iv = viewstate_bytes[0:block_size] cipher = DES.new(ekey_bytes[:8], DES.MODE_CBC, iv) blockpadlen = 0 encrypted_raw = viewstate_bytes[block_size:-hash_size] decrypted_raw = cipher.decrypt(encrypted_raw) with suppress(TypeError): if mode == "DOTNET45":
class ASPNET_Viewstate(BadsecretsBase): check_secret_args = 3 identify_regex = generic_base64_regex description = {"product": "ASP.NET Viewstate", "secret": "ASP.NET MachineKey", "severity": "CRITICAL"} def carve_regex(self): return re.compile( r"<input.+__VIEWSTATE\"\svalue=\"(.+)\"[\S\s]+<input.+__VIEWSTATEGENERATOR\"\svalue=\"(\w+)\"" ) def carve_to_check_secret(self, s, url=None): if len(s.groups()) == 2: r = self.check_secret(s.groups()[0], s.groups()[1], url) return r @staticmethod def valid_preamble(sourcebytes): if sourcebytes[0:2] == b"\xff\x01": return True return False def viewstate_decrypt(self, ekey_bytes, hash_alg, viewstate_B64, url, mode): viewstate_bytes = base64.b64decode(viewstate_B64) vs_size = len(viewstate_bytes) dec_algos = set() hash_size = self.hash_sizes[hash_alg] if (vs_size - hash_size) % AES.block_size == 0: dec_algos.add("AES") if (vs_size - hash_size) % DES.block_size == 0: dec_algos.add("DES") dec_algos.add("3DES") for dec_algo in list(dec_algos): with suppress(ValueError): if dec_algo == "AES": block_size = AES.block_size iv = viewstate_bytes[0:block_size] if mode == "DOTNET45" and url: s = Simulate_dotnet45_kdf_context_parameters(url) label, context = sp800_108_get_key_derivation_parameters( "WebForms.HiddenFieldPageStatePersister.ClientState", s.get_specific_purposes() ) ekey_bytes = sp800_108_derivekey(ekey_bytes, label, context, (len(ekey_bytes) * 8)) cipher = AES.new(ekey_bytes, AES.MODE_CBC, iv) blockpadlen_raw = len(ekey_bytes) % AES.block_size if blockpadlen_raw == 0: blockpadlen = block_size else: blockpadlen = blockpadlen_raw elif dec_algo == "3DES": block_size = DES3.block_size iv = viewstate_bytes[0:block_size] cipher = DES3.new(ekey_bytes[:24], DES3.MODE_CBC, iv) blockpadlen = 16 elif dec_algo == "DES": block_size = DES.block_size iv = viewstate_bytes[0:block_size] cipher = DES.new(ekey_bytes[:8], DES.MODE_CBC, iv) blockpadlen = 0 encrypted_raw = viewstate_bytes[block_size:-hash_size] decrypted_raw = cipher.decrypt(encrypted_raw) with suppress(TypeError): if mode == "DOTNET45":
decrypt = unpad(decrypted_raw)
0
2023-10-30 12:52:39+00:00
2k
asprenger/ray_vllm_inference
tests/prompt_format_test.py
[ { "identifier": "Message", "path": "ray_vllm_inference/prompt_format.py", "snippet": "class Message(BaseModel):\n role: Literal[\"system\", \"assistant\", \"user\"]\n content: str\n\n def __str__(self):\n return self.content" }, { "identifier": "Prompt", "path": "ray_vllm_inference/prompt_format.py", "snippet": "class Prompt(BaseModel):\n prompt: Union[str, List[Message]]\n use_prompt_format: bool = True\n parameters: Optional[Union[Dict[str, Any], BaseModel]] = None" }, { "identifier": "PromptFormat", "path": "ray_vllm_inference/prompt_format.py", "snippet": "class PromptFormat(BaseModel):\n system: str\n assistant: str\n trailing_assistant: str\n user: str\n\n default_system_message: str = \"\"\n system_in_user: bool = False\n add_system_tags_even_if_message_is_empty: bool = False\n strip_whitespace: bool = True\n\n @validator(\"system\")\n def check_system(cls, value):\n assert value and (\n \"{instruction}\" in value\n ), \"system must be a string containing '{instruction}'\"\n return value\n\n @validator(\"assistant\")\n def check_assistant(cls, value):\n assert (\n value and \"{instruction}\" in value\n ), \"assistant must be a string containing '{instruction}'\"\n return value\n\n @validator(\"user\")\n def check_user(cls, value):\n assert value and (\n \"{instruction}\" in value\n ), \"user must be a string containing '{instruction}'\"\n return value\n\n @root_validator\n def check_user_system_in_user(cls, values):\n if values[\"system_in_user\"]:\n assert (\n \"{system}\" in values[\"user\"]\n ), \"If system_in_user=True, user must contain '{system}'\"\n return values\n\n def generate_prompt(self, messages: Union[Prompt, List[Message]]) -> str:\n if isinstance(messages, Prompt):\n if isinstance(messages.prompt, str):\n if not messages.use_prompt_format:\n return messages.prompt\n new_messages = []\n if self.default_system_message:\n new_messages.append(\n Message(role=\"system\", content=self.default_system_message),\n )\n new_messages.append(\n Message(role=\"user\", content=messages.prompt),\n )\n messages = new_messages\n else:\n messages = messages.prompt\n\n # Get system message\n system_message_index = -1\n for i, message in enumerate(messages):\n if message.role == \"system\":\n if system_message_index == -1:\n system_message_index = i\n else:\n raise ValueError(\"Only one system message can be specified.\")\n\n system_message = None\n if system_message_index != -1:\n system_message = messages.pop(system_message_index)\n elif (\n self.default_system_message or self.add_system_tags_even_if_message_is_empty\n ):\n system_message = Message(role=\"system\", content=self.default_system_message)\n if (\n system_message is not None\n and (\n system_message.content or self.add_system_tags_even_if_message_is_empty\n )\n and not self.system_in_user\n ):\n messages.insert(0, system_message)\n\n prompt = []\n for message in messages:\n message_content = message.content\n if self.strip_whitespace:\n message_content = message_content.strip()\n if message.role == \"system\":\n prompt.append(self.system.format(instruction=message_content))\n elif message.role == \"user\":\n if self.system_in_user:\n prompt.append(\n self.user.format(\n instruction=message_content,\n system=self.system.format(\n instruction=system_message.content\n )\n if system_message\n else \"\",\n )\n )\n system_message = None\n else:\n prompt.append(self.user.format(instruction=message_content))\n elif message.role == \"assistant\":\n prompt.append(self.assistant.format(instruction=message_content))\n 
prompt.append(self.trailing_assistant)\n return \"\".join(prompt)" } ]
import unittest import pytest from pydantic import ValidationError from ray_vllm_inference.prompt_format import Message, Prompt, PromptFormat
1,231
# Adapted from: # https://github.com/ray-project/ray-llm/blob/master/rayllm/common/models.py class PromptFormatCases(unittest.TestCase): def test_prompt_format_with_prompt_obj(self): prompt_format = PromptFormat( system="[system] {instruction} [/system] ", assistant="[assistant] {instruction} [/assistant] ", trailing_assistant="[assistant]", user="[user] {instruction} [/user] ", default_system_message="", ) prompt = prompt_format.generate_prompt( Prompt( prompt="hello1", use_prompt_format=True, ) ) assert prompt == "[user] hello1 [/user] [assistant]" prompt = prompt_format.generate_prompt( Prompt( prompt="hello1", use_prompt_format=False, ) ) assert prompt == "hello1" def test_prompt_format(self): prompt_format = PromptFormat( system="[system] {instruction} [/system] ", assistant="[assistant] {instruction} [/assistant] ", trailing_assistant="[assistant]", user="[user] {instruction} [/user] ", default_system_message="", ) # Only user, no system
# Adapted from: # https://github.com/ray-project/ray-llm/blob/master/rayllm/common/models.py class PromptFormatCases(unittest.TestCase): def test_prompt_format_with_prompt_obj(self): prompt_format = PromptFormat( system="[system] {instruction} [/system] ", assistant="[assistant] {instruction} [/assistant] ", trailing_assistant="[assistant]", user="[user] {instruction} [/user] ", default_system_message="", ) prompt = prompt_format.generate_prompt( Prompt( prompt="hello1", use_prompt_format=True, ) ) assert prompt == "[user] hello1 [/user] [assistant]" prompt = prompt_format.generate_prompt( Prompt( prompt="hello1", use_prompt_format=False, ) ) assert prompt == "hello1" def test_prompt_format(self): prompt_format = PromptFormat( system="[system] {instruction} [/system] ", assistant="[assistant] {instruction} [/assistant] ", trailing_assistant="[assistant]", user="[user] {instruction} [/user] ", default_system_message="", ) # Only user, no system
messages = [Message(role="user", content="hello1")]
0
2023-10-28 23:17:59+00:00
2k
fu-feng/GRL
algos/ppo.py
[ { "identifier": "Actor", "path": "algos/network.py", "snippet": "class Actor(Network):\n def __init__(self, layer_num, input_dim, output_dim, hidden_dim, activation_function = torch.tanh,last_activation_mu = None, last_activation_std = None, is_actor=True):\n super(Actor, self).__init__(layer_num, input_dim, output_dim, hidden_dim, activation_function ,last_activation_mu, is_actor, last_activation_std)\n\n def forward(self, x):\n mu, std = self._forward(x)\n return mu, std" }, { "identifier": "Critic", "path": "algos/network.py", "snippet": "class Critic(Network):\n def __init__(self, layer_num, input_dim, output_dim, hidden_dim, activation_function, last_activation = None, is_actor=False):\n super(Critic, self).__init__(layer_num, input_dim, output_dim, hidden_dim, activation_function ,last_activation, is_actor)\n \n def forward(self, *x):\n x = torch.cat(x,-1)\n return self._forward(x)" }, { "identifier": "ReplayBuffer", "path": "utils/utils.py", "snippet": "class ReplayBuffer():\n def __init__(self, action_prob_exist, max_size, state_dim, num_action):\n self.max_size = max_size\n self.data_idx = 0\n self.action_prob_exist = action_prob_exist\n self.data = {}\n \n self.data['state'] = np.zeros((self.max_size, state_dim))\n self.data['action'] = np.zeros((self.max_size, num_action))\n self.data['reward'] = np.zeros((self.max_size, 1))\n self.data['next_state'] = np.zeros((self.max_size, state_dim))\n self.data['done'] = np.zeros((self.max_size, 1))\n if self.action_prob_exist :\n self.data['log_prob'] = np.zeros((self.max_size, 1))\n def put_data(self, transition):\n idx = self.data_idx % self.max_size\n self.data['state'][idx] = transition['state']\n self.data['action'][idx] = transition['action']\n self.data['reward'][idx] = transition['reward']\n self.data['next_state'][idx] = transition['next_state']\n self.data['done'][idx] = float(transition['done'])\n if self.action_prob_exist :\n self.data['log_prob'][idx] = transition['log_prob']\n \n self.data_idx += 1\n def sample(self, shuffle, batch_size = None):\n if shuffle :\n sample_num = min(self.max_size, self.data_idx)\n rand_idx = np.random.choice(sample_num, batch_size,replace=False)\n sampled_data = {}\n sampled_data['state'] = self.data['state'][rand_idx]\n sampled_data['action'] = self.data['action'][rand_idx]\n sampled_data['reward'] = self.data['reward'][rand_idx]\n sampled_data['next_state'] = self.data['next_state'][rand_idx]\n sampled_data['done'] = self.data['done'][rand_idx]\n if self.action_prob_exist :\n sampled_data['log_prob'] = self.data['log_prob'][rand_idx]\n return sampled_data\n else:\n return self.data\n def size(self):\n return min(self.max_size, self.data_idx)" }, { "identifier": "make_mini_batch", "path": "utils/utils.py", "snippet": "def make_mini_batch(*value):\n mini_batch_size = value[0]\n full_batch_size = len(value[1])\n full_indices = np.arange(full_batch_size)\n np.random.shuffle(full_indices)\n for i in range(full_batch_size // mini_batch_size):\n indices = full_indices[mini_batch_size*i : mini_batch_size*(i+1)]\n yield [x[indices] for x in value[1:]]" }, { "identifier": "convert_to_tensor", "path": "utils/utils.py", "snippet": "def convert_to_tensor(*value):\n device = value[0]\n return [torch.tensor(x).float().to(device) for x in value[1:]]" } ]
from algos.network import Actor, Critic from utils.utils import ReplayBuffer, make_mini_batch, convert_to_tensor import torch import torch.nn as nn import torch.optim as optim
976
class PPO(nn.Module): def __init__(self, device, state_dim, action_dim, args): super(PPO,self).__init__() self.args = args
class PPO(nn.Module): def __init__(self, device, state_dim, action_dim, args): super(PPO,self).__init__() self.args = args
self.data = ReplayBuffer(action_prob_exist = True, max_size = self.args.traj_length, state_dim = state_dim, num_action = action_dim)
2
2023-10-27 07:39:01+00:00
2k
CoderMungan/Otel
OtelIcerik/forms.py
[ { "identifier": "OtelOda", "path": "OtelIcerik/models.py", "snippet": "class OtelOda(models.Model):\n otel = models.ForeignKey(OtelYonetim, verbose_name=(\"Otel Adı\"), on_delete=models.CASCADE)\n odaNumarasi = models.CharField((\"Oda Numarası\"), max_length=5)\n odaTipi = models.CharField((\"Oda Tipi\"), max_length=50)\n odaTemizMi = models.BooleanField((\"Oda Temiz Mi?\"), default=True, blank=True)\n odaArizaliMi = models.BooleanField((\"Oda Arızalı Mı?\"), default=False, blank=True)\n odaRezerveMi = models.BooleanField(('Oda Rezerve Mi?'), default=False, blank=True)\n odaBosMu = models.BooleanField((\"Oda Boş Mu?\"), default=True, blank=True)\n odaProblemi = models.TextField((\"Odanın Problemi Nedir?\"), max_length=500, blank=True)\n\n class Meta:\n verbose_name = \"Otel Oda Bilgileri\"\n verbose_name_plural = \"Otel Oda Bilgileri\"\n\n def __str__(self) -> str:\n return self.odaNumarasi" }, { "identifier": "KonukBilgileri", "path": "OtelIcerik/models.py", "snippet": "class KonukBilgileri(models.Model):\n otel = models.ForeignKey(OtelYonetim, verbose_name=(\"Otel Adı\"), on_delete=models.CASCADE)\n firstname = models.CharField((\"Müşterinin Adı\"), max_length=50)\n lastname = models.CharField((\"Müşterinin Soyadı\"), max_length=50)\n birthday = models.CharField((\"Müşteri Doğum Tarihi\"), max_length=50)\n uyrugu = models.CharField((\"Müşterinin Uyruğu\"), max_length=50)\n musteriTC = models.CharField((\"Müşteri TC Numarası\"), max_length=11, blank=True)\n musteriID = models.CharField((\"Müşteri Passaport Numarası\"), max_length=50, blank = True)\n musteriNotu = models.TextField((\"Müşteri Notu\"), max_length=250, default=\"\", blank=True)\n fiyat = models.DecimalField((\"Fiyat\"), max_digits=10, decimal_places=2, default= 000.00, blank=True)\n kur = models.CharField((\"Para Birimi\"), max_length=50, default=\"TRY\", blank=True)\n\n class Meta:\n verbose_name = \"Konuk Bilgileri\"\n verbose_name_plural = \"Konuk Bilgileri\"\n\n\n def __str__(self) -> str:\n return self.firstname + \" \" + self.lastname" }, { "identifier": "KonukCheckInveCheckOut", "path": "OtelIcerik/models.py", "snippet": "class KonukCheckInveCheckOut(models.Model):\n otel = models.ForeignKey(OtelYonetim, verbose_name=(\"Otel Adı\"), on_delete=models.CASCADE)\n konuk = models.ForeignKey(KonukBilgileri, verbose_name=(\"Konuk Bilgileri\"), on_delete=models.CASCADE)\n oda = models.ForeignKey(OtelOda, verbose_name=(\"Otel Oda\"), on_delete=models.CASCADE)\n checkIn = models.DateTimeField((\"Check-In Zamanı\"), auto_now=False, auto_now_add=False)\n checkOut = models.DateTimeField((\"Check-Out Zamanı\"), auto_now=False, auto_now_add=False)\n color = models.CharField((\"Renk\"), max_length=50, default=\"#940101\", blank=True)\n \n class Meta:\n verbose_name = \"CheckInCheckOut\"\n verbose_name_plural = \"CheckInCheckOut\"\n\n def __str__(self) -> str:\n return str(self.konuk)" } ]
from django import forms from .models import OtelOda, KonukBilgileri, KonukCheckInveCheckOut
957
class UpdateOtelOdaForm(forms.ModelForm): class Meta: model = OtelOda fields = ["odaNumarasi","odaTipi","odaTemizMi","odaArizaliMi","odaBosMu","odaProblemi",] class UpdateMusteriDetay(forms.ModelForm): class Meta:
class UpdateOtelOdaForm(forms.ModelForm): class Meta: model = OtelOda fields = ["odaNumarasi","odaTipi","odaTemizMi","odaArizaliMi","odaBosMu","odaProblemi",] class UpdateMusteriDetay(forms.ModelForm): class Meta:
model = KonukBilgileri
1
2023-10-26 02:42:23+00:00
2k
lukas-clarke/pyEight
pyeight/eight.py
[ { "identifier": "Token", "path": "pyeight/structs.py", "snippet": "class Token:\n bearer_token: str\n expiration: float\n main_id: str" }, { "identifier": "User", "path": "pyeight/structs.py", "snippet": "class User:\n def __init__(self,\n user_name: str,\n user_id: str,\n user_side: str):\n self.user_name = user_name.lower()\n self.user_id = user_id\n self.user_side = user_side\n\n def match(self, match_str):\n if match_str.lower() == self.user_name or match_str == self.user_side:\n return self.user_id\n return False" } ]
import asyncio import time import httpx import atexit import logging from aiohttp.client import ClientError, ClientSession, ClientTimeout from pyeight.constants import * from pyeight.structs import Token, User
793
_LOGGER = logging.getLogger(__name__) CLIENT_TIMEOUT = ClientTimeout(total=DEFAULT_TIMEOUT) class EightSleep(): def __init__( self, email: str, password: str, client_id: str, client_secret: str): self.email = email self.password = password self.client_id = client_id self.client_secret = client_secret self._api_session = None self._token = None self._users = [] # Stop on exit atexit.register(self.at_exit) def at_exit(self) -> None: """Run at exit.""" try: loop = asyncio.get_running_loop() asyncio.run_coroutine_threadsafe(self.stop(), loop).result() except RuntimeError: asyncio.run(self.stop()) async def set_heating_level(self, level: int, user_id: str): """ set heating level from -100 to 100 ``user_id`` can either be the name of the user or the side of the bed""" await self.turn_on_side(user_id) # Turn on side before setting temperature url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"currentLevel": level} await self.api_request("PUT", url, data=data) async def set_heating_and_duration_level(self, level: int, duration_seconds, user_id: str): """ set heating level from -100 to 100 for a period of time ``user_id`` can either be the name of the user or the side of the bed""" await self.set_heating_level(level, user_id) # Have to set temperature before duration url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"timeBased": {"level": level, "durationSeconds": duration_seconds}} await self.api_request("PUT", url, data=data) async def turn_on_side(self, user_id: str): """ Turns on the side of the user ``user_id`` can either be the name of the user or the side of the bed""" url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"currentState": {"type": "smart"}} await self.api_request("PUT", url, data=data) async def turn_off_side(self, user_id: str): """ Turns off the side of the user ``user_id`` can either be the name of the user or the side of the bed""" url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"currentState": {"type": "off"}} await self.api_request("PUT", url, data=data)
_LOGGER = logging.getLogger(__name__) CLIENT_TIMEOUT = ClientTimeout(total=DEFAULT_TIMEOUT) class EightSleep(): def __init__( self, email: str, password: str, client_id: str, client_secret: str): self.email = email self.password = password self.client_id = client_id self.client_secret = client_secret self._api_session = None self._token = None self._users = [] # Stop on exit atexit.register(self.at_exit) def at_exit(self) -> None: """Run at exit.""" try: loop = asyncio.get_running_loop() asyncio.run_coroutine_threadsafe(self.stop(), loop).result() except RuntimeError: asyncio.run(self.stop()) async def set_heating_level(self, level: int, user_id: str): """ set heating level from -100 to 100 ``user_id`` can either be the name of the user or the side of the bed""" await self.turn_on_side(user_id) # Turn on side before setting temperature url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"currentLevel": level} await self.api_request("PUT", url, data=data) async def set_heating_and_duration_level(self, level: int, duration_seconds, user_id: str): """ set heating level from -100 to 100 for a period of time ``user_id`` can either be the name of the user or the side of the bed""" await self.set_heating_level(level, user_id) # Have to set temperature before duration url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"timeBased": {"level": level, "durationSeconds": duration_seconds}} await self.api_request("PUT", url, data=data) async def turn_on_side(self, user_id: str): """ Turns on the side of the user ``user_id`` can either be the name of the user or the side of the bed""" url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"currentState": {"type": "smart"}} await self.api_request("PUT", url, data=data) async def turn_off_side(self, user_id: str): """ Turns off the side of the user ``user_id`` can either be the name of the user or the side of the bed""" url = APP_API_URL + f"v1/users/{self.match_user(user_id)}/temperature" data = {"currentState": {"type": "off"}} await self.api_request("PUT", url, data=data)
async def _get_auth(self) -> Token:
0
2023-10-26 21:11:20+00:00
2k
loliverhennigh/PhantomGaze
phantomgaze/render/camera.py
[ { "identifier": "normalize", "path": "phantomgaze/utils/math.py", "snippet": "@cuda.jit(device=True)\ndef normalize(vector):\n \"\"\"Normalize a vector.\n\n Parameters\n ----------\n vector : tuple\n The vector to normalize.\n\n Returns\n -------\n tuple\n The normalized vector.\n \"\"\"\n\n # Get the length of the vector\n length = (vector[0] ** 2 + vector[1] ** 2 + vector[2] ** 2) ** 0.5\n\n # Normalize the vector\n return vector[0] / length, vector[1] / length, vector[2] / length" }, { "identifier": "dot", "path": "phantomgaze/utils/math.py", "snippet": "@cuda.jit(device=True)\ndef dot(vector1, vector2):\n \"\"\"Compute the dot product of two vectors.\n\n Parameters\n ----------\n vector1 : tuple\n The first vector.\n vector2 : tuple\n The second vector.\n\n Returns\n -------\n float\n The dot product of the two vectors.\n \"\"\"\n\n # Compute the dot product\n if len(vector1) == 2:\n return vector1[0] * vector2[0] + vector1[1] * vector2[1]\n else:\n return vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]" }, { "identifier": "cross", "path": "phantomgaze/utils/math.py", "snippet": "@cuda.jit(device=True)\ndef cross(vector1, vector2):\n \"\"\"Compute the cross product of two vectors.\n\n Parameters\n ----------\n vector1 : tuple\n The first vector.\n vector2 : tuple\n The second vector.\n\n Returns\n -------\n tuple\n The cross product of the two vectors.\n \"\"\"\n\n # Compute the cross product\n return (vector1[1] * vector2[2] - vector1[2] * vector2[1],\n vector1[2] * vector2[0] - vector1[0] * vector2[2],\n vector1[0] * vector2[1] - vector1[1] * vector2[0])" } ]
import math import numba from numba import cuda from phantomgaze.utils.math import normalize, dot, cross
773
# Render functions for volumes @cuda.jit(device=True) def calculate_ray_direction( x, y, img_shape, camera_position, camera_focal, camera_up): """ Calculate the direction of a ray from the camera to the image plane. Parameters ---------- x : int The x coordinate of the pixel. y : int The y coordinate of the pixel. img_shape : tuple The shape of the image. camera_position : tuple The position of the camera. camera_focal : tuple The focal point of the camera. camera_up : tuple The up vector of the camera. Returns ------- ray_direction : tuple """ # Compute base vectors forward = ( camera_focal[0] - camera_position[0], camera_focal[1] - camera_position[1], camera_focal[2] - camera_position[2], )
# Render functions for volumes @cuda.jit(device=True) def calculate_ray_direction( x, y, img_shape, camera_position, camera_focal, camera_up): """ Calculate the direction of a ray from the camera to the image plane. Parameters ---------- x : int The x coordinate of the pixel. y : int The y coordinate of the pixel. img_shape : tuple The shape of the image. camera_position : tuple The position of the camera. camera_focal : tuple The focal point of the camera. camera_up : tuple The up vector of the camera. Returns ------- ray_direction : tuple """ # Compute base vectors forward = ( camera_focal[0] - camera_position[0], camera_focal[1] - camera_position[1], camera_focal[2] - camera_position[2], )
forward = normalize(forward)
0
2023-10-26 23:53:16+00:00
2k
Khushiyant/dockerpulse
dockerpulse/lgbert/bert_pytorch/trainer/pretrain.py
[ { "identifier": "BERT", "path": "dockerpulse/lgbert/bert_pytorch/model/bert.py", "snippet": "class BERT(nn.Module):\r\n \"\"\"\r\n BERT model : Bidirectional Encoder Representations from Transformers.\r\n \"\"\"\r\n\r\n def __init__(self, vocab_size, max_len=512, hidden=768, n_layers=12,\r\n attn_heads=12, dropout=0.1, is_logkey=True, is_time=False):\r\n \"\"\"\r\n :param vocab_size: vocab_size of total words\r\n :param hidden: BERT model hidden size\r\n :param n_layers: numbers of Transformer blocks(layers)\r\n :param attn_heads: number of attention heads\r\n :param dropout: dropout rate\r\n \"\"\"\r\n\r\n super().__init__()\r\n self.hidden = hidden\r\n self.n_layers = n_layers\r\n self.attn_heads = attn_heads\r\n\r\n # paper noted they used 4*hidden_size for ff_network_hidden_size\r\n self.feed_forward_hidden = hidden * 2\r\n\r\n # embedding for BERT, sum of positional, segment, token embeddings\r\n self.embedding = BERTEmbedding(\r\n vocab_size=vocab_size,\r\n embed_size=hidden,\r\n max_len=max_len,\r\n is_logkey=is_logkey,\r\n is_time=is_time)\r\n\r\n # multi-layers transformer blocks, deep network\r\n self.transformer_blocks = nn.ModuleList(\r\n [TransformerBlock(hidden, attn_heads, hidden * 2, dropout) for _ in range(n_layers)])\r\n\r\n def forward(self, x, segment_info=None, time_info=None):\r\n # attention masking for padded token\r\n # torch.ByteTensor([batch_size, 1, seq_len, seq_len)\r\n mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)\r\n\r\n # embedding the indexed sequence to sequence of vectors\r\n x = self.embedding(x, segment_info, time_info)\r\n\r\n # running over multiple transformer blocks\r\n for transformer in self.transformer_blocks:\r\n x = transformer.forward(x, mask)\r\n\r\n return x\r" }, { "identifier": "BERTLog", "path": "dockerpulse/lgbert/bert_pytorch/model/log_model.py", "snippet": "class BERTLog(nn.Module):\r\n \"\"\"\r\n BERT Log Model\r\n \"\"\"\r\n\r\n def __init__(self, bert: BERT, vocab_size):\r\n \"\"\"\r\n :param bert: BERT model which should be trained\r\n :param vocab_size: total vocab size for masked_lm\r\n \"\"\"\r\n\r\n super().__init__()\r\n self.bert = bert\r\n self.mask_lm = MaskedLogModel(self.bert.hidden, vocab_size)\r\n self.time_lm = TimeLogModel(self.bert.hidden)\r\n # self.fnn_cls = LinearCLS(self.bert.hidden)\r\n # self.cls_lm = LogClassifier(self.bert.hidden)\r\n self.result = {\r\n \"logkey_output\": None,\r\n \"time_output\": None,\r\n \"cls_output\": None,\r\n \"cls_fnn_output\": None}\r\n\r\n def forward(self, x, time_info):\r\n x = self.bert(x, time_info=time_info)\r\n\r\n self.result[\"logkey_output\"] = self.mask_lm(x)\r\n # self.result[\"time_output\"] = self.time_lm(x)\r\n\r\n # self.result[\"cls_output\"] = x.float().mean(axis=1) #x[:, 0]\r\n self.result[\"cls_output\"] = x[:, 0]\r\n # self.result[\"cls_output\"] = self.fnn_cls(x[:, 0])\r\n\r\n # print(self.result[\"cls_fnn_output\"].shape)\r\n\r\n return self.result\r" }, { "identifier": "ScheduledOptim", "path": "dockerpulse/lgbert/bert_pytorch/trainer/optim_schedule.py", "snippet": "class ScheduledOptim():\r\n '''A simple wrapper class for learning rate scheduling'''\r\n\r\n def __init__(self, optimizer, d_model, n_warmup_steps):\r\n self._optimizer = optimizer\r\n self.n_warmup_steps = n_warmup_steps\r\n self.n_current_steps = 0\r\n self.init_lr = np.power(d_model, -0.5)\r\n\r\n def step_and_update_lr(self):\r\n \"Step with the inner optimizer\"\r\n self._update_learning_rate()\r\n self._optimizer.step()\r\n\r\n def zero_grad(self):\r\n \"Zero out the 
gradients by the inner optimizer\"\r\n self._optimizer.zero_grad()\r\n\r\n def _get_lr_scale(self):\r\n return np.min([\r\n np.power(self.n_current_steps, -0.5),\r\n np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])\r\n\r\n def _update_learning_rate(self):\r\n ''' Learning rate scheduling per step '''\r\n\r\n self.n_current_steps += 1\r\n lr = self.init_lr * self._get_lr_scale()\r\n\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr\r" } ]
import torch import torch.nn as nn import time import tqdm import numpy as np import pandas as pd from torch.optim import Adam from torch.utils.data import DataLoader from ..model import BERTLog, BERT from .optim_schedule import ScheduledOptim
1,287
class BERTTrainer: """ BERTTrainer make the pretrained BERT model with two LM training method. 1. Masked Language Model : 3.3.1 Task #1: Masked LM 2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction please check the details on README.md with simple example. """
class BERTTrainer: """ BERTTrainer make the pretrained BERT model with two LM training method. 1. Masked Language Model : 3.3.1 Task #1: Masked LM 2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction please check the details on README.md with simple example. """
def __init__(self, bert: BERT, vocab_size: int,
0
2023-10-29 09:52:36+00:00
2k
audiodude/rainfall
rainfall/blueprint/site.py
[ { "identifier": "db", "path": "rainfall/db.py", "snippet": "class Base(DeclarativeBase):" }, { "identifier": "with_current_user", "path": "rainfall/decorators.py", "snippet": "def with_current_user(f):\n '''\n Retrieves the current user from the session, performs some checks, and then\n calls the underlying handler\n '''\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n user_id = flask.session.get('user_id')\n if user_id is None:\n return flask.jsonify(status=404, error='No signed in user'), 404\n\n user = db.session.get(User, user_id)\n if user is None:\n return flask.jsonify(status=404, error='User does not exist'), 404\n\n value = f(*args, user=user, **kwargs)\n return value\n\n return wrapped" }, { "identifier": "with_current_site", "path": "rainfall/decorators.py", "snippet": "def with_current_site(f):\n '''Requires the with_current_user decorator above'''\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n if 'site_id' not in kwargs:\n return flask.jsonify(status=500,\n error='Wrapper requires site_id kwarg'), 500\n\n site_id = kwargs.pop('site_id')\n user = kwargs['user']\n site = db.session.get(Site, UUID(site_id))\n if site is None:\n return flask.jsonify(\n status=404, error=f'Could not find a site with id={site_id}'), 404\n\n if site.user.id != user.id:\n return flask.jsonify(status=401,\n error='Not authorized for that site'), 401\n\n value = f(*args, site=site, **kwargs)\n return value\n\n return wrapped" }, { "identifier": "Site", "path": "rainfall/models/site.py", "snippet": "class Site(db.Model):\n __tablename__ = 'sites'\n\n id: Mapped[bytes] = mapped_column(Uuid, primary_key=True, default=uuid7)\n user_id: Mapped[bytes] = mapped_column(ForeignKey(\"users.id\"))\n user: Mapped[\"User\"] = relationship(back_populates=\"sites\")\n name: Mapped[str] = mapped_column(String(255))\n\n releases: Mapped[List[\"Release\"]] = relationship(back_populates=\"site\")\n\n def __repr__(self) -> str:\n return f'Site(id={self.id!r}, user_id={self.user_id!r}, name={self.name!r})'\n\n def serialize(self):\n props = []\n for field in fields(self):\n if field.name == 'user':\n continue\n\n if field.name == 'releases':\n props.append(\n ('releases', [release.serialize() for release in self.releases]))\n continue\n\n props.append((field.name, getattr(self, field.name)))\n return dict(props)" } ]
from uuid import UUID from rainfall.db import db from rainfall.decorators import with_current_user, with_current_site from rainfall.models.site import Site import flask
944
site = flask.Blueprint('site', __name__) @site.route('/site', methods=['POST']) @with_current_user def create_site(user): if not user.is_welcomed: return flask.jsonify(status=400, error='User has not yet been welcomed'), 400 data = flask.request.get_json() if data is None: return flask.jsonify(status=400, error='No JSON provided'), 400 site_data = data.get('site') if site_data is None: return flask.jsonify(status=400, error='Missing site data'), 400 if site_data.get('name') is None: return flask.jsonify(status=400, error='Site name is required'), 400 user.sites.append(Site(**site_data)) db.session.add(user) db.session.commit() return '', 204 @site.route('/site/list') @with_current_user def list_sites(user): return flask.jsonify({'sites': [site.serialize() for site in user.sites]}) @site.route('/site/<site_id>') @with_current_user
site = flask.Blueprint('site', __name__) @site.route('/site', methods=['POST']) @with_current_user def create_site(user): if not user.is_welcomed: return flask.jsonify(status=400, error='User has not yet been welcomed'), 400 data = flask.request.get_json() if data is None: return flask.jsonify(status=400, error='No JSON provided'), 400 site_data = data.get('site') if site_data is None: return flask.jsonify(status=400, error='Missing site data'), 400 if site_data.get('name') is None: return flask.jsonify(status=400, error='Site name is required'), 400 user.sites.append(Site(**site_data)) db.session.add(user) db.session.commit() return '', 204 @site.route('/site/list') @with_current_user def list_sites(user): return flask.jsonify({'sites': [site.serialize() for site in user.sites]}) @site.route('/site/<site_id>') @with_current_user
@with_current_site
2
2023-10-30 04:43:03+00:00
2k
LasticXYZ/price-simulation
tests/test_poly.py
[ { "identifier": "Linear", "path": "poly.py", "snippet": "class Linear:\n @staticmethod\n def leadin_factor_at(when, factor = 1):\n \"\"\"\n Factor represents the slope of the linear function\n Factor is not a parameter that is originally used in the `broker pallet code`.\n\n Function follows the code in: https://github.com/paritytech/polkadot-sdk/blob/2610450a18e64079abfe98f0a5b57069bbb61009/substrate/frame/broker/src/adapt_price.rs#L50 \n \"\"\"\n return 1 + (factor) * (1 - when)\n\n @staticmethod\n def adapt_price(sold, target, limit):\n \"\"\"\n Function follows the code in: https://github.com/paritytech/polkadot-sdk/blob/2610450a18e64079abfe98f0a5b57069bbb61009/substrate/frame/broker/src/adapt_price.rs#L54C13-L54C13\n \"\"\"\n if sold <= target:\n return max(sold, 1) / target\n else:\n return 1 + (sold - target) / (limit - target)" }, { "identifier": "Exponential", "path": "poly.py", "snippet": "class Exponential:\n \"\"\"\n This part of the code is not implemented in the `broker pallet`.\n It is given as an example of how would an exponential function be implemented if it were to be implemented.\n \"\"\"\n @staticmethod\n def leadin_factor_at(when, factor: int = 1):\n # Exponential decay model for the lead-in factor\n # Factor is not a parameter that is originally used in the `broker pallet code`.\n return pow(2 - when, factor)\n\n @staticmethod\n def adapt_price(sold, target, limit):\n # Exponential price adaptation based on the sold quantity\n if sold <= target:\n return pow(2, -sold/target) # Decreases exponentially as sold approaches target\n else:\n # Increases exponentially as sold exceeds target, up to the limit\n excess_ratio = (sold - target) / (limit - target)\n return 1 + pow(2, excess_ratio)" } ]
import unittest from poly import Linear, Exponential
653
class TestLinearNoPanic(unittest.TestCase): def test_linear_no_panic(self): for limit in range(10): for target in range(1, 10): for sold in range(limit + 1): price = Linear.adapt_price(sold, target, limit) if sold > target: self.assertTrue(price > 1) else: self.assertTrue(price <= 1) class TestExponentialNoPanic(unittest.TestCase): def test_exponential_no_panic(self): for limit in range(10): for target in range(1, 10): for sold in range(limit + 1):
class TestLinearNoPanic(unittest.TestCase): def test_linear_no_panic(self): for limit in range(10): for target in range(1, 10): for sold in range(limit + 1): price = Linear.adapt_price(sold, target, limit) if sold > target: self.assertTrue(price > 1) else: self.assertTrue(price <= 1) class TestExponentialNoPanic(unittest.TestCase): def test_exponential_no_panic(self): for limit in range(10): for target in range(1, 10): for sold in range(limit + 1):
price = Exponential.adapt_price(sold, target, limit)
1
2023-10-30 12:49:00+00:00
2k
dangeng/flowmag
test_time_adapt.py
[ { "identifier": "TestTimeAdaptDataset", "path": "dataset.py", "snippet": "class TestTimeAdaptDataset(Dataset):\n def __init__(self, root, mode='first', length=None):\n '''\n args:\n root: (string) path to directory of frames\n mode: ['first', 'random'] how to sample frames\n first: always samples first frame + idx^th frame\n random: randomly samples two frames\n '''\n self.root = Path(root)\n self.frame_names = sorted(os.listdir(self.root))\n self.mode = mode\n self.im_size = 512\n self.scale = 1.1 # stretching scale\n\n self.geom_transform = Compose([\n RandomRotation(5),\n ])\n self.color_transform = ColorJitter(brightness=.5, contrast=.5, saturation=.5, hue=.3)\n\n if length is not None:\n self.length = length\n else:\n self.length = len(self.frame_names)\n\n def transform_frames(self, frames):\n c, t, h, w = frames.shape\n\n # Apply geometric transforms on all frames\n frames = rearrange(frames, 'c t h w -> (c t) h w')\n frames = self.geom_transform(frames)\n frames = rearrange(frames, '(c t) h w -> c t h w', c=c, t=t)\n\n # scale\n new_h = h * self.scale**np.random.uniform(-1, 1) # rescale for stretch aug\n new_w = w * self.scale**np.random.uniform(-1, 1)\n new_h = int(new_h)\n new_w = int(new_w)\n frames = resize(frames, (new_h, new_w))\n\n # Pad out so edges are at least im_size.\n to_pad_h = max(self.im_size - new_h, 0)\n to_pad_w = max(self.im_size - new_w, 0)\n pad_l_h = to_pad_h // 2\n pad_r_h = to_pad_h - pad_l_h\n pad_l_w = to_pad_w // 2\n pad_r_w = to_pad_w - pad_l_w\n frames = F.pad(frames, (pad_l_w, pad_r_w, pad_l_h, pad_r_h))\n _, _, padded_h, padded_w = frames.shape\n \n # crop to (im_size, im_size)\n ch = self.im_size\n cw = self.im_size\n ct = np.random.randint(padded_h-ch+1)\n cl = np.random.randint(padded_w-cw+1)\n frames = frames[:,:,ct:ct+ch,cl:cl+cw]\n\n # Apply color transforms (must have c=3, treating t as batch)\n frames = rearrange(frames, 'c t h w -> t c h w')\n frames = self.color_transform(frames)\n frames = rearrange(frames, 't c h w -> c t h w')\n\n return frames\n\n def __getitem__(self, idx):\n idx = idx % len(self.frame_names)\n\n if self.mode == 'first':\n idx0 = 0\n idx1 = idx\n elif self.mode == 'random':\n idx0 = np.random.randint(self.length)\n idx1 = np.random.randint(self.length)\n else:\n raise NotImplementedError\n\n frame0 = Image.open(self.root / self.frame_names[idx0])\n frame1 = Image.open(self.root / self.frame_names[idx1])\n\n frame0 = to_tensor(frame0)\n frame1 = to_tensor(frame1)\n\n frames = torch.stack([frame0, frame1], dim=1)\n\n # Transform tensors\n frames = self.transform_frames(frames)\n\n return frames\n\n def __len__(self):\n return self.length" }, { "identifier": "AverageMeter", "path": "myutils.py", "snippet": "class AverageMeter(object):\n def __init__(self, name):\n self.name = name\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.data = []\n\n def update(self, val, n=1):\n # Compute the sum, avg, std and standard error for data\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n self.data.append(val)\n self.std = np.std(self.data)\n self.se = np.std(self.data, ddof=1) / np.sqrt(np.size(self.data))\n\n def __str__(self):\n return f\"{self.name}: {self.val:.5f} {self.avg:.5f}\"" } ]
from tqdm import tqdm from torch.optim import Adam from torch.utils.data import DataLoader from dataset import TestTimeAdaptDataset from myutils import AverageMeter import torch import matplotlib.pyplot as plt
1,430
def test_time_adapt(model, frames_dir, num_epochs=5, mode='first', device=0, inference_fn=None, inference_freq=1, alpha=None, save_dir=None, dataset_length=None): ''' params: model: (nn.Module) model with checkpoint already loaded frames_dir: (string) path to directory of frames for test time adaptation (OPTIONAL NOW) dataset: optional dataset to give, default is TestTimeAdaptDataset num_epochs: (int) number of passes through the frames_dir mode: ['first', 'random'] how to sample frames device: device to put model and data on inference_fn: function to call at the end of each epoch output: model: (nn.Module) finetuned input module ''' model.train() model = model.to(device) # Get dataset from frames # if dataset is None: dataset = TestTimeAdaptDataset(frames_dir, mode=mode, length=dataset_length) dataloader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4, drop_last=False) # Optimizer optimizer = Adam(model.module.trainable_parameters(), lr=1e-4) # Record losses
def test_time_adapt(model, frames_dir, num_epochs=5, mode='first', device=0, inference_fn=None, inference_freq=1, alpha=None, save_dir=None, dataset_length=None): ''' params: model: (nn.Module) model with checkpoint already loaded frames_dir: (string) path to directory of frames for test time adaptation (OPTIONAL NOW) dataset: optional dataset to give, default is TestTimeAdaptDataset num_epochs: (int) number of passes through the frames_dir mode: ['first', 'random'] how to sample frames device: device to put model and data on inference_fn: function to call at the end of each epoch output: model: (nn.Module) finetuned input module ''' model.train() model = model.to(device) # Get dataset from frames # if dataset is None: dataset = TestTimeAdaptDataset(frames_dir, mode=mode, length=dataset_length) dataloader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4, drop_last=False) # Optimizer optimizer = Adam(model.module.trainable_parameters(), lr=1e-4) # Record losses
meter_loss = AverageMeter('loss')
1
2023-10-27 05:23:08+00:00
2k
warner-benjamin/optimi
optimi/adam.py
[ { "identifier": "MIN_TORCH_2_1", "path": "optimi/utils.py", "snippet": "MIN_TORCH_2_1 = parse(torch.__version__) >= parse(\"2.1\")" }, { "identifier": "debias_beta", "path": "optimi/utils.py", "snippet": "def debias_beta(beta: float, step: int) -> float:\n \"\"\"Applies the Adam-style debias correction into beta.\n\n Simplified version of `betahat = beta*(1-beta**(step-1))/(1-beta**step)`\n \"\"\"\n return (beta**step - beta) / (beta**step - 1)" } ]
from typing import Any, Callable, Iterable from warnings import warn from torch import Tensor from torch.optim.optimizer import Optimizer, _default_to_fused_or_foreach from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype from optimi.utils import MIN_TORCH_2_1, debias_beta import torch
1,021
# Copyright (c) 2023 Benjamin Warner # SPDX-License-Identifier: MIT # Based on PyTorch Optimizers # PyTorch - PyTorch BSD-style license - Copyright (c) 2013-present PyTorch contributors # Kahan summation inspired by Torch Distributed Experimental's `AnyPrecisionAdamW` # torchdistX - BSD 3-Clause License - Copyright (c) Meta Platforms, Inc. and affiliates # Learning rate decoupled weight decay inspired by Composer's `DecoupledSGDW` & `DecoupledAdamW` # Composer - Apache License 2.0 - Copyright (c) 2022 MosaicML Composer authors from __future__ import annotations __all__ = ["Adam", "adam"] class Adam(Optimizer): """Adam optimizer. Optionally with decoupled weight decay (AdamW). Args: params: Iterable of parameters to optimize or dicts defining parameter groups lr: Learning rate betas: Coefficients for gradient and squared gradient moving averages (default: (0.9, 0.99)) weight_decay: Weight decay coefficient. If `decouple_wd` and `decouple_lr` are False, applies L2 penalty (default: 0) eps: Added to denominator to improve numerical stability (default: 1e-6) decouple_wd: Apply decoupled weight decay instead of L2 penalty (default: False) decouple_lr: Apply fully decoupled weight decay instead of L2 penalty (default: False) max_lr: Maximum scheduled learning rate. Set if `lr` is not the maximum scheduled learning rate and `decouple_lr` is True (default: None) kahan_sum: Enables Kahan summation for more accurate parameter updates when training in low precision (float16 or bfloat16). If unspecified, automatically applies for low precision parameters (default: None) foreach: Enables the foreach implementation. If unspecified, tries to use foreach over for-loop implementation since it is significantly faster (default: None) """ def __init__( self, params: Iterable[Tensor] | Iterable[dict], lr: float, betas: tuple[float, float] = (0.9, 0.99), weight_decay: float = 0, eps: float = 1e-6, decouple_wd: bool = False, decouple_lr: bool = False, max_lr: float | None = None, kahan_sum: bool | None = None, foreach: bool | None = None, ): if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr=}") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta1 parameter: {betas[0]=}") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta2 parameter: {betas[1]=}") if not 0.0 <= weight_decay: raise ValueError(f"Invalid weight decay: {weight_decay=}") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon: {eps=}") if decouple_lr and max_lr is None: max_lr = lr if max_lr is not None and not 0.0 <= max_lr: raise ValueError(f"Invalid maximum learning rate: {max_lr=}") if decouple_lr and weight_decay >= 1e-3: warn( f"You are using {weight_decay=} which is potentially high for {decouple_lr=}. Unlike decoupled weight " f"decay, fully decoupled weight decay does not reduce weight decay by the learning rate.", category=UserWarning, )
# Copyright (c) 2023 Benjamin Warner # SPDX-License-Identifier: MIT # Based on PyTorch Optimizers # PyTorch - PyTorch BSD-style license - Copyright (c) 2013-present PyTorch contributors # Kahan summation inspired by Torch Distributed Experimental's `AnyPrecisionAdamW` # torchdistX - BSD 3-Clause License - Copyright (c) Meta Platforms, Inc. and affiliates # Learning rate decoupled weight decay inspired by Composer's `DecoupledSGDW` & `DecoupledAdamW` # Composer - Apache License 2.0 - Copyright (c) 2022 MosaicML Composer authors from __future__ import annotations __all__ = ["Adam", "adam"] class Adam(Optimizer): """Adam optimizer. Optionally with decoupled weight decay (AdamW). Args: params: Iterable of parameters to optimize or dicts defining parameter groups lr: Learning rate betas: Coefficients for gradient and squared gradient moving averages (default: (0.9, 0.99)) weight_decay: Weight decay coefficient. If `decouple_wd` and `decouple_lr` are False, applies L2 penalty (default: 0) eps: Added to denominator to improve numerical stability (default: 1e-6) decouple_wd: Apply decoupled weight decay instead of L2 penalty (default: False) decouple_lr: Apply fully decoupled weight decay instead of L2 penalty (default: False) max_lr: Maximum scheduled learning rate. Set if `lr` is not the maximum scheduled learning rate and `decouple_lr` is True (default: None) kahan_sum: Enables Kahan summation for more accurate parameter updates when training in low precision (float16 or bfloat16). If unspecified, automatically applies for low precision parameters (default: None) foreach: Enables the foreach implementation. If unspecified, tries to use foreach over for-loop implementation since it is significantly faster (default: None) """ def __init__( self, params: Iterable[Tensor] | Iterable[dict], lr: float, betas: tuple[float, float] = (0.9, 0.99), weight_decay: float = 0, eps: float = 1e-6, decouple_wd: bool = False, decouple_lr: bool = False, max_lr: float | None = None, kahan_sum: bool | None = None, foreach: bool | None = None, ): if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr=}") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta1 parameter: {betas[0]=}") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta2 parameter: {betas[1]=}") if not 0.0 <= weight_decay: raise ValueError(f"Invalid weight decay: {weight_decay=}") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon: {eps=}") if decouple_lr and max_lr is None: max_lr = lr if max_lr is not None and not 0.0 <= max_lr: raise ValueError(f"Invalid maximum learning rate: {max_lr=}") if decouple_lr and weight_decay >= 1e-3: warn( f"You are using {weight_decay=} which is potentially high for {decouple_lr=}. Unlike decoupled weight " f"decay, fully decoupled weight decay does not reduce weight decay by the learning rate.", category=UserWarning, )
if not MIN_TORCH_2_1:
0
2023-10-25 00:51:05+00:00
2k