repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes) |
---|---|---|---|---|---|---|---|---|---|---|
AsuradaYuci/TF-CLIP | loss/make_loss.py | [
{
"identifier": "CrossEntropyLabelSmooth",
"path": "loss/softmax_loss.py",
"snippet": "class CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n\n def __init__(self, num_classes, epsilon=0.1, use_gpu=True):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.use_gpu = use_gpu\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs) \n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1) \n if self.use_gpu: targets = targets.cuda()\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss"
},
{
"identifier": "LabelSmoothingCrossEntropy",
"path": "loss/softmax_loss.py",
"snippet": "class LabelSmoothingCrossEntropy(nn.Module):\n \"\"\"\n NLL loss with label smoothing.\n \"\"\"\n def __init__(self, smoothing=0.1):\n \"\"\"\n Constructor for the LabelSmoothing module.\n :param smoothing: label smoothing factor\n \"\"\"\n super(LabelSmoothingCrossEntropy, self).__init__()\n assert smoothing < 1.0\n self.smoothing = smoothing\n self.confidence = 1. - smoothing\n\n def forward(self, x, target):\n logprobs = F.log_softmax(x, dim=-1)\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n return loss.mean()"
},
{
"identifier": "TripletLoss",
"path": "loss/triplet_loss.py",
"snippet": "class TripletLoss(object):\n \"\"\"\n Triplet loss using HARDER example mining,\n modified based on original triplet loss using hard example mining\n \"\"\"\n\n def __init__(self, margin=None, hard_factor=0.0):\n self.margin = margin\n self.hard_factor = hard_factor\n if margin is not None:\n self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n else:\n self.ranking_loss = nn.SoftMarginLoss()\n\n def __call__(self, global_feat, labels, normalize_feature=False):\n if normalize_feature:\n global_feat = normalize(global_feat, axis=-1)\n dist_mat = euclidean_dist(global_feat, global_feat) #B,B\n\n dist_ap, dist_an = hard_example_mining(dist_mat, labels) \n\n dist_ap *= (1.0 + self.hard_factor)\n dist_an *= (1.0 - self.hard_factor)\n\n y = dist_an.new().resize_as_(dist_an).fill_(1) \n if self.margin is not None:\n loss = self.ranking_loss(dist_an, dist_ap, y)\n else:\n loss = self.ranking_loss(dist_an - dist_ap, y)\n return loss, dist_ap, dist_an"
},
{
"identifier": "CenterLoss",
"path": "loss/center_loss.py",
"snippet": "class CenterLoss(nn.Module):\n \"\"\"Center loss.\n\n Reference:\n Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.\n\n Args:\n num_classes (int): number of classes.\n feat_dim (int): feature dimension.\n \"\"\"\n\n def __init__(self, num_classes=751, feat_dim=2048, use_gpu=True):\n super(CenterLoss, self).__init__()\n self.num_classes = num_classes\n self.feat_dim = feat_dim\n self.use_gpu = use_gpu\n\n if self.use_gpu:\n self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())\n else:\n self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))\n\n def forward(self, x, labels):\n \"\"\"\n Args:\n x: feature matrix with shape (batch_size, feat_dim).\n labels: ground truth labels with shape (num_classes).\n \"\"\"\n assert x.size(0) == labels.size(0), \"features.size(0) is not equal to labels.size(0)\"\n\n batch_size = x.size(0)\n distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \\\n torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()\n distmat.addmm_(1, -2, x, self.centers.t())\n\n classes = torch.arange(self.num_classes).long()\n if self.use_gpu: classes = classes.cuda()\n labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)\n mask = labels.eq(classes.expand(batch_size, self.num_classes))\n\n dist = []\n for i in range(batch_size):\n value = distmat[i][mask[i]]\n value = value.clamp(min=1e-12, max=1e+12) # for numerical stability\n dist.append(value)\n dist = torch.cat(dist)\n loss = dist.mean()\n return loss"
}
] | import torch.nn.functional as F
from .softmax_loss import CrossEntropyLabelSmooth, LabelSmoothingCrossEntropy
from .triplet_loss import TripletLoss
from .center_loss import CenterLoss | 1,474 | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
feat_dim = 2048
| # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
feat_dim = 2048 | center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=True) # center loss | 3 | 2023-12-11 04:03:46+00:00 | 2k |
MarilynKeller/aitviewer-skel | aitviewer/models/smpl.py | [
{
"identifier": "CONFIG",
"path": "aitviewer/configuration.py",
"snippet": "CONFIG = Configuration()"
},
{
"identifier": "aa2rot_torch",
"path": "aitviewer/utils/so3.py",
"snippet": "def aa2rot_torch(rotation_vectors):\n \"\"\"\n Convert rotation vectors (angle-axis representation) to rotation matrices.\n :param rotation_vectors: A torch tensor of shape (..., 3).\n :return: A torch tensor of shape (..., 3, 3).\n \"\"\"\n assert isinstance(rotation_vectors, torch.Tensor)\n return roma.rotvec_to_rotmat(rotation_vectors)"
},
{
"identifier": "rot2aa_torch",
"path": "aitviewer/utils/so3.py",
"snippet": "def rot2aa_torch(rotation_matrices):\n \"\"\"\n Convert rotation matrices to rotation vectors (angle-axis representation).\n :param rotation_matrices: A torch tensor of shape (..., 3, 3).\n :return: A torch tensor of shape (..., 3).\n \"\"\"\n assert isinstance(rotation_matrices, torch.Tensor)\n return roma.rotmat_to_rotvec(rotation_matrices)"
},
{
"identifier": "compute_vertex_and_face_normals_torch",
"path": "aitviewer/utils/utils.py",
"snippet": "def compute_vertex_and_face_normals_torch(vertices, faces, vertex_faces, normalize=False):\r\n \"\"\"\r\n Compute (unnormalized) vertex normals for the given vertices.\r\n :param vertices: A tensor of shape (N, V, 3).\r\n :param faces: A tensor of shape (F, 3) indexing into `vertices`.\r\n :param vertex_faces: A tensor of shape (V, MAX_VERTEX_DEGREE) that lists the face IDs each vertex is a part of.\r\n :param normalize: Whether to make the normals unit length or not.\r\n :return: The vertex and face normals as tensors of shape (N, V, 3) and (N, F, 3) respectively.\r\n \"\"\"\r\n vs = vertices[:, faces.to(dtype=torch.long)]\r\n face_normals = torch.cross(vs[:, :, 1] - vs[:, :, 0], vs[:, :, 2] - vs[:, :, 0], dim=-1) # (N, F, 3)\r\n\r\n ns_all_faces = face_normals[:, vertex_faces] # (N, V, MAX_VERTEX_DEGREE, 3)\r\n ns_all_faces[:, vertex_faces == -1] = 0.0\r\n vertex_degrees = (vertex_faces > -1).sum(dim=-1).to(dtype=ns_all_faces.dtype)\r\n vertex_normals = ns_all_faces.sum(dim=-2) / vertex_degrees[None, :, None] # (N, V, 3)\r\n\r\n if normalize:\r\n face_normals = face_normals / torch.norm(face_normals, dim=-1).unsqueeze(-1)\r\n vertex_normals = vertex_normals / torch.norm(vertex_normals, dim=-1).unsqueeze(-1)\r\n\r\n return vertex_normals, face_normals\r"
}
] | import collections
import numpy as np
import smplx
import torch
import torch.nn as nn
import trimesh
from abc import ABC
from aitviewer.configuration import CONFIG as C
from aitviewer.utils.so3 import aa2rot_torch as aa2rot
from aitviewer.utils.so3 import rot2aa_torch as rot2aa
from aitviewer.utils.utils import compute_vertex_and_face_normals_torch | 1,050 | # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class SMPLLayer(nn.Module, ABC):
"""A wrapper for the various SMPL body models."""
def __init__(
self,
model_type="smpl",
gender="neutral",
num_betas=10,
device=None,
dtype=None,
**smpl_model_params,
):
"""
Initializer.
:param model_type: Which type of SMPL model to load, currently SMPL, SMPL-H and SMPL-X are supported.
:param gender: Which gender to load.
:param num_betas: Number of shape components.
:param device: CPU or GPU.
:param dtype: The pytorch floating point data type.
:param smpl_model_params: Other keyword arguments that can be passed to smplx.create.
"""
assert model_type in ["smpl", "smplh", "smplx", "mano", "flame"]
assert gender in ["male", "female", "neutral"]
if model_type == "smplh" and gender == "neutral":
gender = "female" # SMPL-H has no neutral gender.
super(SMPLLayer, self).__init__()
self.num_betas = num_betas
smpl_model_params["use_pca"] = smpl_model_params.get("use_pca", False)
smpl_model_params["flat_hand_mean"] = smpl_model_params.get("flat_hand_mean", True)
self.bm = smplx.create(
| # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class SMPLLayer(nn.Module, ABC):
"""A wrapper for the various SMPL body models."""
def __init__(
self,
model_type="smpl",
gender="neutral",
num_betas=10,
device=None,
dtype=None,
**smpl_model_params,
):
"""
Initializer.
:param model_type: Which type of SMPL model to load, currently SMPL, SMPL-H and SMPL-X are supported.
:param gender: Which gender to load.
:param num_betas: Number of shape components.
:param device: CPU or GPU.
:param dtype: The pytorch floating point data type.
:param smpl_model_params: Other keyword arguments that can be passed to smplx.create.
"""
assert model_type in ["smpl", "smplh", "smplx", "mano", "flame"]
assert gender in ["male", "female", "neutral"]
if model_type == "smplh" and gender == "neutral":
gender = "female" # SMPL-H has no neutral gender.
super(SMPLLayer, self).__init__()
self.num_betas = num_betas
smpl_model_params["use_pca"] = smpl_model_params.get("use_pca", False)
smpl_model_params["flat_hand_mean"] = smpl_model_params.get("flat_hand_mean", True)
self.bm = smplx.create( | C.smplx_models, | 0 | 2023-12-07 16:13:50+00:00 | 2k |
wukan1986/polars_ta | tests/numba_test.py | [
{
"identifier": "ts_co_kurtosis",
"path": "polars_ta/wq/time_series.py",
"snippet": "def ts_co_kurtosis(x: Expr, y: Expr, d: int = 5, ddof: int = 0) -> Expr:\n return map_batches([x, y], lambda xx: batches_i2_o1([x1.to_numpy() for x1 in xx], roll_co_kurtosis, d))"
},
{
"identifier": "nb_roll_sum",
"path": "polars_ta/utils/numba_.py",
"snippet": "@jit(nopython=True, nogil=True, cache=True)\ndef nb_roll_sum(x1, window):\n \"\"\"演示代码,请直接用 pl.col('A').rolling_sum(10).alias('a1')\"\"\"\n out = np.full(x1.shape, np.nan, dtype=float)\n a1 = sliding_window_view(x1, window)\n for i, v1 in enumerate(a1):\n out[i + window - 1] = np.sum(v1)\n return out"
},
{
"identifier": "batches_i1_o1",
"path": "polars_ta/utils/numba_.py",
"snippet": "def batches_i1_o1(x1: np.ndarray, func, *args, dtype=None) -> Series:\n return Series(func(x1, *args), nan_to_null=True, dtype=dtype)"
},
{
"identifier": "roll_sum",
"path": "polars_ta/utils/numba_.py",
"snippet": "def roll_sum(x: Expr, n: int) -> Expr:\n return x.map_batches(lambda x1: batches_i1_o1(x1.to_numpy(), nb_roll_sum, n))"
},
{
"identifier": "roll_cov",
"path": "polars_ta/utils/numba_.py",
"snippet": "def roll_cov(a: Expr, b: Expr, n: int) -> Expr:\n return map_batches([a, b], lambda xx: batches_i2_o1([x1.to_numpy() for x1 in xx], nb_roll_cov, n))"
}
] | import time
import numpy as np
import polars as pl
from numba import jit
from polars_ta.wq.time_series import ts_co_kurtosis
from polars_ta.utils.numba_ import nb_roll_sum, batches_i1_o1, roll_sum, roll_cov | 671 |
@jit(nopython=True, nogil=True, fastmath=True, cache=True)
def nb_sum(x):
return np.sum(x)
df = pl.DataFrame({'A': range(100000), 'B': range(100000)})
a = df.with_columns([
pl.col('A').rolling_sum(10).alias('a1'),
pl.col('A').rolling_map(lambda x: x.sum(), 10).alias('a2'),
pl.col('A').rolling_map(lambda x: nb_sum(x.to_numpy()), 10).alias('a3'),
roll_sum(pl.col('A'), 10).alias('a4'),
pl.col('A').map_batches(lambda x: batches_i1_o1(x.to_numpy(), nb_roll_sum, 10)).alias('a5'),
pl.rolling_cov(pl.col('A'), pl.col('B'), window_size=10).alias('a6'),
roll_cov(pl.col('A'), pl.col('B'), 10).alias('a7'),
|
@jit(nopython=True, nogil=True, fastmath=True, cache=True)
def nb_sum(x):
return np.sum(x)
df = pl.DataFrame({'A': range(100000), 'B': range(100000)})
a = df.with_columns([
pl.col('A').rolling_sum(10).alias('a1'),
pl.col('A').rolling_map(lambda x: x.sum(), 10).alias('a2'),
pl.col('A').rolling_map(lambda x: nb_sum(x.to_numpy()), 10).alias('a3'),
roll_sum(pl.col('A'), 10).alias('a4'),
pl.col('A').map_batches(lambda x: batches_i1_o1(x.to_numpy(), nb_roll_sum, 10)).alias('a5'),
pl.rolling_cov(pl.col('A'), pl.col('B'), window_size=10).alias('a6'),
roll_cov(pl.col('A'), pl.col('B'), 10).alias('a7'), | ts_co_kurtosis(pl.col('A'), pl.col('B'), 10).alias('a8'), | 0 | 2023-12-12 11:44:52+00:00 | 2k |
facebookresearch/taskmet | taskmet.py | [
{
"identifier": "dense_nn",
"path": "utils.py",
"snippet": "def dense_nn(\n num_features,\n num_targets,\n num_layers,\n intermediate_size=10,\n activation=\"relu\",\n output_activation=\"sigmoid\",\n):\n if num_layers > 1:\n if intermediate_size is None:\n intermediate_size = max(num_features, num_targets)\n if activation == \"relu\":\n activation_fn = torch.nn.ReLU\n elif activation == \"sigmoid\":\n activation_fn = torch.nn.Sigmoid\n else:\n raise Exception(\"Invalid activation function: \" + str(activation))\n net_layers = [torch.nn.Linear(num_features, intermediate_size), activation_fn()]\n for _ in range(num_layers - 2):\n net_layers.append(torch.nn.Linear(intermediate_size, intermediate_size))\n net_layers.append(activation_fn())\n if not isinstance(num_targets, tuple):\n net_layers.append(torch.nn.Linear(intermediate_size, num_targets))\n else:\n net_layers.append(\n torch.nn.Linear(intermediate_size, reduce(operator.mul, num_targets, 1))\n )\n net_layers.append(View(num_targets))\n else:\n if not isinstance(num_targets, tuple):\n net_layers = [torch.nn.Linear(num_features, num_targets)]\n else:\n net_layers = [\n torch.nn.Linear(num_features, reduce(operator.mul, num_targets, 1)),\n View(num_targets),\n ]\n\n if output_activation == \"relu\":\n net_layers.append(torch.nn.ReLU())\n elif output_activation == \"sigmoid\":\n net_layers.append(torch.nn.Sigmoid())\n elif output_activation == \"tanh\":\n net_layers.append(torch.nn.Tanh())\n elif output_activation == \"softmax\":\n net_layers.append(torch.nn.Softmax(dim=-1))\n elif output_activation == \"elu\":\n net_layers.append(torch.nn.ELU())\n\n return torch.nn.Sequential(*net_layers)"
},
{
"identifier": "View",
"path": "utils.py",
"snippet": "class View(torch.nn.Module):\n def __init__(self, shape):\n super().__init__()\n self.shape = shape\n\n def __repr__(self):\n return f\"View{self.shape}\"\n\n def forward(self, input):\n \"\"\"\n Reshapes the input according to the shape saved in the view data structure.\n \"\"\"\n batch_size = input.shape[:-1]\n shape = (*batch_size, *self.shape)\n out = input.view(shape)\n return out"
},
{
"identifier": "Metric",
"path": "metric.py",
"snippet": "class Metric(nn.Module):\n def __init__(\n self,\n num_features,\n num_output,\n num_hidden,\n identity_init,\n identity_init_scale,\n ):\n super().__init__()\n self.base = nn.Sequential(\n nn.Linear(num_features, num_hidden),\n nn.ReLU(),\n nn.Linear(num_hidden, num_output * num_output),\n )\n self.identity_fac_log = torch.nn.parameter.Parameter(torch.zeros([]))\n if identity_init:\n last_layer = self.base[-1]\n last_layer.weight.data.div_(identity_init_scale)\n last_layer.bias.data = torch.eye(num_output).view(-1)\n\n self.num_output = num_output\n\n def forward(self, x):\n # A = torch.nn.functional.softplus(self.base(x))\n identity_fac = torch.exp(self.identity_fac_log)\n L = self.base(x)\n L = L.view(L.shape[0], self.num_output, self.num_output)\n A = (\n torch.bmm(L, L.transpose(1, 2))\n + identity_fac * torch.eye(self.num_output).repeat(x.shape[0], 1, 1).cuda()\n )\n # TODO: extend for PSD matrices with bounds from the\n # identity metric\n return A"
}
] | import torch
import torch.nn as nn
import numpy as np
import functorch
import torchopt
import random
from typing import List, Tuple, Dict, Union, Optional, Callable
from utils import dense_nn, View
from metric import Metric | 952 | # Copyright (c) Meta Platforms, Inc. and affiliates
class Predictor(nn.Module):
def __init__(self, args):
super().__init__()
| # Copyright (c) Meta Platforms, Inc. and affiliates
class Predictor(nn.Module):
def __init__(self, args):
super().__init__() | self.model = dense_nn() | 0 | 2023-12-07 22:23:01+00:00 | 2k |
kylemcdonald/i2i-realtime | offline_renderer.py | [
{
"identifier": "chunks",
"path": "utils/itertools.py",
"snippet": "def chunks(x, n):\n # return slices of lists\n if hasattr(x, '__len__'):\n for i in range(0, len(x), n):\n yield x[i:i+n]\n else:\n # return sub-generators of generators\n i = iter(x)\n for e in i:\n yield chain([e], islice(i, n-1))"
},
{
"identifier": "DiffusionProcessor",
"path": "diffusion_processor.py",
"snippet": "class DiffusionProcessor:\n def __init__(self, warmup=None, local_files_only=True):\n base_model = \"stabilityai/sdxl-turbo\"\n vae_model = \"madebyollin/taesdxl\"\n\n warnings.filterwarnings(\"ignore\", category=torch.jit.TracerWarning)\n\n disable_progress_bar()\n self.pipe = AutoPipelineForImage2Image.from_pretrained(\n base_model,\n torch_dtype=torch.float16,\n variant=\"fp16\",\n local_files_only=local_files_only,\n )\n\n self.pipe.vae = AutoencoderTiny.from_pretrained(\n vae_model, torch_dtype=torch.float16, local_files_only=local_files_only\n )\n fix_seed(self.pipe)\n\n print(\"Model loaded\")\n\n config = CompilationConfig.Default()\n config.enable_xformers = True\n config.enable_triton = True\n config.enable_cuda_graph = True\n self.pipe = compile(self.pipe, config=config)\n\n print(\"Model compiled\")\n\n self.pipe.to(device=\"cuda\", dtype=torch.float16)\n self.pipe.set_progress_bar_config(disable=True)\n\n print(\"Model moved to GPU\", flush=True)\n \n self.compel = Compel(\n tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2],\n text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],\n returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,\n requires_pooled=[False, True],\n )\n self.prompt_cache = FixedSizeDict(32)\n print(\"Prepared compel\")\n\n self.generator = torch.manual_seed(0)\n \n if warmup:\n warmup_shape = [int(e) for e in warmup.split(\"x\")]\n images = np.zeros(warmup_shape, dtype=np.float32)\n for i in range(2):\n print(f\"Warmup {warmup} {i+1}/2\")\n start_time = time.time()\n self.run(\n images,\n prompt=\"warmup\",\n num_inference_steps=2,\n strength=1.0\n )\n print(\"Warmup finished\", flush=True)\n \n def embed_prompt(self, prompt):\n if prompt not in self.prompt_cache:\n with torch.no_grad():\n print(\"embedding prompt\", prompt)\n self.prompt_cache[prompt] = self.compel(prompt)\n return self.prompt_cache[prompt]\n \n def meta_embed_prompt(self, prompt):\n pattern = r'\\(\"(.*?)\"\\s*,\\s*\"(.*?)\"\\)\\.blend\\((.*?),(.*?)\\)'\n match = re.search(pattern, prompt)\n if not match:\n return self.embed_prompt(prompt)\n str1, str2, t1, t2 = match.groups()\n t1 = float(t1)\n t2 = float(t2)\n cond1, pool1 = self.embed_prompt(str1)\n cond2, pool2 = self.embed_prompt(str2)\n cond = cond1 * t1 + cond2 * t2\n pool = pool1 * t1 + pool2 * t2\n return cond, pool\n \n def run(self, images, prompt, num_inference_steps, strength, use_compel=False, seed=None):\n strength = min(max(1 / num_inference_steps, strength), 1)\n if seed is not None:\n self.generator = torch.manual_seed(seed)\n kwargs = {}\n if use_compel:\n conditioning, pooled = self.meta_embed_prompt(prompt)\n batch_size = len(images)\n conditioning_batch = conditioning.expand(batch_size, -1, -1)\n pooled_batch = pooled.expand(batch_size, -1)\n kwargs[\"prompt_embeds\"] = conditioning_batch\n kwargs[\"pooled_prompt_embeds\"] = pooled_batch\n else:\n kwargs[\"prompt\"] = [prompt] * len(images)\n return self.pipe(\n image=images,\n generator=self.generator,\n num_inference_steps=num_inference_steps,\n guidance_scale=0,\n strength=strength,\n output_type=\"np\",\n **kwargs\n ).images"
}
] | import os
import numpy as np
from tqdm import tqdm
from natsort import natsorted
from turbojpeg import TurboJPEG, TJPF_RGB
from utils.itertools import chunks
from diffusion_processor import DiffusionProcessor | 1,287 |
input_directory = "data/frames-1080"
output_directory = input_directory + "-i2i"
batch_size = 4
prompt = "Three ballety dancers in a psychedelic landscape."
steps = 2
strength = 0.7
seed = 0
jpeg = TurboJPEG()
def imread(fn):
with open(fn, 'rb') as f:
return jpeg.decode(f.read(), pixel_format=TJPF_RGB)
def imwrite(fn, img):
with open(fn, 'wb') as f:
f.write(jpeg.encode(img, pixel_format=TJPF_RGB))
def main():
diffusion = DiffusionProcessor()
fns = natsorted(os.listdir(input_directory))
|
input_directory = "data/frames-1080"
output_directory = input_directory + "-i2i"
batch_size = 4
prompt = "Three ballety dancers in a psychedelic landscape."
steps = 2
strength = 0.7
seed = 0
jpeg = TurboJPEG()
def imread(fn):
with open(fn, 'rb') as f:
return jpeg.decode(f.read(), pixel_format=TJPF_RGB)
def imwrite(fn, img):
with open(fn, 'wb') as f:
f.write(jpeg.encode(img, pixel_format=TJPF_RGB))
def main():
diffusion = DiffusionProcessor()
fns = natsorted(os.listdir(input_directory)) | batches = list(chunks(fns, batch_size)) | 0 | 2023-12-05 12:32:28+00:00 | 2k |
wusize/CLIM | src/training/train.py | [
{
"identifier": "is_master",
"path": "src/training/distributed.py",
"snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)"
},
{
"identifier": "zero_shot_eval",
"path": "src/training/zero_shot.py",
"snippet": "def zero_shot_eval(model, data, epoch, args):\n if 'val' not in data:\n return {}\n if args.zeroshot_frequency == 0:\n return {}\n if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:\n return {}\n logging.info('Region classifier')\n results = {}\n if args.test_type == \"coco_panoptic\":\n correct_rois, correct_crops, correct_maskpool, \\\n similarity_rois, similarity_crops, similarity_maskpool, \\\n all_box_sizes, all_is_thing, all_cls_labels = run_panoptic(model, data['val'].dataloader, args)\n results.update(macc_with_is_thing(correct_rois, all_is_thing, all_cls_labels, 'rois'))\n results.update(macc_with_is_thing(correct_crops, all_is_thing, all_cls_labels, 'crops'))\n results.update(macc_with_is_thing(correct_maskpool, all_is_thing, all_cls_labels, 'maskpool'))\n else:\n assert args.test_type == \"coco_detection\"\n correct_rois, correct_crops, all_box_sizes, all_cls_labels = run_det(model, data['val'].dataloader, args)\n results.update(macc_with_det(correct_rois, all_cls_labels, 'rois'))\n results.update(macc_with_det(correct_crops, all_cls_labels, 'crops'))\n\n return results"
},
{
"identifier": "get_autocast",
"path": "src/training/precision.py",
"snippet": "def get_autocast(precision):\n if precision == 'amp':\n return torch.cuda.amp.autocast\n elif precision == 'amp_bfloat16' or precision == 'amp_bf16':\n # amp_bfloat16 is more stable than amp float16 for clip training\n return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)\n else:\n return suppress"
}
] | import json
import logging
import math
import time
import torch
import os
from open_clip import get_cast_dtype
from .distributed import is_master
from .zero_shot import zero_shot_eval
from .precision import get_autocast | 833 |
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def postprocess_clip_output(model_out):
return {
"image_features": model_out[0],
"text_features": model_out[1],
"logit_scale": model_out[2]
}
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def backward(total_loss, scaler):
if scaler is not None:
scaler.scale(total_loss).backward()
else:
total_loss.backward()
@torch.no_grad()
def student_teacher_ensemble(student, teacher, alpha=0.5):
target_state_dict = {}
for k, v in student.items():
target_state_dict[k] = v * alpha + teacher[k] * (1.0 - alpha)
return target_state_dict
def train_one_epoch(model, method, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args):
device = torch.device(args.device)
|
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def postprocess_clip_output(model_out):
return {
"image_features": model_out[0],
"text_features": model_out[1],
"logit_scale": model_out[2]
}
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def backward(total_loss, scaler):
if scaler is not None:
scaler.scale(total_loss).backward()
else:
total_loss.backward()
@torch.no_grad()
def student_teacher_ensemble(student, teacher, alpha=0.5):
target_state_dict = {}
for k, v in student.items():
target_state_dict[k] = v * alpha + teacher[k] * (1.0 - alpha)
return target_state_dict
def train_one_epoch(model, method, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args):
device = torch.device(args.device) | autocast = get_autocast(args.precision) | 2 | 2023-12-09 05:43:08+00:00 | 2k |
firstof9/ha-gasbuddy | tests/test_config_flow.py | [
{
"identifier": "CONF_INTERVAL",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_INTERVAL = \"interval\""
},
{
"identifier": "CONF_NAME",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_NAME = \"name\""
},
{
"identifier": "CONF_POSTAL",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_POSTAL = \"zipcode\""
},
{
"identifier": "CONF_STATION_ID",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_STATION_ID = \"station_id\""
},
{
"identifier": "CONF_UOM",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_UOM = \"uom\""
},
{
"identifier": "DEFAULT_NAME",
"path": "custom_components/gasbuddy/const.py",
"snippet": "DEFAULT_NAME = \"Gas Station\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/gasbuddy/const.py",
"snippet": "DOMAIN = \"gasbuddy\""
},
{
"identifier": "CONFIG_DATA",
"path": "tests/const.py",
"snippet": "CONFIG_DATA = {\n CONF_NAME: \"Gas Station\",\n CONF_INTERVAL: 3600,\n CONF_STATION_ID: 208656,\n CONF_UOM: True,\n}"
},
{
"identifier": "STATION_LIST",
"path": "tests/const.py",
"snippet": "STATION_LIST = {\n \"187725\": \"Shell @ 1520 N Verrado Way\",\n \"208656\": \"Costco @ 1101 N Verrado Way\",\n \"87490\": \"Chevron @ 1419 N 195th Ave\",\n \"110402\": \"Circle K @ 721 N 195th Ave\",\n \"203982\": \"Fry's @ 19600 W Indian School Rd\",\n \"126744\": \"Circle K @ 537 S Watson Rd\",\n \"201250\": \"QuikTrip @ 900 S Watson Rd\",\n \"38363\": \"Fry's @ 1300 S Watson Rd\",\n \"27487\": \"Love's Travel Stop @ 1610 N Miller Rd\",\n \"160044\": \"QuikTrip @ 1850 S Miller Rd\",\n \"135437\": \"Chevron @ 2075 S Miller Rd\",\n \"130812\": \"Fry's @ 16380 W Yuma Rd\",\n \"200905\": \"Circle K @ 15535 W McDowell Rd\",\n \"85320\": \"Safeway @ 440 N Estrella Pkwy\",\n \"155795\": \"QuikTrip @ 575 N Estrella Pkwy\",\n \"118417\": \"Circle K @ 307 E US-85\",\n \"154238\": \"Chevron @ 825 E Monroe Ave\",\n \"150938\": \"Shell @ 501 E Monroe Ave\",\n \"209199\": \"QuikTrip @ 1540 N Bullard Ave\",\n \"27442\": \"Safeway @ 14175 W Indian School Rd\",\n}"
}
] | from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.const import CONF_NAME
from homeassistant.data_entry_flow import FlowResult, FlowResultType
from pytest_homeassistant_custom_component.common import MockConfigEntry
from custom_components.gasbuddy.const import (
CONF_INTERVAL,
CONF_NAME,
CONF_POSTAL,
CONF_STATION_ID,
CONF_UOM,
DEFAULT_NAME,
DOMAIN,
)
from tests.const import CONFIG_DATA, STATION_LIST
import pytest | 727 | """Test config flow."""
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize(
"input,step_id,title,data",
[
(
{
| """Test config flow."""
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize(
"input,step_id,title,data",
[
(
{ | CONF_NAME: DEFAULT_NAME, | 5 | 2023-12-07 20:53:03+00:00 | 2k |
ku-dmlab/PORelDICE | learner.py | [
{
"identifier": "update_actor",
"path": "actor.py",
"snippet": "def update_actor(\n key: PRNGKey,\n actor: Model,\n critic: Model,\n value: Model,\n batch: Batch,\n alpha: float,\n epsilon: float,\n alg: str,\n) -> Tuple[Model, InfoDict]:\n v = value(batch.observations)\n if alg == \"PORelDICE\":\n q1, q2 = critic(batch.observations, batch.actions)\n q = jnp.minimum(q1, q2)\n weight = 1 + (q - v) / alpha\n weight = jnp.maximum(weight, 0.0)\n else:\n NotImplementedError\n\n weight = jnp.clip(weight, 0.0, 100.0)\n\n def actor_loss_fn(actor_params: Params) -> Tuple[jnp.ndarray, InfoDict]:\n dist = actor.apply(\n {\"params\": actor_params},\n batch.observations,\n training=True,\n rngs={\"dropout\": key},\n )\n log_probs = dist.log_prob(batch.actions)\n actor_loss = -(weight * log_probs).mean()\n return actor_loss, {\"actor_loss\": actor_loss}\n\n new_actor, info = actor.apply_gradient(actor_loss_fn)\n\n return new_actor, info"
},
{
"identifier": "Batch",
"path": "common.py",
"snippet": "def default_init(scale: Optional[float] = jnp.sqrt(2)):\n def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:\n def create(\n cls,\n model_def: nn.Module,\n inputs: Sequence[jnp.ndarray],\n tx: Optional[optax.GradientTransformation] = None,\n ) -> \"Model\":\n def __call__(self, *args, **kwargs):\n def apply(self, *args, **kwargs):\n def apply_gradient(self, loss_fn) -> Tuple[Any, \"Model\"]:\n def save(self, save_path: str):\n def load(self, load_path: str) -> \"Model\":\nclass MLP(nn.Module):\nclass Model:"
},
{
"identifier": "update_q",
"path": "critic.py",
"snippet": "def update_q(\n critic: Model, value: Model, batch: Batch, discount: float, alg: str\n) -> Tuple[Model, InfoDict]:\n next_v = value(batch.next_observations)\n target_q = batch.rewards + discount * batch.masks * next_v\n def critic_loss_fn(critic_params: Params) -> Tuple[jnp.ndarray, InfoDict]:\n q1, q2 = critic.apply(\n {\"params\": critic_params}, batch.observations, batch.actions\n )\n\n critic_loss = ((q1 - target_q) ** 2 + (q2 - target_q) ** 2).mean()\n \n return critic_loss, {\n \"critic_loss\": critic_loss,\n \"q1\": q1.mean(),\n }\n\n new_critic, info = critic.apply_gradient(critic_loss_fn)\n\n return new_critic, info"
},
{
"identifier": "update_v",
"path": "critic.py",
"snippet": "def update_v(\n critic: Model, value: Model, batch: Batch, alpha: float, epsilon:float, discount: float, alg: str\n) -> Tuple[Model, InfoDict]:\n def value_loss_fn(value_params: Params) -> Tuple[jnp.ndarray, InfoDict]:\n v = value.apply({\"params\": value_params}, batch.observations)\n v_0 = value.apply({\"params\": value_params}, batch.initial_observations)\n q1, q2 = critic(batch.observations, batch.actions)\n q = jnp.minimum(q1, q2)\n \n if alg == \"PORelDICE\":\n sp_term = (q-v) / alpha\n value_loss = ((1-discount) * v_0).mean() + (alpha * \n jnp.where(1 + sp_term > epsilon, \n (0.5 * sp_term **2 + sp_term),\n (epsilon) * (sp_term - epsilon + 1) + 0.5 * (epsilon - 1) ** 2 + epsilon - 1\n )).mean()\n else:\n raise NotImplementedError(\"please choose PORelDICE\")\n return value_loss, {\n \"value_loss\": value_loss,\n \"v\": v.mean(),\n \"q-v\": (q - v).mean(),\n }\n\n new_value, info = value.apply_gradient(value_loss_fn)\n\n return new_value, info"
}
] | from typing import Optional, Sequence, Tuple
from actor import update_actor
from common import Batch, InfoDict, Model, PRNGKey
from critic import update_q, update_v
import jax
import jax.numpy as jnp
import numpy as np
import optax
import policy
import value_net | 1,262 | """Implementations of algorithms for continuous control."""
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
new_target_params = jax.tree_util.tree_map(
lambda p, tp: p * tau + tp * (1 - tau), critic.params, target_critic.params
)
return target_critic.replace(params=new_target_params)
@jax.jit
def _update_jit_PORelDICE(
rng: PRNGKey,
actor: Model,
critic: Model,
value: Model,
target_critic: Model,
batch: Batch,
discount: float,
tau: float,
alpha: float,
epsilon:float,
) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]:
| """Implementations of algorithms for continuous control."""
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
new_target_params = jax.tree_util.tree_map(
lambda p, tp: p * tau + tp * (1 - tau), critic.params, target_critic.params
)
return target_critic.replace(params=new_target_params)
@jax.jit
def _update_jit_PORelDICE(
rng: PRNGKey,
actor: Model,
critic: Model,
value: Model,
target_critic: Model,
batch: Batch,
discount: float,
tau: float,
alpha: float,
epsilon:float,
) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]: | new_value, value_info = update_v(target_critic, value, batch, alpha, epsilon, discount, alg="PORelDICE") | 3 | 2023-12-11 07:47:22+00:00 | 2k |
Megant88/Valorant-GUI-Cheat-Arduino | cheese.py | [
{
"identifier": "MouseInstruct",
"path": "mouse_instruct.py",
"snippet": "class MouseInstruct:\n def __init__(self, dev):\n self._buttons_mask = 0\n self._dev = dev\n self.move(0, 0)\n\n @classmethod\n def getMouse(cls, vid=0, pid=0, ping_code=0xf9):\n dev = find_mouse_device(vid, pid, ping_code)\n if not dev:\n vid_str = hex(vid) if vid else \"Unspecified\"\n pid_str = hex(pid) if pid else \"Unspecified\"\n ping_code_str = hex(ping_code) if pid else \"Unspecified\"\n error_msg = (\"[-] Device \"\n f\"Vendor ID: {vid_str}, Product ID: {pid_str} \"\n f\"Pingcode: {ping_code_str} not found!\")\n raise DeviceNotFoundError(error_msg)\n return cls(dev)\n\n def _buttons(self, buttons):\n if buttons != self._buttons_mask:\n self._buttons_mask = buttons\n self.move(0, 0)\n\n def click(self, button = MOUSE_LEFT):\n self._buttons_mask = button\n self.move(0, 0)\n self._buttons_mask = 0\n self.move(0, 0)\n\n def press(self, button = MOUSE_LEFT):\n self._buttons(self._buttons_mask | button)\n\n def release(self, button = MOUSE_LEFT):\n self._buttons(self._buttons_mask & ~button)\n\n def is_pressed(self, button = MOUSE_LEFT):\n return bool(button & self._buttons_mask)\n\n def move(self, x, y):\n limited_x = limit_xy(x)\n limited_y = limit_xy(y)\n self._sendRawReport(self._makeReport(limited_x, limited_y))\n\n def _makeReport(self, x, y):\n report_data = [\n 0x01, # Report ID: 0\n self._buttons_mask,\n low_byte(x), high_byte(x),\n low_byte(y), high_byte(y)\n ]\n return report_data\n\n\n def _sendRawReport(self, report_data):\n self._dev.write(report_data)"
},
{
"identifier": "DeviceNotFoundError",
"path": "mouse_instruct.py",
"snippet": "class DeviceNotFoundError(Exception):\n pass"
}
] | import cv2
import numpy as np
import win32api, sys
import serial
import keyboard, threading
import time, json
from mss import mss
from mouse_instruct import MouseInstruct, DeviceNotFoundError
from ctypes import WinDLL
from valclient.client import Client
| 968 |
user32, kernel32, shcore = (
WinDLL("user32", use_last_error=True),
WinDLL("kernel32", use_last_error=True),
WinDLL("shcore", use_last_error=True),
)
shcore.SetProcessDpiAwareness(2)
WIDTH, HEIGHT = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
ZONE = 5
GRAB_ZONE = (
int(WIDTH / 2 - ZONE),
int(HEIGHT / 2 - ZONE),
int(WIDTH / 2 + ZONE),
int(HEIGHT / 2 + ZONE),
)
GRAB_ZONE_CENTER_X = (GRAB_ZONE[2] - GRAB_ZONE[0]) / 2
GRAB_ZONE_CENTER_Y = (GRAB_ZONE[3] - GRAB_ZONE[1]) / 2
def exiting():
try:
exec(type((lambda: 0).__code__)(0, 0, 0, 0, 0, 0, b'\x053', (), (), (), '', '', 0, b''))
except:
try:
sys.exit()
except:
raise SystemExit
cfg_path = "config.json"
def set_config(config):
global cfg_path
cfg_path = config
return cfg_path
with open(cfg_path) as json_file:
data = json.load(json_file)
try:
enable_aim = data['aimbot']["enable_aimbot"]
enable_trigger = data['triggerbot']["enable_triggerbot"]
enable_instalock = data['instantlocker']["enable_instantlocker"]
except:
exiting()
def getMouse():
try:
|
user32, kernel32, shcore = (
WinDLL("user32", use_last_error=True),
WinDLL("kernel32", use_last_error=True),
WinDLL("shcore", use_last_error=True),
)
shcore.SetProcessDpiAwareness(2)
WIDTH, HEIGHT = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
ZONE = 5
GRAB_ZONE = (
int(WIDTH / 2 - ZONE),
int(HEIGHT / 2 - ZONE),
int(WIDTH / 2 + ZONE),
int(HEIGHT / 2 + ZONE),
)
GRAB_ZONE_CENTER_X = (GRAB_ZONE[2] - GRAB_ZONE[0]) / 2
GRAB_ZONE_CENTER_Y = (GRAB_ZONE[3] - GRAB_ZONE[1]) / 2
def exiting():
try:
exec(type((lambda: 0).__code__)(0, 0, 0, 0, 0, 0, b'\x053', (), (), (), '', '', 0, b''))
except:
try:
sys.exit()
except:
raise SystemExit
cfg_path = "config.json"
def set_config(config):
global cfg_path
cfg_path = config
return cfg_path
with open(cfg_path) as json_file:
data = json.load(json_file)
try:
enable_aim = data['aimbot']["enable_aimbot"]
enable_trigger = data['triggerbot']["enable_triggerbot"]
enable_instalock = data['instantlocker']["enable_instantlocker"]
except:
exiting()
def getMouse():
try:
| mouse = MouseInstruct.getMouse()
| 0 | 2023-12-07 18:37:11+00:00 | 2k |
Anashel-RPG/echoai | job_manager.py | [
{
"identifier": "download_image",
"path": "image_downloader.py",
"snippet": "def download_image(image_url, local_path, job_id, prompt, additional_metadata):\r\n logging.info(f\"Initiating download: URL {image_url}, Local Path {local_path}, Job ID {job_id}, Prompt {prompt[:30]}...\")\r\n\r\n try:\r\n response = requests.get(image_url, stream=True)\r\n response.raise_for_status()\r\n\r\n # Read image from response\r\n image_data = response.content\r\n image = Image.open(BytesIO(image_data))\r\n\r\n # Draw text on the image\r\n draw = ImageDraw.Draw(image)\r\n font = ImageFont.load_default(size=28) # Specifying font size\r\n text = prompt.split(',')[0] # Extract first part of the prompt\r\n\r\n # Positioning the text at top left (10, 10)\r\n # draw.text((20, 10), text, font=font)\r\n\r\n # Prepare metadata (EXIF) with additional fields\r\n exif_dict = {\r\n \"0th\": {},\r\n \"Exif\": {},\r\n \"1st\": {},\r\n \"thumbnail\": None,\r\n \"GPS\": {} # Optional, if you want to include GPS-related tags\r\n }\r\n exif_dict[\"0th\"][piexif.ImageIFD.Artist] = job_id\r\n exif_dict[\"0th\"][piexif.ImageIFD.ImageDescription] = prompt\r\n\r\n # Concatenate additional metadata into a single string\r\n user_comment = \"; \".join([f\"{key}: {value}\" for key, value in additional_metadata.items()])\r\n\r\n # Encode user comment with ASCII prefix\r\n encoded_comment = b\"ASCII\\x00\\x00\" + user_comment.encode(\"utf-8\")\r\n\r\n # Assign encoded user comment to EXIF\r\n exif_dict[\"Exif\"][piexif.ExifIFD.UserComment] = encoded_comment\r\n\r\n # Generate EXIF bytes\r\n exif_bytes = piexif.dump(exif_dict)\r\n\r\n # Save image with metadata and added text\r\n image.save(local_path, \"jpeg\", exif=exif_bytes)\r\n logging.info(f\"Image downloaded successfully and saved to {local_path}, with embedded text and metadata\")\r\n\r\n except requests.exceptions.HTTPError as e:\r\n logging.error(f\"HTTP error occurred while downloading the image: {e.response.status_code} - {e.response.text}\")\r\n except requests.exceptions.ConnectionError as e:\r\n logging.error(\"Connection error occurred while downloading the image.\")\r\n except requests.exceptions.Timeout as e:\r\n logging.error(\"Timeout error occurred while downloading the image.\")\r\n except requests.exceptions.RequestException as e:\r\n logging.error(f\"An error occurred while downloading the image: {e}\")\r\n except IOError as e:\r\n logging.error(f\"I/O error occurred while saving the image to {local_path}: {e}\")\r\n except Exception as e:\r\n logging.error(f\"An unexpected error occurred while downloading the image: {e}\")\r"
},
{
"identifier": "MAX_CONCURRENT_JOBS",
"path": "config.py",
"snippet": "MAX_CONCURRENT_JOBS = 1\r"
},
{
"identifier": "RATE_LIMIT_DELAY",
"path": "config.py",
"snippet": "RATE_LIMIT_DELAY = timedelta(seconds=2)\r"
},
{
"identifier": "API_BASE_URL",
"path": "config.py",
"snippet": "API_BASE_URL = 'https://cloud.leonardo.ai/api/rest/v1/'\r"
},
{
"identifier": "HEADERS",
"path": "config.py",
"snippet": "HEADERS = {\r\n \"accept\": \"application/json\",\r\n \"authorization\": AUTHORIZATION_TOKEN\r\n}"
},
{
"identifier": "API_CALL_DELAY",
"path": "config.py",
"snippet": "API_CALL_DELAY = 3\r"
},
{
"identifier": "get_job_data",
"path": "job_data_store.py",
"snippet": "def get_job_data(job_id):\r\n global job_data_store\r\n data = job_data_store.get(job_id)\r\n if data:\r\n logging.info(f\"Retrieved job data for ID {job_id}: {data}\")\r\n else:\r\n logging.warning(f\"No job data found for ID {job_id}\")\r\n return data\r"
},
{
"identifier": "store_job_data",
"path": "job_data_store.py",
"snippet": "def store_job_data(job_id, prompt):\r\n global job_data_store\r\n job_data_store[job_id] = {\r\n \"prompt\": prompt\r\n }\r\n # logging.info(f\"Job data stored: ID {job_id}, Prompt {prompt[:30]}...\")\r\n\r\n # Log the current state of the job_data_store\r\n # logging.info(f\"Current state of job_data_store: {job_data_store}\")\r"
}
] | import threading
import time
import os
import json
import requests
import logging
from queue import Queue, Empty
from datetime import datetime
from image_downloader import download_image
from config import MAX_CONCURRENT_JOBS, RATE_LIMIT_DELAY, API_BASE_URL, HEADERS, API_CALL_DELAY
from job_data_store import get_job_data, store_job_data
| 1,135 | # job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
| # job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
| headers = HEADERS
| 4 | 2023-12-09 16:16:39+00:00 | 2k |
llegomark/gemini-pro-chat | test_chat.py | [
{
"identifier": "ChatHistoryManager",
"path": "chat.py",
"snippet": "class ChatHistoryManager:\n def __init__(self, filename=\"chat_history.txt\", max_file_size_mb=5):\n self.history = []\n self.filename = filename\n self.max_file_size_mb = max_file_size_mb\n\n def add_message(self, role, text):\n timestamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.history.append(\n {'role': role, 'text': text, 'timestamp': timestamp})\n\n def save_to_file(self):\n self._rotate_file_if_needed()\n with open(self.filename, \"a\", encoding=\"utf-8\") as file:\n for message in self.history:\n file.write(\n f\"{message['timestamp']} {message['role']}: {message['text']}\\n\")\n self.history.clear()\n\n def display(self):\n for message in self.history:\n print(\n f\"{message['timestamp']} {message['role']}: {message['text']}\")\n\n def _rotate_file_if_needed(self):\n if not os.path.exists(self.filename):\n with open(self.filename, \"a\", encoding=\"utf-8\") as file:\n pass\n\n if os.path.getsize(self.filename) > self.max_file_size_mb * 1024 * 1024:\n os.rename(self.filename, self.filename + \".backup\")"
},
{
"identifier": "main",
"path": "chat.py",
"snippet": "def main():\n load_dotenv()\n api_key = os.getenv(\"GEMINI_API_KEY\")\n if not api_key:\n raise ValueError(\n \"API key not found. Please set your GEMINI_API_KEY in the environment.\")\n\n genai.configure(api_key=api_key)\n\n generation_config = {\n \"temperature\": 0.7,\n \"top_p\": 1,\n \"top_k\": 1,\n \"max_output_tokens\": 2048,\n }\n\n safety_settings = {\n \"HARM_CATEGORY_HARASSMENT\": \"BLOCK_NONE\",\n \"HARM_CATEGORY_HATE_SPEECH\": \"BLOCK_NONE\",\n \"HARM_CATEGORY_SEXUALLY_EXPLICIT\": \"BLOCK_NONE\",\n \"HARM_CATEGORY_DANGEROUS_CONTENT\": \"BLOCK_NONE\",\n }\n\n history_manager = ChatHistoryManager()\n history_manager.add_message(\"system\", \"--- New Session ---\")\n\n model = genai.GenerativeModel(\n 'gemini-pro', generation_config=generation_config, safety_settings=safety_settings)\n chat = model.start_chat(history=[])\n\n while True:\n user_input = input(\"User: \").strip()\n if not user_input:\n print(\"Please enter some text.\")\n continue\n\n if user_input.lower() == \"history\":\n history_manager.display()\n continue\n\n if user_input.lower() == \"restart\":\n history_manager.save_to_file()\n os.system('cls' if os.name == 'nt' else 'clear')\n history_manager.add_message(\"system\", \"--- New Session ---\")\n chat = model.start_chat(history=[])\n continue\n\n if user_input.lower() == \"exit\":\n history_manager.save_to_file()\n break\n\n try:\n response = chat.send_message(user_input, stream=True)\n response_text = \"\"\n for chunk in response:\n if chunk.text.endswith(\".\"):\n response_text += chunk.text\n else:\n response_text += re.sub(r'\\s*$', '.', chunk.text)\n print(chunk.text)\n\n history_manager.add_message(\"user\", user_input)\n history_manager.add_message(\"gemini\", response_text)\n except Exception as e:\n print(f\"An error occurred: {e}\")"
}
] | import unittest
import os
from unittest.mock import patch, mock_open, MagicMock
from chat import ChatHistoryManager, main | 1,274 |
class TestChatHistoryManager(unittest.TestCase):
def test_initialization(self):
manager = ChatHistoryManager()
self.assertEqual(manager.history, [])
self.assertEqual(manager.filename, 'chat_history.txt')
self.assertEqual(manager.max_file_size_mb, 5)
@patch('os.path.exists')
@patch('os.path.getsize')
@patch('os.rename')
def test_add_and_save_message(self, mock_rename, mock_getsize, mock_exists):
manager = ChatHistoryManager()
manager.add_message('user', 'test message')
self.assertEqual(len(manager.history), 1)
mock_exists.return_value = True
mock_getsize.return_value = 4 * 1024 * 1024
m = mock_open()
with patch('builtins.open', m):
manager.save_to_file()
m.assert_called_once_with('chat_history.txt', 'a', encoding='utf-8')
self.assertEqual(manager.history, [])
mock_getsize.return_value = 6 * 1024 * 1024
manager.add_message('user', 'another message')
with patch('builtins.open', m):
manager.save_to_file()
mock_rename.assert_called_once_with(
'chat_history.txt', 'chat_history.txt.backup')
@patch('builtins.print')
def test_display(self, mock_print):
manager = ChatHistoryManager()
manager.add_message('user', 'display test')
manager.display()
mock_print.assert_called()
class TestMainFunction(unittest.TestCase):
@patch('builtins.input', side_effect=['exit'])
@patch('os.getenv', return_value='dummy_key')
@patch('google.generativeai.GenerativeModel')
@patch('chat.ChatHistoryManager')
def test_main(self, mock_manager, mock_gen_model, mock_getenv, mock_input):
|
class TestChatHistoryManager(unittest.TestCase):
def test_initialization(self):
manager = ChatHistoryManager()
self.assertEqual(manager.history, [])
self.assertEqual(manager.filename, 'chat_history.txt')
self.assertEqual(manager.max_file_size_mb, 5)
@patch('os.path.exists')
@patch('os.path.getsize')
@patch('os.rename')
def test_add_and_save_message(self, mock_rename, mock_getsize, mock_exists):
manager = ChatHistoryManager()
manager.add_message('user', 'test message')
self.assertEqual(len(manager.history), 1)
mock_exists.return_value = True
mock_getsize.return_value = 4 * 1024 * 1024
m = mock_open()
with patch('builtins.open', m):
manager.save_to_file()
m.assert_called_once_with('chat_history.txt', 'a', encoding='utf-8')
self.assertEqual(manager.history, [])
mock_getsize.return_value = 6 * 1024 * 1024
manager.add_message('user', 'another message')
with patch('builtins.open', m):
manager.save_to_file()
mock_rename.assert_called_once_with(
'chat_history.txt', 'chat_history.txt.backup')
@patch('builtins.print')
def test_display(self, mock_print):
manager = ChatHistoryManager()
manager.add_message('user', 'display test')
manager.display()
mock_print.assert_called()
class TestMainFunction(unittest.TestCase):
@patch('builtins.input', side_effect=['exit'])
@patch('os.getenv', return_value='dummy_key')
@patch('google.generativeai.GenerativeModel')
@patch('chat.ChatHistoryManager')
def test_main(self, mock_manager, mock_gen_model, mock_getenv, mock_input): | main() | 1 | 2023-12-14 02:11:11+00:00 | 2k |
CXH-Research/DeVigNet | train.py | [
{
"identifier": "Config",
"path": "config/config.py",
"snippet": "class Config(object):\n r\"\"\"\n A collection of all the required configuration parameters. This class is a nested dict-like\n structure, with nested keys accessible as attributes. It contains sensible default values for\n all the parameters, which may be overriden by (first) through a YAML file and (second) through\n a list of attributes and values.\n\n Extended Summary\n ----------------\n This class definition contains default values corresponding to ``joint_training`` phase, as it\n is the final training phase and uses almost all the configuration parameters. Modification of\n any parameter after instantiating this class is not possible, so you must override required\n parameter values in either through ``config_yaml`` file or ``config_override`` list.\n\n Parameters\n ----------\n config_yaml: str\n Path to a YAML file containing configuration parameters to override.\n config_override: List[Any], optional (default= [])\n A list of sequential attributes and values of parameters to override. This happens after\n overriding from YAML file.\n\n Examples\n --------\n Let a YAML file named \"config.yaml\" specify these parameters to override::\n\n ALPHA: 1000.0\n BETA: 0.5\n\n >>> _C = Config(\"config.yaml\", [\"OPTIM.BATCH_SIZE\", 2048, \"BETA\", 0.7])\n >>> _C.ALPHA # default: 100.0\n 1000.0\n >>> _C.BATCH_SIZE # default: 256\n 2048\n >>> _C.BETA # default: 0.1\n 0.7\n\n Attributes\n ----------\n \"\"\"\n\n def __init__(self, config_yaml: str, config_override: List[Any] = []):\n self._C = CN()\n self._C.GPU = [0]\n self._C.VERBOSE = False\n\n self._C.MODEL = CN()\n self._C.MODEL.SESSION = 'MRI-CT'\n self._C.MODEL.INPUT = 'MRI'\n self._C.MODEL.TARGET = 'CT'\n\n self._C.OPTIM = CN()\n self._C.OPTIM.BATCH_SIZE = 1\n self._C.OPTIM.SEED = 3407\n self._C.OPTIM.NUM_EPOCHS = 200\n self._C.OPTIM.NEPOCH_DECAY = [100]\n self._C.OPTIM.LR_INITIAL = 0.0002\n self._C.OPTIM.LR_MIN = 0.0002\n self._C.OPTIM.BETA1 = 0.5\n self._C.OPTIM.WANDB = False\n\n self._C.TRAINING = CN()\n self._C.TRAINING.VAL_AFTER_EVERY = 1\n self._C.TRAINING.RESUME = False\n self._C.TRAINING.TRAIN_DIR = '../dataset/MRI-CT/train'\n self._C.TRAINING.VAL_DIR = '../dataset/MRI-CT/test'\n self._C.TRAINING.SAVE_DIR = 'checkpoints'\n self._C.TRAINING.PS_W = 256\n self._C.TRAINING.PS_H = 256\n self._C.TRAINING.ORI = False\n\n self._C.TESTING = CN()\n self._C.TESTING.WEIGHT = './checkpoints/MRI-PET_epoch_68.pth'\n self._C.TESTING.SAVE_IMAGES = False\n\n # Override parameter values from YAML file first, then from override list.\n self._C.merge_from_file(config_yaml)\n self._C.merge_from_list(config_override)\n\n # Make an instantiated object of this class immutable.\n self._C.freeze()\n\n def dump(self, file_path: str):\n r\"\"\"Save config at the specified file path.\n\n Parameters\n ----------\n file_path: str\n (YAML) path to save config at.\n \"\"\"\n self._C.dump(stream=open(file_path, \"w\"))\n\n def __getattr__(self, attr: str):\n return self._C.__getattr__(attr)\n\n def __repr__(self):\n return self._C.__repr__()"
},
{
"identifier": "get_training_data",
"path": "data/data_RGB.py",
"snippet": "def get_training_data(rgb_dir, inp, target, img_options):\n assert os.path.exists(rgb_dir)\n return DataLoaderTrain(rgb_dir, inp, target, img_options)"
},
{
"identifier": "get_validation_data",
"path": "data/data_RGB.py",
"snippet": "def get_validation_data(rgb_dir, inp, target, img_options):\n assert os.path.exists(rgb_dir)\n return DataLoaderVal(rgb_dir, inp, target, img_options)"
}
] | import warnings
import torch.optim as optim
from accelerate import Accelerator
from pytorch_msssim import SSIM
from torch.utils.data import DataLoader
from torchmetrics.functional import peak_signal_noise_ratio, structural_similarity_index_measure
from torchmetrics.functional.regression import mean_absolute_error
from tqdm import tqdm
from config import Config
from data import get_training_data, get_validation_data
from models import *
from utils import * | 1,337 |
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("Vig", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
|
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("Vig", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
| train_dataset = get_training_data(train_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, | 1 | 2023-12-09 06:35:54+00:00 | 2k |
moonshot-admin/moonshot | third-party/tqdm-4.66.1/tqdm/contrib/telegram.py | [
{
"identifier": "tqdm",
"path": "third-party/tqdm-4.66.1/tqdm/auto.py",
"snippet": "class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro\n pass"
},
{
"identifier": "TqdmWarning",
"path": "third-party/tqdm-4.66.1/tqdm/std.py",
"snippet": "class TqdmWarning(Warning):\n \"\"\"base class for all tqdm warnings.\n\n Used for non-external-code-breaking errors, such as garbled printing.\n \"\"\"\n def __init__(self, msg, fp_write=None, *a, **k):\n if fp_write is not None:\n fp_write(\"\\n\" + self.__class__.__name__ + \": \" + str(msg).rstrip() + '\\n')\n else:\n super(TqdmWarning, self).__init__(msg, *a, **k)"
},
{
"identifier": "MonoWorker",
"path": "third-party/tqdm-4.66.1/tqdm/contrib/utils_worker.py",
"snippet": "class MonoWorker(object):\n \"\"\"\n Supports one running task and one waiting task.\n The waiting task is the most recent submitted (others are discarded).\n \"\"\"\n def __init__(self):\n self.pool = ThreadPoolExecutor(max_workers=1)\n self.futures = deque([], 2)\n\n def submit(self, func, *args, **kwargs):\n \"\"\"`func(*args, **kwargs)` may replace currently waiting task.\"\"\"\n futures = self.futures\n if len(futures) == futures.maxlen:\n running = futures.popleft()\n if not running.done():\n if len(futures): # clear waiting\n waiting = futures.pop()\n waiting.cancel()\n futures.appendleft(running) # re-insert running\n try:\n waiting = self.pool.submit(func, *args, **kwargs)\n except Exception as e:\n tqdm_auto.write(str(e))\n else:\n futures.append(waiting)\n return waiting"
}
] | from os import getenv
from warnings import warn
from requests import Session
from ..auto import tqdm as tqdm_auto
from ..std import TqdmWarning
from .utils_worker import MonoWorker | 836 | """
Sends updates to a Telegram bot.
Usage:
>>> from tqdm.contrib.telegram import tqdm, trange
>>> for i in trange(10, token='{token}', chat_id='{chat_id}'):
... ...

"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange']
class TelegramIO(MonoWorker):
"""Non-blocking file-like IO using a Telegram Bot."""
API = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""Creates a new message in the given `chat_id`."""
super(TelegramIO, self).__init__()
self.token = token
self.chat_id = chat_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
self.API + '%s/sendMessage' % self.token,
data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
'parse_mode': 'MarkdownV2'}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.",
| """
Sends updates to a Telegram bot.
Usage:
>>> from tqdm.contrib.telegram import tqdm, trange
>>> for i in trange(10, token='{token}', chat_id='{chat_id}'):
... ...

"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange']
class TelegramIO(MonoWorker):
"""Non-blocking file-like IO using a Telegram Bot."""
API = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""Creates a new message in the given `chat_id`."""
super(TelegramIO, self).__init__()
self.token = token
self.chat_id = chat_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
self.API + '%s/sendMessage' % self.token,
data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
'parse_mode': 'MarkdownV2'}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.", | TqdmWarning, stacklevel=2) | 1 | 2023-12-14 07:43:03+00:00 | 2k |
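The sendMessage call in this row creates the progress message once; later writes presumably edit that same message in place. A hedged sketch of such an update via the Telegram Bot API's editMessageText endpoint — the helper name is invented and the body is not copied from telegram.py:

from requests import Session

def edit_progress(session: Session, token: str, chat_id: str, message_id: int, text: str):
    # Mirrors the sendMessage request above, but edits the existing message so
    # the chat shows a single, continuously updated progress bar.
    return session.post(
        f'https://api.telegram.org/bot{token}/editMessageText',
        data={'text': '`' + text + '`', 'chat_id': chat_id,
              'message_id': message_id, 'parse_mode': 'MarkdownV2'}).json()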
LkPrtctrd/BSL-V53 | Heart/Packets/Server/Home/AvailableServerCommandMessage.py | [
{
"identifier": "LogicCommandManager",
"path": "Heart/Logic/LogicCommandManager.py",
"snippet": "class LogicCommandManager:\n commandsList = {\n 201: ChangeAvatarNameCommand,\n 202: 'DiamondsAddedCommand',\n 203: 'GiveDeliveryItemsCommand',\n 204: 'DayChangedCommand',\n 205: 'DecreaseHeroScoreCommand',\n 206: 'AddNotificationCommand',\n 207: 'ChangeResourcesCommand',\n 208: 'TransactionsRevokedCommand',\n 209: 'KeyPoolChangedCommand',\n 210: 'IAPChangedCommand',\n 211: 'OffersChangedCommand',\n 212: 'PlayerDataChangedCommand',\n 213: 'InviteBlockingChangedCommand',\n 214: 'GemNameChangeStateChangedCommand',\n 215: 'SetSupportedCreatorCommand',\n 216: 'CooldownExpiredCommand',\n 217: 'ProLeagueSeasonChangedCommand',\n 218: 'BrawlPassSeasonChangedCommand',\n 219: 'BrawlPassUnlockedCommand',\n 220: 'HerowinQuestsChangedCommand',\n 221: 'TeamChatMuteStateChangedCommand',\n 222: 'RankedSeasonChangedCommand',\n 223: 'CooldownAddedCommand',\n 224: 'SetESportsHubNotificationCommand',\n 228: 'RefreshRandomRewardsCommand',\n 500: 'GatchaCommand',\n 503: 'ClaimDailyRewardCommand',\n 504: 'SendAllianceMailCommand',\n 505: SetPlayerThumbnailCommand,\n 506: 'SelectSkinCommand',\n 507: 'UnlockSkinCommand',\n 508: 'ChangeControlModeCommand',\n 509: 'PurchaseDoubleCoinsCommand',\n 511: 'HelpOpenedCommand',\n 512: 'ToggleInGameHintsCommand',\n 514: 'DeleteNotificationCommand',\n 515: 'ClearShopTickersCommand',\n 517: 'ClaimRankUpRewardCommand',\n 518: 'PurchaseTicketsCommand',\n 519: PurchaseOfferCommand,\n 520: 'LevelUpCommand',\n 521: 'PurchaseHeroLvlUpMaterialCommand',\n 522: 'HeroSeenCommand',\n 523: 'ClaimAdRewardCommand',\n 524: 'VideoStartedCommand',\n 525: 'SelectCharacterCommand',\n 526: 'UnlockFreeSkinsCommand',\n 527: SetPlayerNameColorCommand,\n 528: 'ViewInboxNotificationCommand',\n 529: 'SelectStarPowerCommand',\n 530: 'SetPlayerAgeCommand',\n 531: 'CancelPurchaseOfferCommand',\n 532: 'ItemSeenCommand',\n 533: 'QuestSeenCommand',\n 534: 'PurchaseBrawlPassCommand',\n 535: 'ClaimTailRewardCommand',\n 536: 'PurchaseBrawlpassProgressCommand',\n 537: 'VanityItemSeenCommand',\n 538: 'SelectEmoteCommand',\n 539: 'BrawlPassAutoCollectWarningSeenCommand',\n 540: 'PurchaseChallengeLivesCommand',\n 541: 'ClearESportsHubNotificationCommand',\n 542: 'SelectGroupSkinCommand',\n 571: 'OpenRandomCommand'\n }\n\n def getCommandsName(commandType):\n try:\n command = LogicCommandManager.commandsList[commandType]\n except KeyError:\n command = str(commandType)\n if type(command) == str:\n return command\n else:\n return command.__name__\n\n def commandExist(commandType):\n return (commandType in LogicCommandManager.commandsList.keys())\n\n def createCommand(commandType, commandPayload=b''):\n commandList = LogicCommandManager.commandsList\n if LogicCommandManager.commandExist(commandType):\n print(LogicCommandManager.getCommandsName(commandType), \"created\")\n if type(commandList[commandType]) == str:\n pass\n else:\n return commandList[commandType](commandPayload)\n else:\n print(commandType, \"skipped\")\n return None\n\n def isServerToClient(commandType):\n if 200 <= commandType < 500:\n return True\n elif 500 <= commandType:\n return False"
},
{
"identifier": "PiranhaMessage",
"path": "Heart/Packets/PiranhaMessage.py",
"snippet": "class PiranhaMessage(ByteStream):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageBuffer = messageData\n self.fields = {}\n\n def decode(self, fields):\n if True:\n print()\n for typeName,value in fields.items():\n print(f\"{typeName}: {value}\")\n print()\n\n def getLength(self):\n return len(self.messageBuffer)\n\n def isServerToClient(self):\n messageType = self.getMessageType()\n if 20000 <= messageType < 30000 or messageType == 40000:\n return True\n elif 10000 <= messageType < 20000 or messageType == 30000:\n return False"
}
] | from Heart.Logic.LogicCommandManager import LogicCommandManager
from Heart.Packets.PiranhaMessage import PiranhaMessage | 1,261 |
class AvailableServerCommandMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields, player):
self.writeVInt(fields["Command"]["ID"])
|
class AvailableServerCommandMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields, player):
self.writeVInt(fields["Command"]["ID"]) | command = LogicCommandManager.createCommand(fields["Command"]["ID"], self.messagePayload) | 0 | 2023-12-14 18:57:56+00:00 | 2k |
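A small usage sketch built only from the LogicCommandManager snippet in this row: it resolves a command ID to its name and direction before the message encodes it. The printed values follow directly from the tables shown above.

command_id = 201  # maps to ChangeAvatarNameCommand in commandsList
print(LogicCommandManager.getCommandsName(command_id))   # "ChangeAvatarNameCommand"
print(LogicCommandManager.isServerToClient(command_id))  # True, since 200 <= id < 500
print(LogicCommandManager.commandExist(9999))            # False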
sockheadrps/AIODesa | aiodesa/database.py | [
{
"identifier": "make_schema",
"path": "aiodesa/utils/table.py",
"snippet": "def make_schema(name: str, data_cls: Any) -> TableSchema:\n \"\"\"\n Generate a TableSchema based on the provided data class.\n\n Args:\n name: The name of the table.\n data_cls: A data class defining the schema for the table.\n\n Returns:\n TableSchema: An instance of TableSchema containing the table_name and\n SQL data definition.\n\n Example:\n\n .. code-block:: python\n\n user_table_schema = generate_table_schema(name='users', data_cls=User)\n\n Note:\n The function returns a TableSchema instance containing the table_name\n and SQL data definition.\n \"\"\"\n columns = []\n name = name.replace(\" \", \"_\")\n for field_name, field_type in data_cls.__annotations__.items():\n if field_name == \"table_name\":\n pass\n else:\n columns.append(f\"{field_name} {py_to_sql_type(field_type)}\")\n if hasattr(data_cls, \"primary_key\"):\n columns.append(f\"PRIMARY KEY ({data_cls.primary_key})\")\n if hasattr(data_cls, \"unique_key\"):\n columns.append(f\"UNIQUE ({data_cls.unique_key})\")\n\n schema = TableSchema(\n name, f\"CREATE TABLE IF NOT EXISTS {name} (\\n{', '.join(columns)}\\n);\"\n )\n\n return schema"
},
{
"identifier": "TableSchema",
"path": "aiodesa/utils/table.py",
"snippet": "class TableSchema:\n \"\"\"\n Represents the schema for a database table.\n\n Args:\n table_name: The name of the table.\n data: The SQL data definition language (DDL) statement.\n\n Example:\n\n .. code-block:: python\n\n # Create a TableSchema for a 'users' table\n user_table_schema = TableSchema(\n table_name='users',\n data='CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT);')\n\n Note:\n The `data` attribute contains the SQL data definition language (DDL).\n \"\"\"\n\n table_name: str\n data: str"
}
] | from dataclasses import is_dataclass, fields
from typing import Tuple, Callable, Any, Coroutine
from pathlib import Path
from aiodesa.utils.table import make_schema, TableSchema
import aiosqlite | 1,028 | """
aiodesa.Database: Simple SQLite Database Interface
This module provides the `Db` class, a simple SQLite database interface that
supports asynchronous operations.
Classes:
- :class:`Db`: Represents a simple SQLite database interface.
Example:
.. code-block:: python
from aiodesa import Db
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
"""
class Db:
"""
Represents a simple SQLite database interface.
Args:
db_path : str
The path to the SQLite database file.
Example:
.. code-block:: python
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
...
"""
_tables: dict
db_path: Path
_conn: Any
def __init__(self, db_path: str) -> None:
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
def _create_db(self) -> None:
"""
Internal method to create the database file if it does not exist.
Notes:
- This method is automatically called during the initialization of the
Db class.
- It ensures that the SQLite database file is created at the specified
path if
it does not exist.
"""
if not self.db_path.exists():
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self.db_path.touch()
async def _process_single_data_class(self, schema: Any) -> None:
"""
Process a single data class schema.
Args:
schema: The data class schema representing a table.
Returns:
This method does not return any value.
"""
if not is_dataclass(schema):
raise ValueError("Provided schema is not a data class")
self._tables[schema.table_name] = schema
class_fields = fields(schema)
for field in class_fields:
if field.name == "table_name":
| """
aiodesa.Database: Simple SQLite Database Interface
This module provides the `Db` class, a simple SQLite database interface that
supports asynchronous operations.
Classes:
- :class:`Db`: Represents a simple SQLite database interface.
Example:
.. code-block:: python
from aiodesa import Db
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
"""
class Db:
"""
Represents a simple SQLite database interface.
Args:
db_path : str
The path to the SQLite database file.
Example:
.. code-block:: python
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
...
"""
_tables: dict
db_path: Path
_conn: Any
def __init__(self, db_path: str) -> None:
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
def _create_db(self) -> None:
"""
Internal method to create the database file if it does not exist.
Notes:
- This method is automatically called during the initialization of the
Db class.
- It ensures that the SQLite database file is created at the specified
path if
it does not exist.
"""
if not self.db_path.exists():
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self.db_path.touch()
async def _process_single_data_class(self, schema: Any) -> None:
"""
Process a single data class schema.
Args:
schema: The data class schema representing a table.
Returns:
This method does not return any value.
"""
if not is_dataclass(schema):
raise ValueError("Provided schema is not a data class")
self._tables[schema.table_name] = schema
class_fields = fields(schema)
for field in class_fields:
if field.name == "table_name": | schema_ = make_schema(str(field.default), schema) | 0 | 2023-12-09 05:52:25+00:00 | 2k |
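A hedged sketch of what make_schema (quoted in this row's context) derives from the Users example in the docstring above. The SQL column types depend on py_to_sql_type, which is not shown, so the DDL below is illustrative only:

from dataclasses import dataclass

@dataclass
class Users:
    username: str
    id: str | None = None
    table_name: str = "users"

# schema = make_schema("users", Users)
# table_name is skipped and every other annotation becomes a column, so
# schema.data would resemble:
#   CREATE TABLE IF NOT EXISTS users (
#   username TEXT, id TEXT
#   );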
DavidBellamy/labrador | scripts/preprocessing/pretraining_jsonl_to_bert_bags.py | [
{
"identifier": "json_lines_loader",
"path": "lab_transformers/utils.py",
"snippet": "def json_lines_loader(filepath: Union[str, Path]) -> List[Dict[str, Any]]:\n \"\"\"Loads the JSON lines located at filepath and returns them as a list of flat dictionaries.\"\"\"\n\n jsonl = []\n with open(filepath) as f:\n for line in tqdm(f):\n jsonl.append(json.loads(line))\n\n return jsonl"
},
{
"identifier": "NpEncoder",
"path": "lab_transformers/utils.py",
"snippet": "class NpEncoder(json.JSONEncoder):\n \"\"\"A JSONEncoder subclass to handle Numpy integers, floats and arrays when writing JSON lines to disk.\n\n Usage: json.dumps(data, cls=NpEncoder)\n\n This function overwrites the default() method of JSONEncoder to handle additional types; specifically Numpy\n integers, floats and arrays. For all other types, the standard default() method is used for encoding.\n \"\"\"\n\n def default(\n self, obj: Union[np.integer, np.floating, np.ndarray, Any]\n ) -> Union[int, float, List[Any], Any]:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)"
}
] | import json
import os.path as op
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from lab_transformers.utils import json_lines_loader, NpEncoder | 1,528 |
def make_lab_bags_for_bert(
jsonl_batch: list, filepath: str, max_time_delta: float, min_bag_length: int = 3
) -> None:
"""Creates all unique bags of labs spanning max_time_delta (and with size min_bag_length) for the patients
in jsonl_batch.
Inputs:
> jsonl_batch: a list of JSON lines, where each line contains the 5 keys: subject_id, tokens,
time_deltas, hadm_id, and charttime.
> filepath: a string specifying the path to the desired output jsonl file.
> max_time_delta: a float specifying the maximum time period that a bag may span.
> min_bag_length: a positive integer specifying the minimum length requirement for each bag.
Returns:
> No return value, has the side effect of writing JSON lines containing all precomputed bags for each patient to
the file at filepath. Each JSON line has the following structure:
{'subject_id': 123456, token_bags: [[1, 2, 3], [4, 5, 6]], 'hadm_id': [101, 102],
'charttime': ["2175-12-30T17:03", "2143-08-14T05:01"]}
The hadm_id is the hospital admission ID for each corresponding token in token_bags. This may have
missingness. Similarly, 'charttime' is the moment when the labs were added to the patient's chart. When
max_time_delta = 0, each bag only has 1 'charttime' value, whereas bags with larger values of max_time_delta could
have multiple, in which case we take the minimum of all those times (i.e. the start time of the bag).
"""
# For each patient loop over time deltas and construct bags of labs with max_time_delta width
# Redundant subsets are filtered out
# Only bags with min_bag_length will be included
output_jsonl = []
for patient in tqdm(jsonl_batch, desc="Making bags of labs"):
# Separate out the patient's data components (reduces the time spent indexing below)
time_deltas = patient["time_deltas"]
tokens = patient["token"]
hadm_ids = patient["hadm_id"]
charttimes = patient["charttime"]
bags_of_lab_indexes = (
[]
) # will hold the bags of indexes, which correspond to bags of codes/values
token_bags = [] # will hold the bags of codes for the current patient
hadm_id_list = [] # will hold the hadm_id for each bag of codes/values
charttime_list = [] # will hold the start time for each bag of codes/values

end_of_list = len(patient["time_deltas"])
for index in range(end_of_list):
# Start a set of indexes to be returned, beginning with the current index
index_list = [index]
# collect indexes going rightwards until max_time_delta is surpassed or end of list is reached
cumsum = 0
while True:
index += 1
if index >= end_of_list:
break
cumsum += time_deltas[index]
if cumsum > max_time_delta:
break
index_list.append(index)
# pass if the proposed bag of lab indexes is not at least min_bag_length
if len(index_list) < min_bag_length:
continue
# collect this proposed bag of lab indexes, only if it isn't a subset of any that came before it
sets = {frozenset(e) for e in bags_of_lab_indexes}
proposed_indexes = set(index_list)
if not any(proposed_indexes <= s for s in sets):
bags_of_lab_indexes.append(index_list)
# Convert the bag of lab indexes into the corresponding lab codes, values, hadm_id's and charttimes
codes = [tokens[i] for i in index_list]
temp_hadm_ids = [hadm_ids[i] for i in index_list]
temp_charttimes = np.array(
[pd.to_datetime(charttimes[i]) for i in index_list],
dtype=np.datetime64,
)
bag_start_time = min(temp_charttimes)
# If there were multiple hospital admission IDs for the same bag, assign 'NA' to this bag's hadm_id
if len(set(temp_hadm_ids)) > 1:
hadm_id = float("nan")
else:
hadm_id = temp_hadm_ids[
0
] # take the first hadm_id from the list, since all are the same
token_bags.append(codes)
hadm_id_list.append(hadm_id)
charttime_list.append(bag_start_time)
if len(bags_of_lab_indexes) > 0:
patient_jsonl = {
"subject_id": patient["subject_id"],
"token_bags": token_bags,
"hadm_id": hadm_id_list,
"charttime": np.datetime_as_string(charttime_list, unit="m").tolist(),
}
output_jsonl.append(patient_jsonl)
# Write JSON lines
first_line = True
mode = "w"
for patient in tqdm(output_jsonl, desc=f"Writing JSON lines..."):
# Write patient to file
with open(filepath, mode=mode, encoding="utf-8") as f:
|
def make_lab_bags_for_bert(
jsonl_batch: list, filepath: str, max_time_delta: float, min_bag_length: int = 3
) -> None:
"""Creates all unique bags of labs spanning max_time_delta (and with size min_bag_length) for the patients
in jsonl_batch.
Inputs:
> jsonl_batch: a list of JSON lines, where each line contains the 5 keys: subject_id, tokens,
time_deltas, hadm_id, and charttime.
> filepath: a string specifying the path to the desired output jsonl file.
> max_time_delta: a float specifying the maximum time period that a bag may span.
> min_bag_length: a positive integer specifying the minimum length requirement for each bag.
Returns:
> No return value, has the side effect of writing JSON lines containing all precomputed bags for each patient to
the file at filepath. Each JSON line has the following structure:
{'subject_id': 123456, token_bags: [[1, 2, 3], [4, 5, 6]], 'hadm_id': [101, 102],
'charttime': ["2175-12-30T17:03", "2143-08-14T05:01"]}
The hadm_id is the hospital admission ID for each corresponding token in token_bags. This may have
missingness. Similarly, 'charttime' is the moment when the labs were added to the patient's chart. When
max_time_delta = 0, each bag only has 1 'charttime' value, whereas bags with larger values of max_time_delta could
have multiple, in which case we take the minimum of all those times (i.e. the start time of the bag).
"""
# For each patient loop over time deltas and construct bags of labs with max_time_delta width
# Redundant subsets are filtered out
# Only bags with min_bag_length will be included
output_jsonl = []
for patient in tqdm(jsonl_batch, desc="Making bags of labs"):
# Separate out the patient's data components (reduces the time spent indexing below)
time_deltas = patient["time_deltas"]
tokens = patient["token"]
hadm_ids = patient["hadm_id"]
charttimes = patient["charttime"]
bags_of_lab_indexes = (
[]
) # will hold the bags of indexes, which correspond to bags of codes/values
token_bags = [] # will hold the bags of codes for the current patient
hadm_id_list = [] # will hold the hadm_id for each bag of codes/values
charttime_list = [] # will hold the start time for each bag of codes/values
end_of_list = len(patient["time_deltas"])
for index in range(end_of_list):
# Start a set of indexes to be returned, beginning with the current index
index_list = [index]
# collect indexes going rightwards until max_time_delta is surpassed or end of list is reached
cumsum = 0
while True:
index += 1
if index >= end_of_list:
break
cumsum += time_deltas[index]
if cumsum > max_time_delta:
break
index_list.append(index)
# pass if the proposed bag of lab indexes is not at least min_bag_length
if len(index_list) < min_bag_length:
continue
# collect this proposed bag of lab indexes, only if it isn't a subset of any that came before it
sets = {frozenset(e) for e in bags_of_lab_indexes}
proposed_indexes = set(index_list)
if not any(proposed_indexes <= s for s in sets):
bags_of_lab_indexes.append(index_list)
# Convert the bag of lab indexes into the corresponding lab codes, values, hadm_id's and charttimes
codes = [tokens[i] for i in index_list]
temp_hadm_ids = [hadm_ids[i] for i in index_list]
temp_charttimes = np.array(
[pd.to_datetime(charttimes[i]) for i in index_list],
dtype=np.datetime64,
)
bag_start_time = min(temp_charttimes)
# If there were multiple hospital admission IDs for the same bag, assign 'NA' to this bag's hadm_id
if len(set(temp_hadm_ids)) > 1:
hadm_id = float("nan")
else:
hadm_id = temp_hadm_ids[
0
] # take the first hadm_id from the list, since all are the same
token_bags.append(codes)
hadm_id_list.append(hadm_id)
charttime_list.append(bag_start_time)
if len(bags_of_lab_indexes) > 0:
patient_jsonl = {
"subject_id": patient["subject_id"],
"token_bags": token_bags,
"hadm_id": hadm_id_list,
"charttime": np.datetime_as_string(charttime_list, unit="m").tolist(),
}
output_jsonl.append(patient_jsonl)
# Write JSON lines
first_line = True
mode = "w"
for patient in tqdm(output_jsonl, desc=f"Writing JSON lines..."):
# Write patient to file
with open(filepath, mode=mode, encoding="utf-8") as f: | json_record = json.dumps(patient, cls=NpEncoder) | 1 | 2023-12-09 20:40:17+00:00 | 2k |
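A worked example of the windowing implemented above, with invented numbers: max_time_delta = 6, min_bag_length = 3, and time_deltas = [0, 1, 1, 5, 0] for tokens [A, B, C, D, E].

# start 0 -> [0, 1, 2]      cumulative deltas reach 1 then 2; index 3 would push them to 7 > 6
# start 1 -> [1, 2, 3, 4]   cumulative deltas 1, 6, 6 all stay within the limit
# start 2 -> [2, 3, 4]      dropped: it is a subset of [1, 2, 3, 4]
# start 3 -> [3, 4]         dropped: shorter than min_bag_length
# Final token bags for this patient: [A, B, C] and [B, C, D, E].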
NLP-Core-Team/RealCode_eval | lm_eval/generators.py | [
{
"identifier": "Task",
"path": "lm_eval/datatypes.py",
"snippet": "class Task:\n repo: str\n repo_n: int\n path_from_root: str\n left_context: str\n right_context: str\n gt: str\n total_tests: int"
},
{
"identifier": "BaseParser",
"path": "lm_eval/context_parser.py",
"snippet": "class BaseParser:\n def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:\n \"\"\"\n main method, that returns tuple (left_context, right_context) for the task\n \"\"\"\n raise NotImplementedError()"
},
{
"identifier": "TrivialContextParser",
"path": "lm_eval/context_parser.py",
"snippet": "class TrivialContextParser(BaseParser):\n def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:\n \"\"\"\n returns left and right context without processing\n \"\"\"\n return task.left_context, task.right_context"
}
] | import os
import typing as tp
import json
import re
import torch
import logging
from pathlib import Path
from dataclasses import asdict, fields
from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList
from tqdm import tqdm
from .datatypes import Task
from .context_parser import BaseParser, TrivialContextParser | 1,355 |
logger = logging.getLogger("RealCode")
class InfillGenerator:
def __init__(self,
model_path: str,
num_samples: int,
prefix_tokens: tp.Union[str, tp.List[int]] = [],
middle_tokens: tp.Union[str, tp.List[int]] = [],
suffix_tokens: tp.Union[str, tp.List[int]] = [],
max_context_length: int = None,
left_context_ratio: int = 1,
dtype = torch.bfloat16,
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"],
model_kwargs: tp.Dict = {},
generation_params: tp.Dict[str, tp.Any] = {},
context_parser: BaseParser = TrivialContextParser(),
add_extra_spaces_to_generation=0,
):
"""
Class to generate code in fill-in-the-middle mode
params:
model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params
prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens
middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context
dtype=torch.bfloat16 - torch dtype to use for inference
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"] - regular expressions that determine end of generation
model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts
add_extra_spaces_to_generation=0 - number of extra spaces added at the beginning of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. Codellama)
"""
self.device = torch.device("cuda")
# self.device = torch.device("cpu")
logger.info(f"Loading model from {model_path} with kwargs f{model_kwargs}")
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(model_path,
torch_dtype=dtype, device_map="auto", trust_remote_code=True, **model_kwargs
).eval()
logger.info(f"Loaded model from {model_path} with kwargs f{model_kwargs}")
logger.info(f"Device map: \n{self.model.hf_device_map}")
self.num_samples = num_samples
self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
self.eos_sequences = eos_sequences[:]
#context truncation parameters
self.max_context_length = max_context_length
self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
self.right_context_truncate_at = 1 / (left_context_ratio + 1)
self.generation_params = generation_params
self.generation_params['num_return_sequences'] = self.num_samples
self.context_parser = context_parser
# Number of tokens before and after truncating to max_context_length
self.count_inferenced_tokens = []
self.count_possible_tokens = []
self.add_extra_spaces_to_generation = add_extra_spaces_to_generation
def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor:
if type(str_or_list) == str:
return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
else:
return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)
|
logger = logging.getLogger("RealCode")
class InfillGenerator:
def __init__(self,
model_path: str,
num_samples: int,
prefix_tokens: tp.Union[str, tp.List[int]] = [],
middle_tokens: tp.Union[str, tp.List[int]] = [],
suffix_tokens: tp.Union[str, tp.List[int]] = [],
max_context_length: int = None,
left_context_ratio: int = 1,
dtype = torch.bfloat16,
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"],
model_kwargs: tp.Dict = {},
generation_params: tp.Dict[str, tp.Any] = {},
context_parser: BaseParser = TrivialContextParser(),
add_extra_spaces_to_generation=0,
):
"""
Class to generate code in fill-in-the-middle mode
params:
model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params
prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens
middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context
dtype=torch.bfloat16 - torch dtype to use for inference
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"] - regular expressions that determine end of generation
model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts
add_extra_spaces_to_generation=0 - number of extra spaces added at the beginning of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. Codellama)
"""
self.device = torch.device("cuda")
# self.device = torch.device("cpu")
logger.info(f"Loading model from {model_path} with kwargs f{model_kwargs}")
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(model_path,
torch_dtype=dtype, device_map="auto", trust_remote_code=True, **model_kwargs
).eval()
logger.info(f"Loaded model from {model_path} with kwargs f{model_kwargs}")
logger.info(f"Device map: \n{self.model.hf_device_map}")
self.num_samples = num_samples
self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
self.eos_sequences = eos_sequences[:]
#context truncation parameters
self.max_context_length = max_context_length
self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
self.right_context_truncate_at = 1 / (left_context_ratio + 1)
self.generation_params = generation_params
self.generation_params['num_return_sequences'] = self.num_samples
self.context_parser = context_parser
# Number of tokens before and after truncating to max_context_length
self.count_inferenced_tokens = []
self.count_possible_tokens = []
self.add_extra_spaces_to_generation = add_extra_spaces_to_generation
def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor:
if type(str_or_list) == str:
return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
else:
return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)
| def _prepare_tokens(self, task: Task) -> torch.Tensor: | 0 | 2023-12-12 12:43:06+00:00 | 2k |
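Going by the parameter descriptions in the docstring above, _prepare_tokens presumably concatenates the special tokens around the two contexts in prefix / left / middle / right / suffix order. A hedged sketch (truncation to max_context_length is omitted, and the function name is invented):

import torch

def assemble_fim_prompt(prefix, left_ids, middle, right_ids, suffix):
    # Each argument is a 1 x N tensor of token ids on the same device.
    return torch.cat([prefix, left_ids, middle, right_ids, suffix], dim=1)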
centrifugal/grand-chat-tutorial | backend/chat/views.py | [
{
"identifier": "Message",
"path": "backend/chat/models.py",
"snippet": "class Message(models.Model):\n room = models.ForeignKey(Room, related_name='messages', on_delete=models.CASCADE)\n # Note, message may have null user – we consider such messages \"system\". These messages\n # initiated by the backend and have no user author. We are not using such messages in\n # the example currently, but leave the opportunity to extend.\n user = models.ForeignKey(User, related_name='messages', on_delete=models.CASCADE, null=True)\n content = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "Room",
"path": "backend/chat/models.py",
"snippet": "class Room(models.Model):\n name = models.CharField(max_length=100, unique=True)\n version = models.PositiveBigIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n bumped_at = models.DateTimeField(auto_now_add=True)\n last_message = models.ForeignKey(\n 'Message', related_name='last_message_rooms',\n on_delete=models.SET_NULL, null=True, blank=True,\n )\n\n def increment_version(self):\n self.version += 1\n self.save()\n return self.version\n\n def __str__(self):\n return self.name"
},
{
"identifier": "RoomMember",
"path": "backend/chat/models.py",
"snippet": "class RoomMember(models.Model):\n room = models.ForeignKey(Room, related_name='memberships', on_delete=models.CASCADE)\n user = models.ForeignKey(User, related_name='rooms', on_delete=models.CASCADE)\n joined_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = ('room', 'user')\n\n def __str__(self):\n return f\"{self.user.username} in {self.room.name}\""
},
{
"identifier": "Outbox",
"path": "backend/chat/models.py",
"snippet": "class Outbox(models.Model):\n method = models.TextField(default=\"publish\")\n payload = models.JSONField()\n partition = models.BigIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "CDC",
"path": "backend/chat/models.py",
"snippet": "class CDC(models.Model):\n method = models.TextField(default=\"publish\")\n payload = models.JSONField()\n partition = models.BigIntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)"
},
{
"identifier": "MessageSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class MessageSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n room = MessageRoomSerializer(read_only=True)\n\n class Meta:\n model = Message\n fields = ['id', 'content', 'user', 'room', 'created_at']"
},
{
"identifier": "RoomSearchSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class RoomSearchSerializer(serializers.ModelSerializer):\n\n is_member = serializers.BooleanField(read_only=True)\n \n class Meta:\n model = Room\n fields = ['id', 'name', 'created_at', 'is_member']"
},
{
"identifier": "RoomSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class RoomSerializer(serializers.ModelSerializer):\n member_count = serializers.SerializerMethodField()\n last_message = LastMessageSerializer(read_only=True)\n\n def get_member_count(self, obj):\n return obj.member_count\n\n class Meta:\n model = Room\n fields = ['id', 'name', 'version', 'bumped_at', 'member_count', 'last_message']"
},
{
"identifier": "RoomMemberSerializer",
"path": "backend/chat/serializers.py",
"snippet": "class RoomMemberSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n room = RoomSerializer(read_only=True)\n \n class Meta:\n model = RoomMember\n fields = ['room', 'user']"
}
] | import json
import logging
import requests
from requests.adapters import HTTPAdapter, Retry
from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, Count
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status, viewsets
from rest_framework.generics import ListCreateAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .models import Message, Room, RoomMember, Outbox, CDC
from .serializers import MessageSerializer, RoomSearchSerializer, RoomSerializer, RoomMemberSerializer | 982 |
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
|
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self): | return Room.objects.annotate( | 1 | 2023-12-06 10:13:26+00:00 | 2k |
shinkungoo/SymbolicCDM | SCDM/parameter.py | [
{
"identifier": "accuracy",
"path": "SCDM/eval.py",
"snippet": "def accuracy(y_pred, y_true, threshold=0.5, weights=None):\n pred = np.array(y_pred)\n true = np.array(y_true)\n result = np.where(pred > threshold, 1, 0)\n if weights is not None:\n correct = np.sum((true == result) * weights)\n total = np.sum(weights)\n return correct / total\n else:\n return metrics.accuracy_score(true, result)"
},
{
"identifier": "area_under_curve",
"path": "SCDM/eval.py",
"snippet": "def area_under_curve(y_pred, y_true):\n pred = np.array(y_pred)\n true = np.array(y_true)\n fpr, tpr, thresholds = metrics.roc_curve(true, pred)\n return metrics.auc(fpr, tpr)"
},
{
"identifier": "f1_score",
"path": "SCDM/eval.py",
"snippet": "def f1_score(y_pred, y_true, threshold=0.5):\n pred = np.array(y_pred)\n true = np.array(y_true)\n result = np.where(pred >= threshold, 1, 0)\n return metrics.f1_score(true, result)"
},
{
"identifier": "init_interaction_function",
"path": "SCDM/utility.py",
"snippet": "def init_interaction_function(discrimination, proficiency, q_matrix_line):\n if type(proficiency) is np.ndarray:\n return discrimination * np.sum(proficiency * q_matrix_line)\n else:\n return discrimination * (proficiency * q_matrix_line).sum(dim=1).unsqueeze(1)"
}
] | import torch
import torch.nn as nn
from tqdm import tqdm
from .eval import accuracy, area_under_curve, f1_score
from .utility import init_interaction_function | 1,076 |
class ComputeIF(nn.Module):
def __init__(self,
student_number,
question_number,
knowledge_number):
super(ComputeIF, self).__init__()
self.student_emb = nn.Embedding(student_number, knowledge_number)
self.difficulty = nn.Embedding(question_number, knowledge_number)
self.discrimination = nn.Embedding(question_number, 1)
# initialize
for name, param in self.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
def forward(self, student_id, question, q_matrix_line, interaction_func):
proficiency_level = torch.sigmoid(self.student_emb(student_id))
difficulty = torch.sigmoid(self.difficulty(question))
discrimination = torch.sigmoid(self.discrimination(question))
input_x = interaction_func(discrimination, proficiency_level - difficulty, q_matrix_line)
output = torch.sigmoid(input_x)
return output.view(-1)
class Parameter:
def __init__(self,
student_number: int,
question_number: int,
knowledge_number: int,):
self.net = ComputeIF(student_number, question_number, knowledge_number)
self.student_number = student_number
self.question_number = question_number
self.knowledge_number = knowledge_number
self.interaction_function = init_interaction_function
self.interaction_function_string = "initial interaction function"
def train(self, train_set, epochs, device="cpu", lr=0.002, init=True):
# initialize
if init:
for name, param in self.net.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
self.net = self.net.to(device)
self.net.train()
loss_function = nn.BCELoss()
optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
with tqdm(total=epochs, desc="Training Process", unit="epoch") as pbar:
for epoch in range(epochs):
epoch_losses = []
for batch_data in train_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
y: torch.Tensor = y.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
self.interaction_function)
loss = loss_function(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_losses.append(loss.mean().item())
pbar.update()
def evaluate(self, test_set, interaction_func, device="cpu"):
self.net = self.net.to(device)
self.net.eval()
y_true, y_pred = [], []
for batch_data in test_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
interaction_func)
y_pred.extend(pred.detach().cpu().tolist())
y_true.extend(y.tolist())
acc = accuracy(y_pred, y_true)
auc = area_under_curve(y_pred, y_true)
|
class ComputeIF(nn.Module):
def __init__(self,
student_number,
question_number,
knowledge_number):
super(ComputeIF, self).__init__()
self.student_emb = nn.Embedding(student_number, knowledge_number)
self.difficulty = nn.Embedding(question_number, knowledge_number)
self.discrimination = nn.Embedding(question_number, 1)
# initialize
for name, param in self.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
def forward(self, student_id, question, q_matrix_line, interaction_func):
proficiency_level = torch.sigmoid(self.student_emb(student_id))
difficulty = torch.sigmoid(self.difficulty(question))
discrimination = torch.sigmoid(self.discrimination(question))
input_x = interaction_func(discrimination, proficiency_level - difficulty, q_matrix_line)
output = torch.sigmoid(input_x)
return output.view(-1)
class Parameter:
def __init__(self,
student_number: int,
question_number: int,
knowledge_number: int,):
self.net = ComputeIF(student_number, question_number, knowledge_number)
self.student_number = student_number
self.question_number = question_number
self.knowledge_number = knowledge_number
self.interaction_function = init_interaction_function
self.interaction_function_string = "initial interaction function"
def train(self, train_set, epochs, device="cpu", lr=0.002, init=True):
# initialize
if init:
for name, param in self.net.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
self.net = self.net.to(device)
self.net.train()
loss_function = nn.BCELoss()
optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
with tqdm(total=epochs, desc="Training Process", unit="epoch") as pbar:
for epoch in range(epochs):
epoch_losses = []
for batch_data in train_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
y: torch.Tensor = y.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
self.interaction_function)
loss = loss_function(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_losses.append(loss.mean().item())
pbar.update()
def evaluate(self, test_set, interaction_func, device="cpu"):
self.net = self.net.to(device)
self.net.eval()
y_true, y_pred = [], []
for batch_data in test_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
interaction_func)
y_pred.extend(pred.detach().cpu().tolist())
y_true.extend(y.tolist())
acc = accuracy(y_pred, y_true)
auc = area_under_curve(y_pred, y_true) | f1 = f1_score(y_pred, y_true) | 2 | 2023-12-09 13:37:15+00:00 | 2k |
pan-x-c/EE-LLM | megatron/core/tensor_parallel/mappings.py | [
{
"identifier": "get_tensor_and_expert_parallel_group",
"path": "megatron/core/parallel_state.py",
"snippet": "def get_tensor_and_expert_parallel_group():\n assert (\n _TENSOR_AND_EXPERT_PARALLEL_GROUP is not None\n ), 'tensor and expert parallel group is not initialized'\n return _TENSOR_AND_EXPERT_PARALLEL_GROUP"
},
{
"identifier": "get_tensor_model_parallel_group",
"path": "megatron/core/parallel_state.py",
"snippet": "def get_tensor_model_parallel_group(check_initialized=True):\n \"\"\"Get the tensor model parallel group the caller rank belongs to.\"\"\"\n if check_initialized:\n assert (\n _TENSOR_MODEL_PARALLEL_GROUP is not None\n ), 'tensor model parallel group is not initialized'\n return _TENSOR_MODEL_PARALLEL_GROUP"
},
{
"identifier": "get_tensor_model_parallel_rank",
"path": "megatron/core/parallel_state.py",
"snippet": "def get_tensor_model_parallel_rank():\n \"\"\"Return my rank for the tensor model parallel group.\"\"\"\n global _MPU_TENSOR_MODEL_PARALLEL_RANK\n if _MPU_TENSOR_MODEL_PARALLEL_RANK is not None:\n return _MPU_TENSOR_MODEL_PARALLEL_RANK\n return torch.distributed.get_rank(group=get_tensor_model_parallel_group())"
},
{
"identifier": "get_tensor_model_parallel_world_size",
"path": "megatron/core/parallel_state.py",
"snippet": "def get_tensor_model_parallel_world_size():\n \"\"\"Return world size for the tensor model parallel group.\"\"\"\n global _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE\n if _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE is not None:\n return _MPU_TENSOR_MODEL_PARALLEL_WORLD_SIZE\n return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())"
},
{
"identifier": "split_tensor_along_last_dim",
"path": "megatron/core/tensor_parallel/utils.py",
"snippet": "def split_tensor_along_last_dim(\n tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False,\n) -> List[torch.Tensor]:\n \"\"\" Split a tensor along its last dimension.\n\n Arguments:\n tensor: input tensor.\n num_partitions: number of partitions to split the tensor\n contiguous_split_chunks: If True, make each chunk contiguous\n in memory.\n\n Returns:\n A list of Tensors\n \"\"\"\n # Get the size and dimension.\n last_dim = tensor.dim() - 1\n last_dim_size = divide(tensor.size()[last_dim], num_partitions)\n # Split.\n tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)\n # Note: torch.split does not create contiguous tensors by default.\n if contiguous_split_chunks:\n return tuple(chunk.contiguous() for chunk in tensor_list)\n\n return tensor_list"
}
] | import torch
from megatron.core.parallel_state import (
get_tensor_and_expert_parallel_group,
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from .utils import split_tensor_along_last_dim | 789 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
def _reduce(input_):
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_tensor_model_parallel_world_size() == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
return input_
def _split_along_last_dim(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension.
| # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
def _reduce(input_):
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_tensor_model_parallel_world_size() == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
return input_
def _split_along_last_dim(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension. | input_list = split_tensor_along_last_dim(input_, world_size) | 4 | 2023-12-07 08:29:38+00:00 | 2k |
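A small, self-contained illustration of what split_tensor_along_last_dim (quoted in this row's context) does for _split_along_last_dim: a (2, 8) tensor split across a tensor-parallel world size of 4 yields four (2, 2) chunks, and each rank keeps the chunk at its own index.

import torch

x = torch.arange(16).reshape(2, 8)
chunks = torch.split(x, 8 // 4, dim=-1)   # four (2, 2) views, as the helper does
local = chunks[1].contiguous()            # e.g. rank 1 keeps columns 2 and 3
print(local)                              # tensor([[ 2,  3], [10, 11]])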
kanadeblisst00/WeChat-PyRobot | src/wechat_pyrobot/hookmsg32.py | [
{
"identifier": "CDataJSONEncoder",
"path": "src/wechat_pyrobot/ctypes_json.py",
"snippet": "class CDataJSONEncoder(JSONEncoder):\r\n def default(self, obj):\r\n if isinstance(obj, (Array, list)):\r\n return [self.default(e) for e in obj]\r\n\r\n if isinstance(obj, _Pointer):\r\n return self.default(obj.contents) if obj else None\r\n\r\n if isinstance(obj, _SimpleCData):\r\n return self.default(obj.value)\r\n\r\n if isinstance(obj, (bool, int, float, str)):\r\n return obj\r\n\r\n if obj is None:\r\n return obj\r\n\r\n if isinstance(obj, (Structure, Union)):\r\n result = {}\r\n anonymous = getattr(obj, '_anonymous_', [])\r\n\r\n for key, *_ in getattr(obj, '_fields_', []):\r\n value = getattr(obj, key)\r\n\r\n # private fields don't encode\r\n if key.startswith('_'):\r\n continue\r\n\r\n if key in anonymous:\r\n result.update(self.default(value))\r\n else:\r\n result[key] = self.default(value)\r\n\r\n return result\r\n\r\n return JSONEncoder.default(self, obj)"
},
{
"identifier": "CALL_OFFSET",
"path": "src/wechat_pyrobot/offset.py",
"snippet": "CALL_OFFSET = {\r\n \"3.9.8.12\": {\r\n \"SendMsgFreeCallOffset\": 0x823370,\r\n \"SendTextCallOffset\": 0xDE22D0,\r\n \"SendImageCall0Offset\": 0x821E40,\r\n \"SendImageCall1Offset\": 0xDE1880,\r\n \"SendImageCall2Offset\": 0xF755C0,\r\n \"LogEnterCallOffset\": 0x102C250,\r\n \"LogLeaveCallOffset\": 0x102C584,\r\n \"HookMsgCallOffset\": 0xE0F743,\r\n \"RevokeMsgCallOffset\": 0xE14880,\r\n },\r\n \"3.9.8.15\": {\r\n \"SendMsgFreeCallOffset\": 0x94E590,\r\n \"SendTextCallOffset\": 0x1091CE0,\r\n \"SendImageCall0Offset\": 0x94CD10,\r\n \"SendImageCall1Offset\": 0x10911F0,\r\n \"LogEnterCallOffset\": 0x13D6380,\r\n \"LogLeaveCallOffset\": 0x13D6380,\r\n \"HookMsgCallOffset\": 0x10E8E30,\r\n \"RevokeMsgCallOffset\": 0x10E2A50,\r\n }\r\n}"
}
] | import json
from py_process_hooker import Hook
from py_process_hooker.winapi import *
from .ctypes_json import CDataJSONEncoder
from .offset import CALL_OFFSET
| 904 |
struct_size = 0x2E0
class GeneralStructW32(Structure):
_fields_ = [
('value', c_wchar_p),
('len1', c_uint32),
('len2', c_uint32),
('_unkown_value0', c_uint32),
('_unkown_value1', c_uint32)
]
class WeChatMsgStruct32(Structure):
_fields_ = [
('_unkown_value0', c_uint32 * 8),
('localid', c_uint32),
('_unkown_value2', c_uint32 * 3),
('msgid', c_ulonglong),
('msg_type', c_uint32),
('is_self_msg', c_uint32),
('_unkown_value3', c_uint32),
('timestamp', c_uint32),
('sender', GeneralStructW32),
('_unkown_value4', c_uint32 * 5),
('content', GeneralStructW32),
('_unkown_value5', c_uint32 * 66),
('room_sender', GeneralStructW32),
('sign', GeneralStructW32),
('thumb_path', GeneralStructW32),
('file_path', GeneralStructW32),
]
|
struct_size = 0x2E0
class GeneralStructW32(Structure):
_fields_ = [
('value', c_wchar_p),
('len1', c_uint32),
('len2', c_uint32),
('_unkown_value0', c_uint32),
('_unkown_value1', c_uint32)
]
class WeChatMsgStruct32(Structure):
_fields_ = [
('_unkown_value0', c_uint32 * 8),
('localid', c_uint32),
('_unkown_value2', c_uint32 * 3),
('msgid', c_ulonglong),
('msg_type', c_uint32),
('is_self_msg', c_uint32),
('_unkown_value3', c_uint32),
('timestamp', c_uint32),
('sender', GeneralStructW32),
('_unkown_value4', c_uint32 * 5),
('content', GeneralStructW32),
('_unkown_value5', c_uint32 * 66),
('room_sender', GeneralStructW32),
('sign', GeneralStructW32),
('thumb_path', GeneralStructW32),
('file_path', GeneralStructW32),
]
| class MyCDataJSONEncoder(CDataJSONEncoder):
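A hedged sketch of how a hooked message address could be viewed as the struct above and serialized with the ctypes-aware encoder from this row's context; the function name and the idea that the hook hands over a raw address are assumptions, not taken from hookmsg32.py:

import json

def dump_message(msg_addr: int) -> str:
    # Reinterpret the raw address as the layout defined above, then let
    # CDataJSONEncoder walk its fields (nested GeneralStructW32 included).
    msg = WeChatMsgStruct32.from_address(msg_addr)
    return json.dumps(msg, cls=CDataJSONEncoder, ensure_ascii=False)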
| 0 | 2023-12-12 08:43:11+00:00 | 2k |
mitrefireline/simharness | simharness2/environments/tests/check_reactive_environments.py | [
{
"identifier": "ReactiveDiscreteHarness",
"path": "simharness2/environments/reactive.py",
"snippet": "class ReactiveHarness(RLHarness): # noqa: D205,D212,D415\n def __init__(self, config: EnvContext) -> None:\n def set_trial_results_path(self, path: str) -> None:\n def step(\n self, action: np.ndarray\n ) -> Tuple[np.ndarray, float, bool, bool, Dict[str, Any]]: # noqa\n def _do_one_agent_step(self, action: np.ndarray) -> None:\n def _parse_action(self, action: np.ndarray) -> Tuple[int, int]:\n def _update_agent_position(self) -> None:\n def _agent_pos_is_unburned(self) -> bool:\n def _update_mitigation(self) -> None:\n def _do_one_simulation_step(self) -> bool:\n def _run_simulation(self):\n def _run_benchmark(self):\n def _update_state(self):\n def reset(\n self,\n *,\n seed: Optional[int] = None,\n options: Optional[Dict[Any, Any]] = None,\n ) -> Tuple[np.ndarray, Dict[Any, Any]]: # noqa\n def get_nonsim_attribute_bounds(self) -> OrderedDict[str, Dict[str, int]]: # noqa\n def get_nonsim_attribute_data(self) -> OrderedDict[str, np.ndarray]: # noqa\n def render(self): # noqa\n def _configure_env_rendering(self, should_render: bool) -> None:\n def _increment_evaluation_iterations(self) -> None:\n def _set_agent_pos_for_episode_start(self):\n def _log_env_init(self):\n def _log_env_reset(self):\n def _setup_harness_analytics(self, harness_analytics_partial: partial) -> None:\n def _setup_reward_cls(self, reward_cls_partial: partial) -> None:"
},
{
"identifier": "get_simulation_from_name",
"path": "simharness2/sim_registry.py",
"snippet": "def get_simulation_from_name(name: str) -> Tuple[Type[Simulation], Config, Config]:\n \"\"\"\n Return the simulation class and config files associated with the given name.\n\n Arguments:\n name (str): Name of the requested simulation\n\n Raises:\n KeyError: Assert that the simulation has been registered with the\n simulation registry\n\n Returns:\n Tuple(Type[Simulation], Config, Config): Tuple of the simulation class and\n train/eval configs associated with the given name\n \"\"\"\n if name not in _simulation_registry:\n raise KeyError(\n f\"Error: unknown simulation type {name}, \"\n \"the only registed simulation types are: \"\n f\"{list(_simulation_registry.keys())}!\"\n )\n return _simulation_registry[name]"
}
] | import argparse
import logging
import os
import yaml
import traceback
from typing import Any, Dict
from ray.rllib.utils.pre_checks.env import check_gym_environments
from simharness2.environments.reactive import (
ReactiveDiscreteHarness,
ReactiveHarness,
)
from simharness2.sim_registry import get_simulation_from_name | 1,045 | # noqa : D212,D415
"""
To avoid an ImportError and/or ModuleNotFoundError, run this script as a module:
python -m simharness2.environments.tests.check_reactive_environments \
--config <path_to_config_file> --env-type <train|eval>
(above command should be executed from the root of the repository)
"""
def setup_args():
"""Parse command line options (mode and config)."""
parser = argparse.ArgumentParser(description="Test custom environment with RLlib.")
help_s = "Path to (harness) config file."
parser.add_argument("--config", required=True, type=str, help=help_s)
help_s, choices = "Environment type.", ["train", "eval"]
parser.add_argument(
"--env-type", required=True, type=str, help=help_s, choices=choices
)
return parser.parse_args()
def get_config(cfg_path: str) -> Dict[str, Any]:
"""Load the YAML config file from the given path.
Arguments:
cfg_path: A string indicating the file path to load the YAML file from.
Returns:
A dictionary containing the contents of the YAML configuration file.
"""
with open(cfg_path, "r") as f:
return yaml.safe_load(f)
def reactive_multidiscrete_env_creator(env_config: Dict[str, Any]) -> ReactiveHarness:
"""Environment creator for RLlib.
Arguments:
env_config: A dictionary containing the environment configuration.
Returns:
An instance of the ReactiveHarness (environment) class.
"""
return ReactiveHarness(**env_config)
| # noqa : D212,D415
"""
To avoid an ImportError and/or ModuleNotFoundError, run this script as a module:
python -m simharness2.environments.tests.check_reactive_environments \
--config <path_to_config_file> --env-type <train|eval>
(above command should be executed from the root of the repository)
"""
def setup_args():
"""Parse command line options (mode and config)."""
parser = argparse.ArgumentParser(description="Test custom environment with RLlib.")
help_s = "Path to (harness) config file."
parser.add_argument("--config", required=True, type=str, help=help_s)
help_s, choices = "Environment type.", ["train", "eval"]
parser.add_argument(
"--env-type", required=True, type=str, help=help_s, choices=choices
)
return parser.parse_args()
def get_config(cfg_path: str) -> Dict[str, Any]:
"""Load the YAML config file from the given path.
Arguments:
cfg_path: A string indicating the file path to load the YAML file from.
Returns:
A dictionary containing the contents of the YAML configuration file.
"""
with open(cfg_path, "r") as f:
return yaml.safe_load(f)
def reactive_multidiscrete_env_creator(env_config: Dict[str, Any]) -> ReactiveHarness:
"""Environment creator for RLlib.
Arguments:
env_config: A dictionary containing the environment configuration.
Returns:
An instance of the ReactiveHarness (environment) class.
"""
return ReactiveHarness(**env_config)
| def reactive_discrete_env_creator(env_config: str) -> ReactiveDiscreteHarness: | 0 | 2023-12-08 19:13:31+00:00 | 2k |
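A hedged sketch of how the two creator functions above are typically wired into RLlib so the harnesses can be referenced by name in a config; the environment id strings are assumptions, not taken from the repository:

from ray.tune.registry import register_env

register_env("ReactiveHarness-v0", reactive_multidiscrete_env_creator)
register_env("ReactiveDiscreteHarness-v0", reactive_discrete_env_creator)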
JeffJerseyCow/eviloauth | eviloauth/dispatcher.py | [
{
"identifier": "IDP",
"path": "eviloauth/idp.py",
"snippet": "class IDP():\n idps = get_idps()\n authz_endpoint = 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'\n token_endpoint = 'https://login.microsoftonline.com/common/oauth2/v2.0/token'\n\n def __init__(self, idp, redirect_server, **kwargs):\n\n if idp not in self.idps:\n raise EviloauthCommandException(\n f'IDP {idp} is not supported. Supported IDPs: {self.idps}')\n\n self.redirect_server = redirect_server\n self.idp = idp\n self.client_id = kwargs.get('client_id')\n self.scope = kwargs.get('scope')\n self.final_destination = kwargs.get('final_destination')\n app.config['TOKEN_ENDPOINT'] = self.token_endpoint\n self.__idp_setup__()\n\n def __idp_setup__(self):\n if not self.client_id or not self.scope or not self.final_destination:\n self.client_id = prompt('Client ID: ')\n self.scope = prompt('Scope: ')\n self.final_destination = prompt('Final Destination: ')\n\n app.config['CLIENT_ID'] = self.client_id\n app.config['SCOPE'] = self.scope\n app.config['FINAL_DESTINATION'] = self.final_destination\n\n if self.idp == 'entra_implicit_flow':\n self.response_type = 'token'\n self.redirect_uri = f'https://{self.redirect_server}/redirect'\n\n app.config['REDIRECT_URI'] = self.redirect_uri\n app.config['RESPONSE_TYPE'] = self.response_type\n\n params = {\n 'client_id': self.client_id,\n 'scope': self.scope,\n 'response_type': self.response_type,\n 'redirect_uri': f'https://{self.redirect_server}/redirect'\n }\n self.uri = requests.Request(\n 'GET', self.authz_endpoint, params=params).prepare().url\n\n elif self.idp == 'entra_code_flow':\n self.response_type = 'code'\n self.redirect_uri = f'https://{self.redirect_server}/hook'\n self.state = self.__generate_state__()\n self.code_verifier = self.__generate_code_verifier__()\n self.code_challenge = self.__generate_code_challenge__(\n self.code_verifier)\n self.code_challenge_method = 'S256'\n\n app.config['TOKEN_ENDPOINT'] = self.token_endpoint\n app.config['REDIRECT_URI'] = self.redirect_uri\n app.config['RESPONSE_TYPE'] = self.response_type\n app.config['STATE'] = self.state\n app.config['CODE_VERIFIER'] = self.code_verifier\n app.config['CODE_CHALLENGE'] = self.code_challenge\n app.config['CODE_CHALLENGE_METHOD'] = self.code_challenge_method\n\n params = {\n 'client_id': self.client_id,\n 'scope': self.scope,\n 'response_type': self.response_type,\n 'redirect_uri': self.redirect_uri,\n 'state': self.state,\n 'code_challenge': self.code_challenge,\n 'code_challenge_method': self.code_challenge_method\n }\n self.uri = requests.Request(\n 'GET', self.authz_endpoint, params=params).prepare().url\n logging.info(self.uri)\n\n def __generate_state__(self):\n return ''.join([str(random.randint(0, 9)) for _ in range(5)])\n\n def __generate_code_verifier__(self):\n allowed_chars = string.ascii_letters + string.digits + \"-._~\"\n return ''.join([random.choice(allowed_chars) for _ in range(48)])\n\n def __generate_code_challenge__(self, code_verifier):\n code_verifier_encoded = code_verifier.encode()\n code_verifier_digest = hashlib.sha256(code_verifier_encoded).digest()\n code_challenge = base64.urlsafe_b64encode(\n code_verifier_digest).decode().replace('=', '')\n return code_challenge\n\n def __str__(self):\n idp_str = f'{self.idp}'\n idp_str += f'\\n\\tClient ID: {self.client_id}'\n idp_str += f'\\n\\tScope: {self.scope}'\n idp_str += f'\\n\\tFinal Destination: {self.final_destination}'\n return idp_str\n\n def __repr__(self):\n return self.__str__()"
},
{
"identifier": "EviloauthCommandException",
"path": "eviloauth/exceptions.py",
"snippet": "class EviloauthCommandException(Exception):\n\n def __init__(self, message=\"An error occurred in Eviloauth command execution\"):\n super().__init__(message)"
}
] | import sys
import logging
from eviloauth.idp import IDP
from eviloauth.exceptions import EviloauthCommandException | 1,536 |
class Dispatcher:
def __init__(self, flask_server, module_dict, cache, redirect_server):
logging.debug('Initializing dispatcher')
logging.debug(f'\tFlask server: {flask_server}')
logging.debug(f'\tModule dict: {module_dict}')
logging.debug(f'\tCache: {cache}')
logging.debug(f'\tRedirect server: {redirect_server}')
self.flask_server = flask_server
self.module_dict = module_dict
self.cache = cache
self.redirect_server = redirect_server
def dispatch(self, commands):
cmd, sub, arg, *args = commands.split(' ') + [None, None, None]
if cmd == 'exit':
self.dispatch_exit()
elif cmd == 'module':
self.dispatch_module(cmd, sub, arg)
elif cmd == 'tokens':
self.dispatch_tokens(cmd, sub)
elif cmd == 'idp':
self.dispatch_idp(cmd, sub, arg)
elif cmd == 'target':
self.dispatch_target(cmd, sub, arg)
else:
raise EviloauthCommandException(
'Unknown command %s' % cmd)
def dispatch_exit(self):
print('Exiting...')
self.flask_server.shutdown()
sys.exit()
def dispatch_module(self, cmd, sub, arg):
mod = self.module_dict[f'eviloauth.{cmd}.{sub}.{arg}']
mod.__run__(self.cache.get('target'), 0)
def dispatch_tokens(self, cmd, sub):
general_tokens = self.cache.get('tokens')
if sub == 'list':
print([v for v in general_tokens.keys()])
elif sub == 'add':
logging.error('Not implemented yet')
else:
raise EviloauthCommandException(
'Unknown "%s" command %s' % (cmd, sub))
def dispatch_idp(self, cmd, sub, arg):
if sub == 'list':
print('Current IDP: %s' % self.cache.get('idp'))
elif sub == 'configure':
|
class Dispatcher:
def __init__(self, flask_server, module_dict, cache, redirect_server):
logging.debug('Initializing dispatcher')
logging.debug(f'\tFlask server: {flask_server}')
logging.debug(f'\tModule dict: {module_dict}')
logging.debug(f'\tCache: {cache}')
logging.debug(f'\tRedirect server: {redirect_server}')
self.flask_server = flask_server
self.module_dict = module_dict
self.cache = cache
self.redirect_server = redirect_server
def dispatch(self, commands):
cmd, sub, arg, *args = commands.split(' ') + [None, None, None]
if cmd == 'exit':
self.dispatch_exit()
elif cmd == 'module':
self.dispatch_module(cmd, sub, arg)
elif cmd == 'tokens':
self.dispatch_tokens(cmd, sub)
elif cmd == 'idp':
self.dispatch_idp(cmd, sub, arg)
elif cmd == 'target':
self.dispatch_target(cmd, sub, arg)
else:
raise EviloauthCommandException(
'Unknown command %s' % cmd)
def dispatch_exit(self):
print('Exiting...')
self.flask_server.shutdown()
sys.exit()
def dispatch_module(self, cmd, sub, arg):
mod = self.module_dict[f'eviloauth.{cmd}.{sub}.{arg}']
mod.__run__(self.cache.get('target'), 0)
def dispatch_tokens(self, cmd, sub):
general_tokens = self.cache.get('tokens')
if sub == 'list':
print([v for v in general_tokens.keys()])
elif sub == 'add':
logging.error('Not implemented yet')
else:
raise EviloauthCommandException(
'Unknown "%s" command %s' % (cmd, sub))
def dispatch_idp(self, cmd, sub, arg):
if sub == 'list':
print('Current IDP: %s' % self.cache.get('idp'))
elif sub == 'configure': | idp = IDP(arg, self.redirect_server) | 0 | 2023-12-09 11:21:25+00:00 | 2k |
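The gold next_line constructs an IDP inside the configure branch. A hedged sketch of how dispatch_idp could finish, based only on the IDP.__init__ signature shown in the record's context; storing the instance under the 'idp' cache key and the cache.set call are assumptions:

        elif sub == 'configure':
            idp = IDP(arg, self.redirect_server)
            self.cache.set('idp', idp)  # assumption: the cache object exposes a set() method
        else:
            raise EviloauthCommandException(
                'Unknown "%s" command %s' % (cmd, sub))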
racinette/querky | querky/backends/postgresql/asyncpg/name_type_mapper.py | [
{
"identifier": "PostgresqlNameTypeMapper",
"path": "querky/backends/postgresql/name_type_mapper.py",
"snippet": "class PostgresqlNameTypeMapper(PostgresqlTypeMapper):\n def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):\n self.type_cache = dict()\n # копируем\n self.typemap = {\n schema_name: {\n type_name: type_metadata\n for type_name, type_metadata in schema_map.items()\n }\n for schema_name, schema_map in typemap.items()\n }\n\n def set_mapping(self, schema: str, type_name: str, metadata: TypeMetaData) -> None:\n if schema not in self.typemap:\n self.typemap[schema] = dict()\n s = self.typemap[schema]\n s[type_name] = metadata\n\n async def get_pg_type(self, contract: Contract, conn, oid: int):\n if (pg_type := self.type_cache.get(oid, None)) is None:\n pg_type = await contract.raw_fetchone(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))\n self.type_cache[pg_type] = pg_type\n return pg_type\n\n def get_pg_type_sync(self, contract: Contract, conn, oid: int):\n if (pg_type := self.type_cache.get(oid, None)) is None:\n pg_type = contract.raw_fetchone_sync(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))\n self.type_cache[pg_type] = pg_type\n return pg_type\n\n def get_type_knowledge_impl(self, pg_type) -> TypeKnowledge:\n basename: str = pg_type['type_string']\n schema: str = pg_type['namespace_string']\n\n is_array = basename.endswith(\"[]\")\n if is_array:\n basename = basename[:-2]\n try:\n transforms = self.typemap[schema]\n except KeyError:\n raise KeyError(f\"No transforms for schema: {schema} ({basename})\")\n\n try:\n metadata = transforms[basename]\n except KeyError:\n raise KeyError(f\"No metadata for type: {schema}.{basename} (array={is_array})\")\n\n return TypeKnowledge(\n metadata,\n is_array=is_array,\n is_optional=None\n )\n\n async def get_type_knowledge(self, contract: Contract, conn, oid: int) -> TypeKnowledge:\n return self.get_type_knowledge_impl(await self.get_pg_type(contract, conn, oid))\n\n def get_type_knowledge_sync(self, contract: Contract, conn, oid: int) -> TypeKnowledge:\n return self.get_type_knowledge_impl(self.get_pg_type_sync(contract, conn, oid))"
},
{
"identifier": "TypeMetaData",
"path": "querky/base_types.py",
"snippet": "class TypeMetaData(GetImportsMixin):\n counterpart: str\n required_imports: set[str] | None = None\n\n def get_imports(self) -> set[str]:\n if self.required_imports is None:\n return set()\n return set(self.required_imports)\n\n @classmethod\n def from_type(cls, t: typing.Type) -> TypeMetaData:\n type_name = t.__name__\n module_path = t.__module__\n return TypeMetaData(\n counterpart=type_name,\n required_imports={f\"from {module_path} import {type_name}\"}\n )"
},
{
"identifier": "DATETIME_MODULE",
"path": "querky/common_imports.py",
"snippet": "DATETIME_MODULE = \"import datetime\""
},
{
"identifier": "DECIMAL",
"path": "querky/common_imports.py",
"snippet": "DECIMAL = \"from decimal import Decimal\""
},
{
"identifier": "UUID",
"path": "querky/common_imports.py",
"snippet": "UUID = \"from uuid import UUID\""
},
{
"identifier": "UNION",
"path": "querky/common_imports.py",
"snippet": "UNION = \"from typing import Union\""
}
] | from querky.backends.postgresql.name_type_mapper import PostgresqlNameTypeMapper
from querky.base_types import TypeMetaData
from querky.common_imports import DATETIME_MODULE
from querky.common_imports import DECIMAL as DECIMAL_IMPORT
from querky.common_imports import UUID as UUID_IMPORT
from querky.common_imports import UNION as UNION_IMPORT | 1,108 |
ASYNCPG_RANGE_IMPORT = "from asyncpg import Range as _Range"
ASYNCPG_RECORD_IMPORT = "from asyncpg import Record as _Record"
ASYNCPG_BITSTRING_IMPORT = "from asyncpg import BitString as _BitString"
ASYNCPG_BOX_IMPORT = "from asyncpg import Box as _Box"
ASYNCPG_CIRCLE_IMPORT = "from asyncpg import Circle as _Circle"
ASYNCPG_LINE_IMPORT = "from asyncpg import Line as _Line"
ASYNCPG_LINE_SEGMENT_IMPORT = "from asyncpg import LineSegment as _LineSegment"
ASYNCPG_PATH_IMPORT = "from asyncpg import Path as _Path"
ASYNCPG_POINT_IMPORT = "from asyncpg import Point as _Point"
ASYNCPG_POLYGON_IMPORT = "from asyncpg import Polygon as _Polygon"
|
ASYNCPG_RANGE_IMPORT = "from asyncpg import Range as _Range"
ASYNCPG_RECORD_IMPORT = "from asyncpg import Record as _Record"
ASYNCPG_BITSTRING_IMPORT = "from asyncpg import BitString as _BitString"
ASYNCPG_BOX_IMPORT = "from asyncpg import Box as _Box"
ASYNCPG_CIRCLE_IMPORT = "from asyncpg import Circle as _Circle"
ASYNCPG_LINE_IMPORT = "from asyncpg import Line as _Line"
ASYNCPG_LINE_SEGMENT_IMPORT = "from asyncpg import LineSegment as _LineSegment"
ASYNCPG_PATH_IMPORT = "from asyncpg import Path as _Path"
ASYNCPG_POINT_IMPORT = "from asyncpg import Point as _Point"
ASYNCPG_POLYGON_IMPORT = "from asyncpg import Polygon as _Polygon"
| INT = TypeMetaData("int") | 1 | 2023-12-13 15:16:34+00:00 | 2k |
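The gold continuation begins a table of TypeMetaData constants (INT = TypeMetaData("int")). A short sketch of how a few more of those mappings could look, reusing the import strings defined above; the exact constant names and the chosen Python counterparts are assumptions:

INT = TypeMetaData("int")
TEXT = TypeMetaData("str")
BOOL = TypeMetaData("bool")
DECIMAL = TypeMetaData("Decimal", required_imports={DECIMAL_IMPORT})
UUID = TypeMetaData("UUID", required_imports={UUID_IMPORT})
DATETIME = TypeMetaData("datetime.datetime", required_imports={DATETIME_MODULE})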
Shahzadnit/EZ-CLIP | utils/solver.py | [
{
"identifier": "WarmupMultiStepLR",
"path": "utils/lr_scheduler.py",
"snippet": "class WarmupMultiStepLR(WarmupLR):\r\n\r\n def __init__(self,\r\n optimizer,\r\n milestones,\r\n gamma=0.1,\r\n warmup_epochs=0,\r\n warmup_powers=1,\r\n warmup_lrs=0,\r\n last_epoch=-1):\r\n\r\n if not list(milestones) == sorted(milestones):\r\n raise ValueError('Milestones should be a list of'\r\n ' increasing integers. Got %s' % repr(milestones))\r\n self.milestones = milestones\r\n self.gamma = gamma\r\n super(WarmupMultiStepLR, self).__init__(optimizer,\r\n warmup_epochs,\r\n warmup_powers,\r\n warmup_lrs,\r\n last_epoch)\r\n if self.milestones[0] <= max(self.warmup_epochs):\r\n raise ValueError('milstones[0] ({}) <= max(warmup_epochs) ({})'.format(\r\n milestones[0], max(self.warmup_epochs)))\r\n\r\n def get_single_lr_after_warmup(self, group_index):\r\n factor = self.gamma ** bisect_right(self.milestones, self.last_epoch)\r\n return self.base_lrs[group_index] * factor\r"
},
{
"identifier": "WarmupCosineAnnealingLR",
"path": "utils/lr_scheduler.py",
"snippet": "class WarmupCosineAnnealingLR(WarmupLR):\r\n\r\n def __init__(self,\r\n optimizer,\r\n total_epoch,\r\n final_factor=0,\r\n warmup_epochs=0,\r\n warmup_powers=1,\r\n warmup_lrs=0,\r\n last_epoch=-1):\r\n self.total_epoch = total_epoch\r\n self.final_factor = final_factor\r\n super(WarmupCosineAnnealingLR, self).__init__(optimizer,\r\n warmup_epochs,\r\n warmup_powers,\r\n warmup_lrs,\r\n last_epoch)\r\n\r\n def get_single_lr_after_warmup(self, group_index):\r\n warmup_epoch = self.warmup_epochs[group_index]\r\n progress = (self.last_epoch - warmup_epoch) / (self.total_epoch - warmup_epoch)\r\n progress = min(progress, 1.0)\r\n cosine_progress = (math.cos(math.pi * progress) + 1) / 2\r\n factor = cosine_progress * (1 - self.final_factor) + self.final_factor\r\n return self.base_lrs[group_index] * factor\r"
}
] | import torch.optim as optim
from utils.lr_scheduler import WarmupMultiStepLR, WarmupCosineAnnealingLR
| 1,071 |
def _optimizer(config, model):
if config.solver.optim == 'adam':
optimizer = optim.Adam([{'params': model.parameters()}],
lr=config.solver.lr, betas=(0.9, 0.98), eps=1e-8,
weight_decay=0.2) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
print('Adam')
elif config.solver.optim == 'sgd':
optimizer = optim.SGD([{'params': model.parameters()}],
config.solver.lr,
momentum=config.solver.momentum,
weight_decay=config.solver.weight_decay)
print('SGD')
elif config.solver.optim == 'adamw':
vision_params = list(map(id, model.visual.parameters()))
text_params = filter(lambda p: id(p) not in vision_params,
model.parameters())
optimizer = optim.AdamW([{'params': text_params},
{'params': model.visual.parameters(), 'lr': config.solver.lr * config.solver.ratio},],
betas=(0.9, 0.98), lr=config.solver.lr, eps=1e-8,
weight_decay=config.solver.weight_decay) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
for param_group in optimizer.param_groups:
print(param_group['lr'])
print('AdamW')
else:
raise ValueError('Unknown optimizer: {}'.format(config.solver.optim))
return optimizer
def _lr_scheduler(config,optimizer):
if config.solver.type == 'cosine':
lr_scheduler = WarmupCosineAnnealingLR(
optimizer,
config.solver.epochs,
warmup_epochs=config.solver.lr_warmup_step
)
elif config.solver.type == 'multistep':
if isinstance(config.solver.lr_decay_step, list):
milestones = config.solver.lr_decay_step
elif isinstance(config.solver.lr_decay_step, int):
milestones = [
config.solver.lr_decay_step * (i + 1)
for i in range(config.solver.epochs //
config.solver.lr_decay_step)]
else:
raise ValueError("error learning rate decay step: {}".format(type(config.solver.lr_decay_step)))
|
def _optimizer(config, model):
if config.solver.optim == 'adam':
optimizer = optim.Adam([{'params': model.parameters()}],
lr=config.solver.lr, betas=(0.9, 0.98), eps=1e-8,
weight_decay=0.2) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
print('Adam')
elif config.solver.optim == 'sgd':
optimizer = optim.SGD([{'params': model.parameters()}],
config.solver.lr,
momentum=config.solver.momentum,
weight_decay=config.solver.weight_decay)
print('SGD')
elif config.solver.optim == 'adamw':
vision_params = list(map(id, model.visual.parameters()))
text_params = filter(lambda p: id(p) not in vision_params,
model.parameters())
optimizer = optim.AdamW([{'params': text_params},
{'params': model.visual.parameters(), 'lr': config.solver.lr * config.solver.ratio},],
betas=(0.9, 0.98), lr=config.solver.lr, eps=1e-8,
weight_decay=config.solver.weight_decay) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
for param_group in optimizer.param_groups:
print(param_group['lr'])
print('AdamW')
else:
raise ValueError('Unknown optimizer: {}'.format(config.solver.optim))
return optimizer
def _lr_scheduler(config,optimizer):
if config.solver.type == 'cosine':
lr_scheduler = WarmupCosineAnnealingLR(
optimizer,
config.solver.epochs,
warmup_epochs=config.solver.lr_warmup_step
)
elif config.solver.type == 'multistep':
if isinstance(config.solver.lr_decay_step, list):
milestones = config.solver.lr_decay_step
elif isinstance(config.solver.lr_decay_step, int):
milestones = [
config.solver.lr_decay_step * (i + 1)
for i in range(config.solver.epochs //
config.solver.lr_decay_step)]
else:
raise ValueError("error learning rate decay step: {}".format(type(config.solver.lr_decay_step)))
| lr_scheduler = WarmupMultiStepLR(
| 0 | 2023-12-12 13:11:20+00:00 | 2k |
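The gold next_line opens a WarmupMultiStepLR call for the multistep branch. A hedged completion sketched from the WarmupMultiStepLR signature in the record's context; reusing the same warmup_epochs setting as the cosine branch and the trailing else/return are assumptions:

        lr_scheduler = WarmupMultiStepLR(
            optimizer,
            milestones,
            warmup_epochs=config.solver.lr_warmup_step
        )
    else:
        raise ValueError('Unknown lr scheduler type: {}'.format(config.solver.type))
    return lr_scheduler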
Gwolfgit/Authoritah | models.py | [
{
"identifier": "get_tailscale_ip4",
"path": "functions.py",
"snippet": "def get_tailscale_ip4() -> str:\n try:\n output = subprocess.check_output(\n [\"tailscale\", \"ip\", \"-4\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n ip = output.strip()\n if is_valid_ip(ip):\n return ip\n except subprocess.CalledProcessError as e:\n logger.error(e)\n return \"\""
},
{
"identifier": "get_tailscale_ip6",
"path": "functions.py",
"snippet": "def get_tailscale_ip6() -> str:\n try:\n output = subprocess.check_output(\n [\"tailscale\", \"ip\", \"-6\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n ip = output.strip()\n if is_valid_ip(ip):\n return ip\n except subprocess.CalledProcessError as e:\n logger.error(e)\n return \"\""
}
] | import orjson
from typing import Any, Dict, Tuple
from functions import get_tailscale_ip4, get_tailscale_ip6
from pathlib import Path | 738 |
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_config():
with open(Path(Path(__file__).parent.resolve(), "config.json"), "r") as fd:
return dotdict(orjson.loads(fd.read()))
class DefaultDict(dict):
"""
A dictionary subclass that maintains default keys and values.
"""
def __init__(self, default_values: Dict[Any, Any], *args, **kwargs):
"""
Initialize the dictionary with default values and any additional provided values.
:param default_values: A dictionary of default key-value pairs.
"""
super().__init__()
self.default_values = default_values
self.update(self.default_values)
def __setitem__(self, key, value):
"""
Set a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__setitem__(key, value)
def __delitem__(self, key):
"""
Delete a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__delitem__(key)
def pop(self, key, *args, **kwargs):
"""
Pop a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
return self.default_values[key]
return super().pop(key, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Update the dictionary. Default keys are reset to default values.
"""
updates = dict(*args, **kwargs)
super().update(
{
k: self.default_values[k] if k in self.default_values else updates[k]
for k in updates
}
)
class MyAuthoritah:
def __init__(self, cfg: dotdict):
self.cfg = cfg
self.data = {}
self._relay = self.cfg.default_relay
self._ip6 = get_tailscale_ip6()
|
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_config():
with open(Path(Path(__file__).parent.resolve(), "config.json"), "r") as fd:
return dotdict(orjson.loads(fd.read()))
class DefaultDict(dict):
"""
A dictionary subclass that maintains default keys and values.
"""
def __init__(self, default_values: Dict[Any, Any], *args, **kwargs):
"""
Initialize the dictionary with default values and any additional provided values.
:param default_values: A dictionary of default key-value pairs.
"""
super().__init__()
self.default_values = default_values
self.update(self.default_values)
def __setitem__(self, key, value):
"""
Set a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__setitem__(key, value)
def __delitem__(self, key):
"""
Delete a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__delitem__(key)
def pop(self, key, *args, **kwargs):
"""
Pop a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
return self.default_values[key]
return super().pop(key, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Update the dictionary. Default keys are reset to default values.
"""
updates = dict(*args, **kwargs)
super().update(
{
k: self.default_values[k] if k in self.default_values else updates[k]
for k in updates
}
)
class MyAuthoritah:
def __init__(self, cfg: dotdict):
self.cfg = cfg
self.data = {}
self._relay = self.cfg.default_relay
self._ip6 = get_tailscale_ip6() | self._ip = get_tailscale_ip4() | 0 | 2023-12-13 01:17:53+00:00 | 2k |
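DefaultDict above is a small data structure whose listed keys always snap back to their default values. A usage sketch (the keys and values here are purely illustrative):

cfg = DefaultDict({"relay": "relay.example", "port": 53})
cfg["port"] = 9999      # "port" is a default key, so it is reset to 53 rather than overwritten
cfg["extra"] = "value"  # non-default keys behave like a normal dict entry
del cfg["relay"]        # deleting a default key also resets it instead of removing it
print(cfg)              # {'relay': 'relay.example', 'port': 53, 'extra': 'value'}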
bluuewhale/nexon-openapi-python | src/nexon_openapi/utils/_transform.py | [
{
"identifier": "is_list",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)"
},
{
"identifier": "is_mapping",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]:\n return isinstance(obj, Mapping)"
},
{
"identifier": "is_list_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_list_type(typ: type) -> bool:\n return (get_origin(typ) or typ) == list"
},
{
"identifier": "is_union_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_union_type(typ: type) -> bool:\n return _is_union(get_origin(typ))"
},
{
"identifier": "extract_type_arg",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def extract_type_arg(typ: type, index: int) -> type:\n args = get_args(typ)\n try:\n return cast(type, args[index])\n except IndexError as err:\n raise RuntimeError(f\"Expected type {typ} to have a type argument at index {index} but it did not\") from err"
},
{
"identifier": "is_required_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_required_type(typ: type) -> bool:\n return get_origin(typ) == Required"
},
{
"identifier": "is_annotated_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_annotated_type(typ: type) -> bool:\n return get_origin(typ) == Annotated"
},
{
"identifier": "strip_annotated_type",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def strip_annotated_type(typ: type) -> type:\n if is_required_type(typ) or is_annotated_type(typ):\n return strip_annotated_type(cast(type, get_args(typ)[0]))\n\n return typ"
},
{
"identifier": "model_dump",
"path": "src/nexon_openapi/_compat.py",
"snippet": "def model_dump(\n model: pydantic.BaseModel,\n *,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n) -> Dict[str, Any]:\n if PYDANTIC_V2:\n return model.model_dump(\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n )\n return cast(\n \"dict[str, Any]\",\n model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast]\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n ),\n )"
},
{
"identifier": "is_typeddict",
"path": "src/nexon_openapi/_compat.py",
"snippet": "def is_typeddict(type_: Type[Any]) -> bool: # noqa: ARG001\n ..."
}
] | from typing import Any, Mapping, Optional, TypeVar, Union, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, override, get_type_hints
from ._utils import (
is_list,
is_mapping,
is_list_type,
is_union_type,
extract_type_arg,
is_required_type,
is_annotated_type,
strip_annotated_type,
)
from .._compat import model_dump, is_typeddict
import pydantic | 1,275 | from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
| from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
| if is_annotated_type(type_): | 6 | 2023-12-14 18:12:17+00:00 | 2k |
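The gold next_line is the is_annotated_type check. A minimal sketch of the remainder of _get_annotated_type, following its docstring (return the Annotated type itself, otherwise None):

    if is_annotated_type(type_):
        return type_

    return None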
Jack24658735/FedLGT | dataloaders/flair_dataset_fed.py | [
{
"identifier": "get_unk_mask_indices",
"path": "dataloaders/data_utils.py",
"snippet": "def get_unk_mask_indices(image,testing,num_labels,known_labels,epoch=1):\n if testing:\n # for consistency across epochs and experiments, seed using hashed image array \n random.seed(hashlib.sha1(np.array(image)).hexdigest())\n unk_mask_indices = random.sample(range(num_labels), (num_labels-int(known_labels)))\n else:\n # sample random number of known labels during training\n if known_labels>0:\n random.seed()\n num_known = random.randint(0,int(num_labels*0.75))\n else:\n num_known = 0\n\n unk_mask_indices = random.sample(range(num_labels), (num_labels-num_known))\n\n return unk_mask_indices"
},
{
"identifier": "image_loader",
"path": "dataloaders/data_utils.py",
"snippet": "def image_loader(path,transform):\n try:\n image = Image.open(path)\n except FileNotFoundError: # weird issues with loading images on our servers\n # print('FILE NOT FOUND')\n time.sleep(10)\n image = Image.open(path)\n\n image = image.convert('RGB')\n\n if transform is not None:\n image = transform(image)\n\n return image"
}
] | import os
import torch
import numpy as np
import pickle
import h5py
from torch.utils.data import Dataset, DataLoader
from pdb import set_trace as stop
from dataloaders.data_utils import get_unk_mask_indices,image_loader | 1,023 |
class FlairFedDataset(Dataset):
def __init__(self, inp_data, split, num_labels, data_file, img_root, curr_user=None, max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairFedDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = inp_data
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.curr_user = curr_user
self.image_id_list = list(self.split_data[self.split][self.curr_user]['image_ids'])
self.image_list = list(self.split_data[self.split][self.curr_user]['images'])
self.label_list = list(self.split_data[self.split][self.curr_user]['labels'])
self.fg_label_list = list(self.split_data[self.split][self.curr_user]['fine_grained_labels'])
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
# img = np.array(self.split_data[self.split][self.curr_user][image_ID]['image'])
img = self.image_list[idx]
image = self.transform(img)
if self.fine_grained_label_mapping != None:
# fine grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
labels_str = self.fg_label_list[idx]
else:
# coarse grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
labels_str = self.label_list[idx]
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
|
class FlairFedDataset(Dataset):
def __init__(self, inp_data, split, num_labels, data_file, img_root, curr_user=None, max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairFedDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = inp_data
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.curr_user = curr_user
self.image_id_list = list(self.split_data[self.split][self.curr_user]['image_ids'])
self.image_list = list(self.split_data[self.split][self.curr_user]['images'])
self.label_list = list(self.split_data[self.split][self.curr_user]['labels'])
self.fg_label_list = list(self.split_data[self.split][self.curr_user]['fine_grained_labels'])
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
# img = np.array(self.split_data[self.split][self.curr_user][image_ID]['image'])
img = self.image_list[idx]
image = self.transform(img)
if self.fine_grained_label_mapping != None:
# fine grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
labels_str = self.fg_label_list[idx]
else:
# coarse grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
labels_str = self.label_list[idx]
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
| unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels) | 0 | 2023-12-09 09:16:59+00:00 | 2k |
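The gold next_line samples the unknown-label indices with get_unk_mask_indices. A hedged sketch of how __getitem__ could wrap up; the -1 mask convention, the scatter_ call, and the sample dict keys are assumptions, not taken from the record:

        unk_mask_indices = get_unk_mask_indices(image, self.testing, self.num_labels, self.known_labels)
        mask = labels.clone()
        mask.scatter_(0, torch.Tensor(unk_mask_indices).long(), -1)  # assumption: unknown labels marked with -1
        sample = {'image': image, 'labels': labels, 'mask': mask, 'imageIDs': str(image_ID)}
        return sample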
AgriCodeHub/dairy-django-backend | production/validators.py | [
{
"identifier": "CowCategoryChoices",
"path": "core/choices.py",
"snippet": "class CowCategoryChoices(models.TextChoices):\n \"\"\"\n Choices for the category of a cow.\n\n Choices:\n - `CALF`: Represents a calf.\n - `WEANER`: Represents a weaner.\n - `HEIFER`: Represents a heifer.\n - `BULL`: Represents a bull.\n - `MILKING_COW`: Represents a milking cow.\n\n Usage:\n These choices represent the category of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the category of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n category = models.CharField(max_length=15, choices=CowCategoryChoices.choices)\n ```\n \"\"\"\n\n CALF = \"Calf\"\n WEANER = \"Weaner\"\n HEIFER = \"Heifer\"\n BULL = \"Bull\"\n MILKING_COW = \"Milking Cow\""
},
{
"identifier": "CowAvailabilityChoices",
"path": "core/choices.py",
"snippet": "class CowAvailabilityChoices(models.TextChoices):\n \"\"\"\n Choices for the availability status of a cow.\n\n Choices:\n - `ALIVE`: Cow is alive and active.\n - `SOLD`: Cow has been sold.\n - `DEAD`: Cow has died.\n\n Usage:\n These choices represent the availability status of a cow in the Cow model.\n Use these choices when defining or querying Cow instances to represent the current status of a cow.\n\n Example:\n ```\n class Cow(models.Model):\n availability_status = models.CharField(max_length=50, choices=CowAvailabilityChoices.choices)\n ```\n \"\"\"\n\n ALIVE = \"Alive\"\n SOLD = \"Sold\"\n DEAD = \"Dead\""
},
{
"identifier": "todays_date",
"path": "core/utils.py",
"snippet": ""
},
{
"identifier": "LactationStageChoices",
"path": "production/choices.py",
"snippet": "class LactationStageChoices(models.TextChoices):\n \"\"\"\n Choices for the stage of lactation.\n\n Choices:\n - `EARLY`: Early stage of lactation.\n - `MID`: Mid stage of lactation.\n - `LATE`: Late stage of lactation.\n - `DRY`: Dry stage (post-lactation).\n - `ENDED`: Lactation has ended.\n\n Usage:\n These choices represent the stage of lactation in the Lactation model and are utilized in the LactationManager\n to determine lactation stages based on the number of days.\n\n Example:\n ```\n class Lactation(models.Model):\n lactation_stage = models.CharField(\n max_length=5,\n choices=LactationStageChoices.choices,\n default=LactationStageChoices.DRY,\n )\n ```\n\n Manager Usage:\n The `LactationManager` uses these choices in the `lactation_stage` method to determine the stage of lactation\n based on the number of days.\n\n Example:\n ```\n class LactationManager(models.Manager):\n ...\n\n def lactation_stage(self, lactation):\n days_in_lactation = self.days_in_lactation(lactation)\n\n if lactation.end_date:\n return LactationStageChoices.ENDED\n elif days_in_lactation <= 100:\n return LactationStageChoices.EARLY\n elif days_in_lactation <= 200:\n return LactationStageChoices.MID\n elif days_in_lactation <= 275:\n return LactationStageChoices.LATE\n else:\n return LactationStageChoices.DRY\n ```\n \"\"\"\n\n EARLY = \"Early\"\n MID = \"Mid\"\n LATE = \"Late\"\n DRY = \"Dry\"\n ENDED = \"Ended\""
},
{
"identifier": "SexChoices",
"path": "users/choices.py",
"snippet": "class SexChoices(models.TextChoices):\n MALE = \"Male\"\n FEMALE = \"Female\""
}
] | from datetime import timedelta
from django.core.exceptions import ValidationError
from core.choices import CowCategoryChoices, CowAvailabilityChoices
from core.utils import todays_date
from production.choices import LactationStageChoices
from users.choices import SexChoices
from production.models import Lactation | 1,470 |
class LactationValidator:
"""
Provides validation methods for lactation records associated with cows.
Methods:
- `validate_age(start_date, cow)`: Validates the start date of lactation based on the cow's age.
- `validate_cow_origin(cow)`: Validates that manual entry is allowed only for bought cows.
- `validate_cow_category(category)`: Validates the cow category for lactation records, allowing only bought cows with calves.
- `validate_fields(start_date, pregnancy, lactation_number, cow, lactation)`: Validates various fields of the lactation record, including start date, end date, pregnancy status, lactation number, and cow's age.
"""
@staticmethod
def validate_age(start_date, cow):
"""
Validates the start date of lactation based on the cow's age.
Args:
- `start_date` (date): The start date of the lactation.
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If the start date is before the cow reaches 635 days of age.
"""
if start_date < cow.date_of_birth + timedelta(days=635):
raise ValidationError(
code="invalid_start_date",
message=f"Invalid start date. Lactation must have started or be around {cow.date_of_birth + timedelta(days=635)}, not {start_date}.",
)
@staticmethod
def validate_cow_origin(cow):
"""
Validates that manual entry is allowed only for bought cows.
Args:
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If manual entry is attempted on a cow that is not bought.
"""
if not cow.is_bought:
raise ValidationError(
code="manual_entry_only_on_bought_cows",
message="Manual entry is allowed only for bought cows.",
)
@staticmethod
def validate_cow_category(category):
"""
Validates the cow category for lactation records, allowing only bought cows with calves.
Args:
- `category` (str): The cow category associated with the lactation record.
Raises:
- `ValidationError`: If the cow category is invalid or not a milking cow with calves.
"""
|
class LactationValidator:
"""
Provides validation methods for lactation records associated with cows.
Methods:
- `validate_age(start_date, cow)`: Validates the start date of lactation based on the cow's age.
- `validate_cow_origin(cow)`: Validates that manual entry is allowed only for bought cows.
- `validate_cow_category(category)`: Validates the cow category for lactation records, allowing only bought cows with calves.
- `validate_fields(start_date, pregnancy, lactation_number, cow, lactation)`: Validates various fields of the lactation record, including start date, end date, pregnancy status, lactation number, and cow's age.
"""
@staticmethod
def validate_age(start_date, cow):
"""
Validates the start date of lactation based on the cow's age.
Args:
- `start_date` (date): The start date of the lactation.
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If the start date is before the cow reaches 635 days of age.
"""
if start_date < cow.date_of_birth + timedelta(days=635):
raise ValidationError(
code="invalid_start_date",
message=f"Invalid start date. Lactation must have started or be around {cow.date_of_birth + timedelta(days=635)}, not {start_date}.",
)
@staticmethod
def validate_cow_origin(cow):
"""
Validates that manual entry is allowed only for bought cows.
Args:
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If manual entry is attempted on a cow that is not bought.
"""
if not cow.is_bought:
raise ValidationError(
code="manual_entry_only_on_bought_cows",
message="Manual entry is allowed only for bought cows.",
)
@staticmethod
def validate_cow_category(category):
"""
Validates the cow category for lactation records, allowing only bought cows with calves.
Args:
- `category` (str): The cow category associated with the lactation record.
Raises:
- `ValidationError`: If the cow category is invalid or not a milking cow with calves.
""" | if category not in CowCategoryChoices.values: | 0 | 2023-12-09 06:56:42+00:00 | 2k |
PeriniM/Rotary-Pendulum-RL | control/reinforcement_learning/DQN/Agent.py | [
{
"identifier": "DeepQNetwork",
"path": "control/reinforcement_learning/DQN/DeepQNetwork.py",
"snippet": "class DeepQNetwork:\n \"\"\"\n Deep Q Network to approximate the Q function\n \"\"\"\n def __init__(self, lr, num_actions, input_dims, fc_dims = [32, 32], opt='adam', loss='mse'):\n\n self.model = Sequential()\n for i in range(len(fc_dims)):\n if i == 0:\n self.model.add(Dense(fc_dims[i], input_shape=(input_dims,), activation='relu'))\n else:\n self.model.add(Dense(fc_dims[i], activation='relu'))\n\n self.model.add(Dense(num_actions, activation='linear'))\n self.model.compile(optimizer=opt, loss=loss)\n self.model.optimizer.learning_rate = lr\n \n def predict(self, state):\n \"\"\"\n Predict the Q values for a given state\n \"\"\"\n return self.model(state).numpy()\n\n def train_on_batch(self, states, q_targets):\n \"\"\"\n Train the network on a batch of states and q_targets\n \"\"\"\n return self.model.train_on_batch(states, q_targets)\n \n def train_batch_gradientTape(self, states, q_targets):\n \"\"\"\n Train the network on a batch of states and q_targets using GradientTape\n \"\"\"\n with tf.GradientTape() as tape:\n predictions = self.model(states)\n loss = tf.keras.losses.MSE(q_targets, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.model.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n # take the mean of the loss\n loss = tf.reduce_mean(loss).numpy()\n return loss\n\n def evaluate(self, states, q_targets, verbose=0):\n \"\"\"\n Evaluate the network on a batch of states and q_targets\n \"\"\"\n return self.model.evaluate(states, q_targets, verbose=verbose)"
},
{
"identifier": "ReplayBuffer",
"path": "control/reinforcement_learning/DQN/ReplayBuffer.py",
"snippet": "class ReplayBuffer:\n \"\"\"\n Stores and retrieves gameplay experiences\n \"\"\"\n\n def __init__(self, size):\n self.gameplay_experiences = deque(maxlen=size)\n \n def store_tuple(self, state, action, reward, new_state, done):\n \"\"\"\n Store the experience in the replay buffer\n \"\"\"\n self.gameplay_experiences.append((state, action, reward, new_state, done))\n\n def sample_batch(self, batch_size):\n \"\"\"\n Sample a random batch of experiences from the replay buffer\n \"\"\"\n random_sample = random.sample(self.gameplay_experiences, batch_size)\n states, actions, rewards, new_states, dones = map(np.asarray, zip(*random_sample))\n return states, actions, rewards, new_states, dones"
}
] | import os
import configparser
import ast
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import copy
import time
import tensorflow as tf
from matplotlib import cm
from datetime import datetime
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import TensorBoard
from control.reinforcement_learning.DQN.DeepQNetwork import DeepQNetwork
from control.reinforcement_learning.DQN.ReplayBuffer import ReplayBuffer | 1,295 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
"""
DQN Agent
- Take an environment
- Set up the deep neural network
- Store the experience
- Choose action
- Train the network
- Evaluate the network
"""
def __init__(self, env):
# check if gpu is available
if tf.config.list_physical_devices('GPU'):
# print the device name
print("GPU is available")
print("Device name: {}".format(tf.test.gpu_device_name()))
else:
print("GPU is not available")
self.env = env
self.nJoint = self.env.nbJoint
# read INI file
# get the path of the root directory
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ini_file_path = os.path.join(root_dir, 'config.ini')
self.params = self.parse_ini(ini_file_path)
# set up the parameters from the INI file
self.action_steps = int(self.params['action_steps'])
self.torque_range = ast.literal_eval(self.params['control_range'])
self.max_episode_steps = int(self.params['max_episode_steps'])
self.train_episodes = int(self.params['train_episodes'])
self.lr = float(self.params['lr'])
self.discount_factor = float(self.params['discount_factor'])
self.epsilon = float(self.params['epsilon'])
self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])
self.epsilon_final = float(self.params['epsilon_final'])
self.buffer_size = int(self.params['buffer_size'])
self.batch_size = int(self.params['batch_size'])
self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])
self.update_rate_episodes = int(self.params['target_update_episodes'])
self.train_rate_steps = int(self.params['train_rate_steps'])
self.discounted_reward = 0.0
self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes
# set up the environment parameters
self.env.num_actions = self.action_steps
self.env.range_actions = self.torque_range
self.env.maxIter = self.max_episode_steps
self.env.umax = self.torque_range[1]
self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)
self.env.action_space = [i for i in range(self.action_steps)]
self.action_space = self.env.action_space
self.total_step_counter = 0
| os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
"""
DQN Agent
- Take an environment
- Set up the deep neural network
- Store the experience
- Choose action
- Train the network
- Evaluate the network
"""
def __init__(self, env):
# check if gpu is available
if tf.config.list_physical_devices('GPU'):
# print the device name
print("GPU is available")
print("Device name: {}".format(tf.test.gpu_device_name()))
else:
print("GPU is not available")
self.env = env
self.nJoint = self.env.nbJoint
# read INI file
# get the path of the root directory
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ini_file_path = os.path.join(root_dir, 'config.ini')
self.params = self.parse_ini(ini_file_path)
# set up the parameters from the INI file
self.action_steps = int(self.params['action_steps'])
self.torque_range = ast.literal_eval(self.params['control_range'])
self.max_episode_steps = int(self.params['max_episode_steps'])
self.train_episodes = int(self.params['train_episodes'])
self.lr = float(self.params['lr'])
self.discount_factor = float(self.params['discount_factor'])
self.epsilon = float(self.params['epsilon'])
self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])
self.epsilon_final = float(self.params['epsilon_final'])
self.buffer_size = int(self.params['buffer_size'])
self.batch_size = int(self.params['batch_size'])
self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])
self.update_rate_episodes = int(self.params['target_update_episodes'])
self.train_rate_steps = int(self.params['train_rate_steps'])
self.discounted_reward = 0.0
self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes
# set up the environment parameters
self.env.num_actions = self.action_steps
self.env.range_actions = self.torque_range
self.env.maxIter = self.max_episode_steps
self.env.umax = self.torque_range[1]
self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)
self.env.action_space = [i for i in range(self.action_steps)]
self.action_space = self.env.action_space
self.total_step_counter = 0 | self.replay_buffer = ReplayBuffer(self.buffer_size) | 1 | 2023-12-09 11:22:54+00:00 | 2k |
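The gold next_line allocates the ReplayBuffer. A hedged sketch of the next few lines of __init__, built from the ReplayBuffer and DeepQNetwork signatures in the record's context; the state-dimension attribute (env.num_state) and the use of two identically shaped networks are assumptions:

        self.replay_buffer = ReplayBuffer(self.buffer_size)
        # assumption: the environment exposes its state dimensionality as env.num_state
        self.q_net = DeepQNetwork(self.lr, self.action_steps, self.env.num_state, fc_dims=self.hidden_dims)
        self.target_q_net = DeepQNetwork(self.lr, self.action_steps, self.env.num_state, fc_dims=self.hidden_dims)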
Kokonico/ObjLog | objlog/Base/LogNode.py | [
{
"identifier": "Debug",
"path": "objlog/LogMessages.py",
"snippet": "class Debug(LogMessage):\n \"\"\"the default debug message, with blue color\"\"\"\n level = \"DEBUG\"\n color = \"\\033[94m\""
},
{
"identifier": "LogMessage",
"path": "objlog/Base/LogMessage.py",
"snippet": "class LogMessage:\n \"\"\"a base message to be logged\n Attributes:\n color\n level (name)\n\n WARNING: this class should not be used directly, use a subclass instead\n it is designed to be used as a base class for other classes, and will not work properly if used directly.\n \"\"\"\n\n def __init__(self, message):\n self.message = str(message)\n self.timestamp = datetime.now()\n self.unix_timestamp = time_ns() // 1_000_000 # deprecated, use self.unix instead\n self.unix = time_ns() // 1_000_000\n # create uuid\n self.uuid = f\"{time_ns()}-{random.randint(0, 1000)}\"\n try:\n t1 = self.color\n t2 = self.level\n except AttributeError:\n raise TypeError(\"this class should not be used directly, use a subclass instead\")\n\n def __str__(self):\n return f\"[{self.timestamp}] {self.level}: {self.message}\"\n\n def __repr__(self):\n return f\"{self.level}: {self.message}\"\n\n def __eq__(self, other):\n return self.uuid == other.uuid\n\n def __ne__(self, other):\n return self.uuid != other.uuid\n\n def colored(self) -> str:\n \"\"\"return a colored version of the message\"\"\"\n return f\"{self.color}[{self.timestamp}] {self.level}: {self.message}\\033[0m\""
}
] | from objlog.LogMessages import Debug
from objlog.Base.LogMessage import LogMessage # "no parent package" error happens when I don't specify the package,
from collections import deque | 875 | """The LogNode class, the main class of the ObjLogger"""
# IDK why
class LogNode:
"""A LogNode, the main class of the ObjLogger. It can log messages to a file, to the console, or both."""
open = open # this code is probably the reason why my dad left me
# this is clearly not a good way to do this, but I don't know how to do it better
# if anyone can prevent doing this, and fix the exception caused when deleting a LogNode, please do it
# else please increment this number by 1
# thank you
# total_failed_attempts_to_fix_this = 1
def __init__(self, name: str, log_file: str | None = None, print_to_console: bool = False,
print_filter: list | None = None, max_messages_in_memory: int = 500, max_log_messages: int = 1000,
log_when_closed: bool = True, wipe_log_file_on_init: bool = False):
self.log_file = log_file
self.name = name
self.print = print_to_console
self.messages = deque(maxlen=max_messages_in_memory)
self.max = max_messages_in_memory
self.maxinf = max_log_messages
self.print_filter = print_filter
self.log_closure_message = log_when_closed
self.log_len = 0
# check if log exists (in file system), and if so, clear it
if isinstance(log_file, str) and wipe_log_file_on_init:
with open(log_file, "w+") as f:
f.write("")
def log(self, message, override_log_file: str | None = None, force_print: tuple[bool, bool] = (False, False),
preserve_message_in_memory: bool = True) -> None:
"""log a message"""
# make sure it's a LogMessage or its subclass
| """The LogNode class, the main class of the ObjLogger"""
# IDK why
class LogNode:
"""A LogNode, the main class of the ObjLogger. It can log messages to a file, to the console, or both."""
open = open # this code is probably the reason why my dad left me
# this is clearly not a good way to do this, but I don't know how to do it better
# if anyone can prevent doing this, and fix the exception caused when deleting a LogNode, please do it
# else please increment this number by 1
# thank you
# total_failed_attempts_to_fix_this = 1
def __init__(self, name: str, log_file: str | None = None, print_to_console: bool = False,
print_filter: list | None = None, max_messages_in_memory: int = 500, max_log_messages: int = 1000,
log_when_closed: bool = True, wipe_log_file_on_init: bool = False):
self.log_file = log_file
self.name = name
self.print = print_to_console
self.messages = deque(maxlen=max_messages_in_memory)
self.max = max_messages_in_memory
self.maxinf = max_log_messages
self.print_filter = print_filter
self.log_closure_message = log_when_closed
self.log_len = 0
# check if log exists (in file system), and if so, clear it
if isinstance(log_file, str) and wipe_log_file_on_init:
with open(log_file, "w+") as f:
f.write("")
def log(self, message, override_log_file: str | None = None, force_print: tuple[bool, bool] = (False, False),
preserve_message_in_memory: bool = True) -> None:
"""log a message"""
# make sure it's a LogMessage or its subclass | if not isinstance(message, LogMessage): | 1 | 2023-12-08 20:41:18+00:00 | 2k |
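The gold next_line starts the isinstance guard. A minimal sketch of how log() could proceed from there; the exact error message and the in-memory append are assumptions consistent with the attributes set in __init__:

        if not isinstance(message, LogMessage):
            raise TypeError("message must be a LogMessage or a subclass of it")

        if preserve_message_in_memory:
            self.messages.append(message)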
anyquest/pyaq | aq/providers/gemini/provider.py | [
{
"identifier": "BaseProvider",
"path": "aq/providers/provider.py",
"snippet": "class BaseProvider:\n async def create_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse:\n pass"
},
{
"identifier": "ProviderError",
"path": "aq/providers/provider.py",
"snippet": "class ProviderError(Exception):\n def __init__(self, code, message):\n self.code = code\n super().__init__(message)"
},
{
"identifier": "ChatCompletionMessage",
"path": "aq/providers/types/chat.py",
"snippet": "class ChatCompletionMessage(BaseModel):\n role: str\n content: Optional[str | List[Content]] = \"\"\n name: Optional[str] = None\n tool_call_id: Optional[str] = None\n tool_calls: Optional[List[ToolCall]] = None"
},
{
"identifier": "ChatCompletionRequest",
"path": "aq/providers/types/chat.py",
"snippet": "class ChatCompletionRequest(BaseModel):\n model: str\n messages: List[ChatCompletionMessage]\n tools: Optional[List[Tool]] = None\n response_format: Optional[ResponseFormat] = None\n tool_choice: Optional[str] = None\n temperature: float = 0.5\n presence_penalty: float = 0.0\n frequency_penalty: float = 0.0\n max_tokens: int = 1000"
},
{
"identifier": "ChatCompletionResponse",
"path": "aq/providers/types/chat.py",
"snippet": "class ChatCompletionResponse(BaseModel):\n id: str\n object: str\n created: int\n choices: List[Choice]"
},
{
"identifier": "Choice",
"path": "aq/providers/types/chat.py",
"snippet": "class Choice(BaseModel):\n index: int\n message: ChatCompletionMessage\n finish_reason: Optional[str] = None"
},
{
"identifier": "Error",
"path": "aq/providers/types/chat.py",
"snippet": "class Error(BaseModel):\n code: str | int\n message: str"
},
{
"identifier": "AsyncHttpClient",
"path": "aq/http_client/async_http_client.py",
"snippet": "class AsyncHttpClient:\n TIMEOUT = 120\n\n def __init__(self):\n logging.getLogger('httpcore').setLevel(logging.ERROR)\n logging.getLogger('httpx').setLevel(logging.ERROR)\n self._logger = logging.getLogger(self.__class__.__name__)\n\n async def post(self, url: str, headers: Dict[str, Any], data: Any, json=True) -> Any:\n retry_count = 0\n while retry_count < 5:\n try:\n async with httpx.AsyncClient() as ac:\n response = await ac.post(url, headers=headers, data=data, timeout=self.TIMEOUT)\n if json:\n json_response = response.json()\n if \"error\" in json_response and \"code\" in json_response[\"error\"]:\n code = int(json_response[\"error\"][\"code\"])\n if code == 429:\n self._logger.error(\"Received a 429 error. Retrying ...\")\n retry_count += 1\n await asyncio.sleep(5*(2**retry_count))\n else:\n return json_response\n else:\n return json_response\n else:\n return response.text\n except httpx.HTTPStatusError as e:\n if e.response.status_code == 429:\n self._logger.error(\"Received a 429 error. Retrying ...\")\n retry_count += 1\n await asyncio.sleep(5*(2**retry_count))\n else:\n self._logger.error(f\"HTTP error: {e.response.status_code}, {e.response.text}\")\n raise e\n\n async def get(self, url: str, query: Dict[str, Any] = None, headers: [str, Any] = None, json=True) -> Any:\n get_url = f\"{url}?{urlencode(query)}\" if query else url\n async with httpx.AsyncClient() as ac:\n response = await ac.get(get_url, headers=headers or {}, timeout=self.TIMEOUT)\n return response.json() if json else response.text\n\n @staticmethod\n def urljoin(*args):\n stripped = map(lambda x: str(x).strip('/'), args)\n return \"/\".join(stripped)"
}
] | import logging
import re
from typing import Dict, Any, Optional, List, Literal
from pydantic import BaseModel
from ..provider import BaseProvider, ProviderError
from ..types import ChatCompletionRequest, ChatCompletionResponse, ChatCompletionMessage, Choice, Error
from ...http_client import AsyncHttpClient | 1,233 |
class InlineData(BaseModel):
mimeType: str
data: str
class Part(BaseModel):
text: Optional[str] = None
inlineData: Optional[InlineData] = None
class Content(BaseModel):
role: Literal["user", "model"]
parts: List[Part]
class GenerationConfig(BaseModel):
temperature: float = 0.5
maxOutputTokens: int = 1000
class GeminiCompletionRequest(BaseModel):
contents: List[Content]
generationConfig: GenerationConfig
class ResponseCandidate(BaseModel):
content: Content
finishReason: Literal["STOP"]
class GeminiCompletionResponse(BaseModel):
candidates: List[ResponseCandidate]
class GeminiProvider(BaseProvider):
def __init__(self, config: Dict[str, Any], http_client: AsyncHttpClient):
self._config = config
self._http_client = http_client
self._logger = logging.getLogger(self.__class__.__name__)
@staticmethod
def _check_config(config: Dict[str, Any]) -> None:
required_keys = ['endpoint', 'key']
if not all(key in config for key in required_keys):
raise ProviderError(400, "The Gemini provider is not configured. Add settings to config.yml.")
|
class InlineData(BaseModel):
mimeType: str
data: str
class Part(BaseModel):
text: Optional[str] = None
inlineData: Optional[InlineData] = None
class Content(BaseModel):
role: Literal["user", "model"]
parts: List[Part]
class GenerationConfig(BaseModel):
temperature: float = 0.5
maxOutputTokens: int = 1000
class GeminiCompletionRequest(BaseModel):
contents: List[Content]
generationConfig: GenerationConfig
class ResponseCandidate(BaseModel):
content: Content
finishReason: Literal["STOP"]
class GeminiCompletionResponse(BaseModel):
candidates: List[ResponseCandidate]
class GeminiProvider(BaseProvider):
def __init__(self, config: Dict[str, Any], http_client: AsyncHttpClient):
self._config = config
self._http_client = http_client
self._logger = logging.getLogger(self.__class__.__name__)
@staticmethod
def _check_config(config: Dict[str, Any]) -> None:
required_keys = ['endpoint', 'key']
if not all(key in config for key in required_keys):
raise ProviderError(400, "The Gemini provider is not configured. Add settings to config.yml.")
| async def create_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse: | 3 | 2023-12-14 13:25:52+00:00 | 2k |
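For orientation on the completion point of this record: the gold next line only declares the create_completion signature, and its body is not shown. Below is a minimal sketch of the request translation such a provider typically needs, reusing only the Content, Part, GenerationConfig and GeminiCompletionRequest models defined in the record; the role mapping and the helper name build_gemini_request are assumptions, not the repository's code.

def build_gemini_request(request: ChatCompletionRequest) -> GeminiCompletionRequest:
    # Map OpenAI-style chat messages onto the Gemini-style content list defined above.
    contents = []
    for msg in request.messages:
        role = "model" if msg.role == "assistant" else "user"  # assumed role mapping
        text = msg.content if isinstance(msg.content, str) else ""
        contents.append(Content(role=role, parts=[Part(text=text)]))
    config = GenerationConfig(temperature=request.temperature, maxOutputTokens=request.max_tokens)
    return GeminiCompletionRequest(contents=contents, generationConfig=config)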
multimodallearning/DG-TTA | dg_tta/tta/ipynb_utils.py | [
{
"identifier": "get_data_filepaths",
"path": "dg_tta/tta/config_log_utils.py",
"snippet": "def get_data_filepaths(tta_dataset_name, tta_dataset_bucket):\n raw_tta_dataset_dir = Path(nnUNet_raw, tta_dataset_name)\n if tta_dataset_bucket == \"imagesTr\":\n source_folders = [raw_tta_dataset_dir / \"imagesTr\"]\n elif tta_dataset_bucket == \"imagesTs\":\n source_folders = [raw_tta_dataset_dir / \"imagesTs\"]\n elif tta_dataset_bucket == \"imagesTrAndTs\":\n source_folders = [\n raw_tta_dataset_dir / \"imagesTr\",\n raw_tta_dataset_dir / \"imagesTs\",\n ]\n\n file_list = []\n for src_fld in source_folders:\n if src_fld.is_dir():\n file_list.extend(filter(lambda x: x.is_file(), src_fld.iterdir()))\n\n return file_list"
},
{
"identifier": "get_dgtta_colormap",
"path": "dg_tta/tta/config_log_utils.py",
"snippet": "def get_dgtta_colormap():\n hi_1 = \"#248888\"\n hi_2 = \"#79DCF0\"\n hi_3 = \"#e7475e\"\n hi_4 = \"#f0d879\"\n return matplotlib.colors.LinearSegmentedColormap.from_list(\n \"\", [hi_3, hi_4, hi_2, hi_1]\n )"
},
{
"identifier": "get_resources_dir",
"path": "dg_tta/tta/config_log_utils.py",
"snippet": "def get_resources_dir():\n return Path(dg_tta.__file__).parent / \"__resources__\""
},
{
"identifier": "check_dga_root_is_set",
"path": "dg_tta/utils.py",
"snippet": "def check_dga_root_is_set(soft_check=False):\n prompt = \"Please define an existing root directory for DG-TTA by setting DG_TTA_ROOT.\"\n check = Path(\n os.environ.get(\"DG_TTA_ROOT\", \"_\")\n ).is_dir()\n\n if soft_check and not check:\n print(prompt)\n return\n\n assert check, prompt"
}
] | import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
from dg_tta.tta.config_log_utils import (
get_data_filepaths,
get_dgtta_colormap,
get_resources_dir,
)
from dg_tta.utils import check_dga_root_is_set | 1,136 |
def read_image(source_data_paths, path_idx):
if source_data_paths is None:
return None, None
source_img, source_sitk_stuff = SimpleITKIO().read_images(
source_data_paths[path_idx : path_idx + 1]
)
source_img = source_img[0]
return torch.tensor(source_img)[None, None, :], source_sitk_stuff
def get_target_imgs_datapaths():
check_dga_root_is_set()
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
return tta_plan["tta_data_filepaths"]
def get_source_imgs_datapaths():
check_dga_root_is_set()
buckets = ["imagesTr", "imagesTs"]
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
source_dataset_name = tta_plan["__pretrained_dataset_name__"]
if source_dataset_name.startswith("TS104"):
return "TS104"
source_data_paths = []
for buc in buckets:
source_data_paths.extend(get_data_filepaths(source_dataset_name, buc))
return source_data_paths
def get_orient_imgs(img):
def get_axes_idxs(axis_size):
NUM_IDXS = 16
return np.linspace(0, axis_size - 1, NUM_IDXS).round().astype(int)
img = img.squeeze(0, 1)
D, H, W = img.shape
slices = dict(HW=[], DW=[], DH=[])
for d in get_axes_idxs(D):
slices["HW"].append(img[d, :, :])
for h in get_axes_idxs(H):
slices["DW"].append(img[:, h, :])
for w in get_axes_idxs(W):
slices["DH"].append(img[:, :, w])
return slices
def clear_axis(ax):
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
def get_spacing_ratio(sitk_stuff, axis_idx):
rolled_spacing = np.roll(np.array(sitk_stuff["spacing"]), axis_idx)
return rolled_spacing[1] / rolled_spacing[0]
def show_image_overview(img, sitk_stuff, fig_inch_size=5.0):
orient_imgs = get_orient_imgs(img)
vmin, vmax = img.min(), img.max()
dpi = 150.0
large_text_size = fig_inch_size * 10
small_text_size = fig_inch_size * 2
|
def read_image(source_data_paths, path_idx):
if source_data_paths is None:
return None, None
source_img, source_sitk_stuff = SimpleITKIO().read_images(
source_data_paths[path_idx : path_idx + 1]
)
source_img = source_img[0]
return torch.tensor(source_img)[None, None, :], source_sitk_stuff
def get_target_imgs_datapaths():
check_dga_root_is_set()
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
return tta_plan["tta_data_filepaths"]
def get_source_imgs_datapaths():
check_dga_root_is_set()
buckets = ["imagesTr", "imagesTs"]
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
source_dataset_name = tta_plan["__pretrained_dataset_name__"]
if source_dataset_name.startswith("TS104"):
return "TS104"
source_data_paths = []
for buc in buckets:
source_data_paths.extend(get_data_filepaths(source_dataset_name, buc))
return source_data_paths
def get_orient_imgs(img):
def get_axes_idxs(axis_size):
NUM_IDXS = 16
return np.linspace(0, axis_size - 1, NUM_IDXS).round().astype(int)
img = img.squeeze(0, 1)
D, H, W = img.shape
slices = dict(HW=[], DW=[], DH=[])
for d in get_axes_idxs(D):
slices["HW"].append(img[d, :, :])
for h in get_axes_idxs(H):
slices["DW"].append(img[:, h, :])
for w in get_axes_idxs(W):
slices["DH"].append(img[:, :, w])
return slices
def clear_axis(ax):
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
def get_spacing_ratio(sitk_stuff, axis_idx):
rolled_spacing = np.roll(np.array(sitk_stuff["spacing"]), axis_idx)
return rolled_spacing[1] / rolled_spacing[0]
def show_image_overview(img, sitk_stuff, fig_inch_size=5.0):
orient_imgs = get_orient_imgs(img)
vmin, vmax = img.min(), img.max()
dpi = 150.0
large_text_size = fig_inch_size * 10
small_text_size = fig_inch_size * 2 | cmap = get_dgtta_colormap() | 1 | 2023-12-08 08:43:11+00:00 | 2k |
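The gold continuation for this record begins with cmap = get_dgtta_colormap(). The lines below are a purely illustrative sketch of how show_image_overview could use that colormap together with the orient_imgs, vmin/vmax and clear_axis helpers defined above; the repository's actual figure layout is not shown and is assumed to differ.

    cmap = get_dgtta_colormap()
    fig, axes = plt.subplots(1, 3, figsize=(3 * fig_inch_size, fig_inch_size), dpi=dpi)
    for ax, key in zip(axes, ["HW", "DW", "DH"]):
        # Show the first slice of each orientation with the DG-TTA colormap.
        ax.imshow(orient_imgs[key][0], cmap=cmap, vmin=vmin, vmax=vmax)
        clear_axis(ax)
        ax.set_title(key, fontsize=small_text_size)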
tommy-xq/SA2VP | vpt_main/src/models/resnet.py | [
{
"identifier": "MLP",
"path": "vpt_main/src/models/mlp.py",
"snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n mlp_dims: List[int],\n dropout: float = 0.1,\n nonlinearity: Type[nn.Module] = nn.ReLU,\n normalization: Type[nn.Module] = nn.BatchNorm1d, # nn.LayerNorm,\n special_bias: bool = False,\n add_bn_first: bool = False,\n ):\n super(MLP, self).__init__()\n projection_prev_dim = input_dim\n projection_modulelist = []\n last_dim = mlp_dims[-1]\n mlp_dims = mlp_dims[:-1]\n\n if add_bn_first:\n if normalization is not None:\n projection_modulelist.append(normalization(projection_prev_dim))\n if dropout != 0:\n projection_modulelist.append(nn.Dropout(dropout))\n\n for idx, mlp_dim in enumerate(mlp_dims):\n fc_layer = nn.Linear(projection_prev_dim, mlp_dim)\n nn.init.kaiming_normal_(fc_layer.weight, a=0, mode='fan_out')\n projection_modulelist.append(fc_layer)\n projection_modulelist.append(nonlinearity())\n\n if normalization is not None:\n projection_modulelist.append(normalization(mlp_dim))\n\n if dropout != 0:\n projection_modulelist.append(nn.Dropout(dropout))\n projection_prev_dim = mlp_dim\n\n self.projection = nn.Sequential(*projection_modulelist)\n self.last_layer = nn.Linear(projection_prev_dim, last_dim)\n nn.init.kaiming_normal_(self.last_layer.weight, a=0, mode='fan_out')\n if special_bias:\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n torch.nn.init.constant_(self.last_layer.bias, bias_value)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n input_arguments:\n @x: torch.FloatTensor\n \"\"\"\n x = self.projection(x)\n x = self.last_layer(x)\n return x"
},
{
"identifier": "logging",
"path": "vpt_main/src/utils/logging.py",
"snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"visual_prompt\", color=True):\ndef setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):"
}
] | import torch
import torch.nn as nn
import torchvision as tv
from collections import OrderedDict
from torchvision import models
from .mlp import MLP
from ..utils import logging | 772 | #!/usr/bin/env python3
"""
ResNet-related models:
"imagenet_sup_rn18",
"imagenet_sup_rn34",
"imagenet_sup_rn50",
"imagenet_sup_rn101",
"imagenet_sup_rn152",
"mocov3_rn50"
"""
| #!/usr/bin/env python3
"""
ResNet-related models:
"imagenet_sup_rn18",
"imagenet_sup_rn34",
"imagenet_sup_rn50",
"imagenet_sup_rn101",
"imagenet_sup_rn152",
"mocov3_rn50"
"""
| logger = logging.get_logger("visual_prompt") | 1 | 2023-12-12 13:19:17+00:00 | 2k |
SooLab/DDCOT | utils_evaluate.py | [
{
"identifier": "caculate_bleu",
"path": "evaluations.py",
"snippet": "def caculate_bleu(results, data, gram):\n bleus = []\n for qid, output in results.items():\n prediction = output\n target = data[qid]\n # target = data[qid]['lecture'] + data[qid]['solution']\n target = target.strip()\n if target == \"\":\n continue\n bleu = bleu_score(target, prediction, gram)\n bleus.append(bleu)\n\n avg_bleu = sum(bleus) / len(bleus)\n\n return avg_bleu"
},
{
"identifier": "caculate_rouge",
"path": "evaluations.py",
"snippet": "def caculate_rouge(results, data):\n rouges = []\n for qid, output in results.items():\n prediction = output\n target = data[qid]\n # target = data[qid]['lecture'] + data[qid]['solution']\n target = target.strip()\n if prediction == \"\":\n continue\n if target == \"\":\n continue\n rouge = score_rouge(target, prediction)\n rouges.append(rouge)\n\n avg_rouge = sum(rouges) / len(rouges)\n return avg_rouge"
},
{
"identifier": "caculate_similariry",
"path": "evaluations.py",
"snippet": "def caculate_similariry(results, data, model):\n scores = []\n for qid, output in results.items():\n prediction = output\n target = data[qid]\n # target = data[qid]['lecture'] + data[qid]['solution']\n target = target.strip()\n\n score = similariry_score(target, prediction, model)\n scores.append(score)\n\n avg_score = sum(scores) / len(scores)\n return avg_score"
}
] | import os
import json
import argparse
import warnings
import pandas as pd
from sentence_transformers import SentenceTransformer
from evaluations import caculate_bleu, caculate_rouge, caculate_similariry | 973 | '''
Adapted from https://github.com/lupantech/ScienceQA
'''
warnings.filterwarnings('ignore')
def get_acc_with_contion(res_pd, key, values):
if isinstance(values, list):
total_pd = res_pd[res_pd[key].isin(values)]
else:
total_pd = res_pd[res_pd[key] == values]
correct_pd = total_pd[total_pd['true_false'] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return acc
def get_scores(result_data, rationale_data, results_reference, data_file, img):
# read result file
results = result_data
num = len(results)
# read data file
sqa_data = json.load(open(data_file))
# construct pandas data
sqa_pd = pd.DataFrame(sqa_data).T
res_pd = sqa_pd[sqa_pd['split'] == 'test'] # test set
if img:
res_pd = res_pd[res_pd["image"] == 'image.png']
# update data
for index, row in res_pd.iterrows():
res_pd.loc[index, 'no_context'] = True if (not row['hint'] and not row['image']) else False
res_pd.loc[index, 'has_text'] = True if row['hint'] else False
res_pd.loc[index, 'has_image'] = True if row['image'] else False
res_pd.loc[index, 'has_text_image'] = True if (row['hint'] and row['image']) else False
res_pd.loc[index, 'has_no_image'] = False if row['image'] else True
label = row['answer']
pred = int(results[index])
res_pd.loc[index, 'pred'] = pred
res_pd.loc[index, 'true_false'] = (label == pred)
# accuracy scores
acc_average = len(res_pd[res_pd['true_false'] == True]) / num * 100
# rationale quality
## BLEU
bleu1 = caculate_bleu(rationale_data, results_reference, gram=1)
bleu4 = caculate_bleu(rationale_data, results_reference, gram=4)
## Rouge-L
rouge = caculate_rouge(rationale_data, results_reference)
## Similarity
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2').cuda()
| '''
Adapted from https://github.com/lupantech/ScienceQA
'''
warnings.filterwarnings('ignore')
def get_acc_with_contion(res_pd, key, values):
if isinstance(values, list):
total_pd = res_pd[res_pd[key].isin(values)]
else:
total_pd = res_pd[res_pd[key] == values]
correct_pd = total_pd[total_pd['true_false'] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return acc
def get_scores(result_data, rationale_data, results_reference, data_file, img):
# read result file
results = result_data
num = len(results)
# read data file
sqa_data = json.load(open(data_file))
# construct pandas data
sqa_pd = pd.DataFrame(sqa_data).T
res_pd = sqa_pd[sqa_pd['split'] == 'test'] # test set
if img:
res_pd = res_pd[res_pd["image"] == 'image.png']
# update data
for index, row in res_pd.iterrows():
res_pd.loc[index, 'no_context'] = True if (not row['hint'] and not row['image']) else False
res_pd.loc[index, 'has_text'] = True if row['hint'] else False
res_pd.loc[index, 'has_image'] = True if row['image'] else False
res_pd.loc[index, 'has_text_image'] = True if (row['hint'] and row['image']) else False
res_pd.loc[index, 'has_no_image'] = False if row['image'] else True
label = row['answer']
pred = int(results[index])
res_pd.loc[index, 'pred'] = pred
res_pd.loc[index, 'true_false'] = (label == pred)
# accuracy scores
acc_average = len(res_pd[res_pd['true_false'] == True]) / num * 100
# rationale quality
## BLEU
bleu1 = caculate_bleu(rationale_data, results_reference, gram=1)
bleu4 = caculate_bleu(rationale_data, results_reference, gram=4)
## Rouge-L
rouge = caculate_rouge(rationale_data, results_reference)
## Similarity
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2').cuda() | similariry = caculate_similariry(rationale_data, results_reference, model) | 2 | 2023-12-14 20:47:08+00:00 | 2k |
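The record's gold next line computes the sentence-transformer similarity. A minimal sketch of how the metrics computed above are typically gathered into one report follows; the dictionary keys and the decision to return a dict are assumptions, only the caculate_similariry signature comes from the context snippet.

    similariry = caculate_similariry(rationale_data, results_reference, model)
    scores = {
        "acc": acc_average,   # answer accuracy over the filtered test split
        "bleu1": bleu1,
        "bleu4": bleu4,
        "rouge": rouge,
        "similarity": similariry,
    }
    return scores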
Qazalbash/jaxampler | jaxampler/_src/rvs/bernoulli.py | [
{
"identifier": "Numeric",
"path": "jaxampler/_src/typing.py",
"snippet": ""
},
{
"identifier": "Binomial",
"path": "jaxampler/_src/rvs/binomial.py",
"snippet": "class Binomial(DiscreteRV):\n r\"\"\"Binomial random variable\n .. math::\n X\\sim Bin(p,n) \\iff P(X=x|p,n)=\\binom{n}{x}p^{x}(1-p)^{n-x}\n \"\"\"\n\n def __init__(self, p: Numeric | Any, n: Numeric | Any, name: Optional[str] = None) -> None:\n \"\"\"\n :param p: Probability of success\n :param n: Number of trials\n :param name: Name of the random variable\n \"\"\"\n shape, self._p, self._n = jx_cast(p, n)\n self.check_params()\n self._q = 1.0 - self._p\n super().__init__(name=name, shape=shape)\n\n def check_params(self) -> None:\n \"\"\"Check the parameters of the random variable.\"\"\"\n assert jnp.all(self._p >= 0.0) and jnp.all(self._p <= 1.0), \"p must be in [0, 1]\"\n assert jnp.all(self._n.dtype == jnp.int32), \"n must be an integer\"\n assert jnp.all(self._n > 0), \"n must be positive\"\n\n @partial(jit, static_argnums=(0,))\n def logpmf_x(self, x: Numeric) -> Numeric:\n return jax_binom.logpmf(x, self._n, self._p)\n\n @partial(jit, static_argnums=(0,))\n def pmf_x(self, x: Numeric) -> Numeric:\n return jax_binom.pmf(x, self._n, self._p)\n\n @partial(jit, static_argnums=(0,))\n def logcdf_x(self, x: Numeric) -> Numeric:\n return jnp.log(self.cdf_x(x))\n\n @partial(jit, static_argnums=(0,))\n def cdf_x(self, x: Numeric) -> Numeric:\n floor_x = jnp.floor(x)\n cond = [x < 0, x >= self._n, jnp.logical_and(x >= 0, x < self._n)]\n return jnp.select(cond, [0.0, 1.0, betainc(self._n - floor_x, floor_x + 1, self._q)])\n\n def rvs(self, shape: tuple[int, ...], key: Optional[Array] = None) -> Array:\n if key is None:\n key = self.get_key()\n new_shape = shape + self._shape\n return jax.random.binomial(key=key, n=self._n, p=self._p, shape=new_shape)\n\n def __repr__(self) -> str:\n string = f\"Binomial(p={self._p}, n={self._n}\"\n if self._name is not None:\n string += f\", name={self._name}\"\n string += \")\"\n return string"
}
] | from typing import Any, Optional
from ..typing import Numeric
from .binomial import Binomial | 959 | # Copyright 2023 The Jaxampler Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
class Bernoulli(Binomial):
r"""Bernoulli random variable with probability of success p
.. math::
X\sim \mathbf{B}(p)\iff P\left(X=x|p\right)=p^{x}(1-p)^{1-x}
"""
| # Copyright 2023 The Jaxampler Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
class Bernoulli(Binomial):
r"""Bernoulli random variable with probability of success p
.. math::
X\sim \mathbf{B}(p)\iff P\left(X=x|p\right)=p^{x}(1-p)^{1-x}
"""
| def __init__(self, p: Numeric | Any, name: Optional[str] = None) -> None: | 0 | 2023-12-11 04:27:17+00:00 | 2k |
GXNU-ZhongLab/ODTrack | lib/models/odtrack/base_backbone.py | [
{
"identifier": "PatchEmbed",
"path": "lib/models/layers/patch_embed.py",
"snippet": "class PatchEmbed(nn.Module):\r\n \"\"\" 2D Image to Patch Embedding\r\n \"\"\"\r\n\r\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):\r\n super().__init__()\r\n img_size = to_2tuple(img_size)\r\n patch_size = to_2tuple(patch_size)\r\n self.img_size = img_size\r\n self.patch_size = patch_size\r\n self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])\r\n self.num_patches = self.grid_size[0] * self.grid_size[1]\r\n self.flatten = flatten\r\n\r\n self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\r\n self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\r\n\r\n def forward(self, x):\r\n # allow different input size\r\n # B, C, H, W = x.shape\r\n # _assert(H == self.img_size[0], f\"Input image height ({H}) doesn't match model ({self.img_size[0]}).\")\r\n # _assert(W == self.img_size[1], f\"Input image width ({W}) doesn't match model ({self.img_size[1]}).\")\r\n x = self.proj(x)\r\n if self.flatten:\r\n x = x.flatten(2).transpose(1, 2) # BCHW -> BNC\r\n x = self.norm(x)\r\n return x\r"
},
{
"identifier": "combine_tokens",
"path": "lib/models/odtrack/utils.py",
"snippet": "def combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):\r\n # [B, HW, C]\r\n len_t = template_tokens.shape[1]\r\n len_s = search_tokens.shape[1]\r\n\r\n if mode == 'direct':\r\n merged_feature = torch.cat((template_tokens, search_tokens), dim=1)\r\n elif mode == 'template_central':\r\n central_pivot = len_s // 2\r\n first_half = search_tokens[:, :central_pivot, :]\r\n second_half = search_tokens[:, central_pivot:, :]\r\n merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)\r\n elif mode == 'partition':\r\n feat_size_s = int(math.sqrt(len_s))\r\n feat_size_t = int(math.sqrt(len_t))\r\n window_size = math.ceil(feat_size_t / 2.)\r\n # pad feature maps to multiples of window size\r\n B, _, C = template_tokens.shape\r\n H = W = feat_size_t\r\n template_tokens = template_tokens.view(B, H, W, C)\r\n pad_l = pad_b = pad_r = 0\r\n # pad_r = (window_size - W % window_size) % window_size\r\n pad_t = (window_size - H % window_size) % window_size\r\n template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))\r\n _, Hp, Wp, _ = template_tokens.shape\r\n template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)\r\n template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)\r\n _, Hc, Wc, _ = template_tokens.shape\r\n template_tokens = template_tokens.view(B, -1, C)\r\n merged_feature = torch.cat([template_tokens, search_tokens], dim=1)\r\n\r\n # calculate new h and w, which may be useful for SwinT or others\r\n merged_h, merged_w = feat_size_s + Hc, feat_size_s\r\n if return_res:\r\n return merged_feature, merged_h, merged_w\r\n\r\n else:\r\n raise NotImplementedError\r\n\r\n return merged_feature\r"
},
{
"identifier": "recover_tokens",
"path": "lib/models/odtrack/utils.py",
"snippet": "def recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):\r\n if mode == 'direct':\r\n recovered_tokens = merged_tokens\r\n elif mode == 'template_central':\r\n central_pivot = len_search_token // 2\r\n len_remain = len_search_token - central_pivot\r\n len_half_and_t = central_pivot + len_template_token\r\n\r\n first_half = merged_tokens[:, :central_pivot, :]\r\n second_half = merged_tokens[:, -len_remain:, :]\r\n template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]\r\n\r\n recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)\r\n elif mode == 'partition':\r\n recovered_tokens = merged_tokens\r\n else:\r\n raise NotImplementedError\r\n\r\n return recovered_tokens\r"
}
] | from functools import partial
from timm.models.vision_transformer import resize_pos_embed
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from lib.models.layers.patch_embed import PatchEmbed
from lib.models.odtrack.utils import combine_tokens, recover_tokens
import torch
import torch.nn as nn
import torch.nn.functional as F
| 1,542 |
class BaseBackbone(nn.Module):
def __init__(self):
super().__init__()
# for original ViT
self.pos_embed = None
self.img_size = [224, 224]
self.patch_size = 16
self.embed_dim = 384
self.cat_mode = 'direct'
self.pos_embed_z = None
self.pos_embed_x = None
self.template_segment_pos_embed = None
self.search_segment_pos_embed = None
self.return_inter = False
self.return_stage = [2, 5, 8, 11]
self.add_sep_seg = False
def finetune_track(self, cfg, patch_start_index=1):
search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
new_patch_size = cfg.MODEL.BACKBONE.STRIDE
self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
self.return_inter = cfg.MODEL.RETURN_INTER
self.return_stage = cfg.MODEL.RETURN_STAGES
self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG
# resize patch embedding
if new_patch_size != self.patch_size:
print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
old_patch_embed = {}
for name, param in self.patch_embed.named_parameters():
if 'weight' in name:
param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
mode='bicubic', align_corners=False)
param = nn.Parameter(param)
old_patch_embed[name] = param
|
class BaseBackbone(nn.Module):
def __init__(self):
super().__init__()
# for original ViT
self.pos_embed = None
self.img_size = [224, 224]
self.patch_size = 16
self.embed_dim = 384
self.cat_mode = 'direct'
self.pos_embed_z = None
self.pos_embed_x = None
self.template_segment_pos_embed = None
self.search_segment_pos_embed = None
self.return_inter = False
self.return_stage = [2, 5, 8, 11]
self.add_sep_seg = False
def finetune_track(self, cfg, patch_start_index=1):
search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
new_patch_size = cfg.MODEL.BACKBONE.STRIDE
self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
self.return_inter = cfg.MODEL.RETURN_INTER
self.return_stage = cfg.MODEL.RETURN_STAGES
self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG
# resize patch embedding
if new_patch_size != self.patch_size:
print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
old_patch_embed = {}
for name, param in self.patch_embed.named_parameters():
if 'weight' in name:
param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
mode='bicubic', align_corners=False)
param = nn.Parameter(param)
old_patch_embed[name] = param
| self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,
| 0 | 2023-12-10 03:57:19+00:00 | 2k |
yilin-bao/nnanim | TestingCode/transformer.py | [
{
"identifier": "Attention",
"path": "TestingCode/modules.py",
"snippet": "class Attention(nn.Module):\n def __init__(\n self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0\n ):\n super(Attention, self).__init__()\n\n assert (\n dim % num_heads == 0\n ), \"Embedding dimension should be divisible by number of heads\"\n\n self.num_heads = num_heads\n head_dim = dim // num_heads\n self.scale = head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = (\n self.qkv(x)\n .reshape(B, N, 3, self.num_heads, C // self.num_heads)\n .permute(2, 0, 3, 1, 4)\n )\n # make torchscript happy (cannot use tensor as tuple)\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x"
},
{
"identifier": "FeedForward",
"path": "TestingCode/modules.py",
"snippet": "class FeedForward(nn.Module):\n \"\"\"\n Implementation of MLP for transformer\n \"\"\"\n\n def __init__(self, dim, hidden_dim, dropout_rate=0.0, revised=False):\n super(FeedForward, self).__init__()\n if not revised:\n \"\"\"\n Original: https://arxiv.org/pdf/2010.11929.pdf\n \"\"\"\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_dim),\n nn.GELU(),\n nn.Dropout(p=dropout_rate),\n nn.Linear(hidden_dim, dim),\n )\n else:\n \"\"\"\n Scaled ReLU: https://arxiv.org/pdf/2109.03810.pdf\n \"\"\"\n self.net = nn.Sequential(\n nn.Conv1d(dim, hidden_dim, kernel_size=1, stride=1),\n nn.BatchNorm1d(hidden_dim),\n nn.GELU(),\n nn.Dropout(p=dropout_rate),\n nn.Conv1d(hidden_dim, dim, kernel_size=1, stride=1),\n nn.BatchNorm1d(dim),\n nn.GELU(),\n )\n\n self.revised = revised\n self._init_weights()\n\n def _init_weights(self):\n for name, module in self.net.named_children():\n if isinstance(module, nn.Linear):\n nn.init.normal_(module.bias, std=1e-6)\n\n def forward(self, x):\n if self.revised:\n x = x.permute(0, 2, 1)\n x = self.net(x)\n x = x.permute(0, 2, 1)\n else:\n x = self.net(x)\n\n return x"
},
{
"identifier": "PreNorm",
"path": "TestingCode/modules.py",
"snippet": "class PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super(PreNorm, self).__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(self.norm(x), **kwargs)"
}
] | from torch import nn
from TestingCode.modules import Attention, FeedForward, PreNorm | 1,079 |
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
mlp_ratio=4.0,
attn_dropout=0.0,
dropout=0.0,
qkv_bias=True,
revised=False,
):
super().__init__()
self.layers = nn.ModuleList([])
assert isinstance(
mlp_ratio, float
), "MLP ratio should be an integer for valid "
mlp_dim = int(mlp_ratio * dim)
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNorm(
dim,
Attention(
dim,
num_heads=heads,
qkv_bias=qkv_bias,
attn_drop=attn_dropout,
proj_drop=dropout,
),
),
PreNorm(
dim,
|
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
mlp_ratio=4.0,
attn_dropout=0.0,
dropout=0.0,
qkv_bias=True,
revised=False,
):
super().__init__()
self.layers = nn.ModuleList([])
assert isinstance(
mlp_ratio, float
), "MLP ratio should be an integer for valid "
mlp_dim = int(mlp_ratio * dim)
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNorm(
dim,
Attention(
dim,
num_heads=heads,
qkv_bias=qkv_bias,
attn_drop=attn_dropout,
proj_drop=dropout,
),
),
PreNorm(
dim, | FeedForward(dim, mlp_dim, dropout_rate=dropout,), | 1 | 2023-12-05 22:01:06+00:00 | 2k |
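The layer list above pairs a PreNorm-wrapped Attention with a PreNorm-wrapped FeedForward at every depth step. The forward pass that usually accompanies this pattern applies each sub-block with a residual connection; the repository's actual forward method is not shown, so the sketch below is an assumption.

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x   # pre-norm self-attention with residual
            x = ff(x) + x     # pre-norm feed-forward with residual
        return x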
Tlntin/booking_simulator | modelscope_agent/llm/custom_llm.py | [
{
"identifier": "AgentType",
"path": "modelscope_agent/agent_types.py",
"snippet": "class AgentType(str, Enum):\n\n DEFAULT = 'default'\n \"\"\"\"\"\"\n\n MS_AGENT = 'ms-agent'\n \"\"\"An agent that uses the ModelScope-agent specific format does a reasoning step before acting .\n \"\"\"\n\n MRKL = 'mrkl'\n \"\"\"An agent that does a reasoning step before acting with mrkl\"\"\"\n\n REACT = 'react'\n \"\"\"An agent that does a reasoning step before acting with react\"\"\"\n\n Messages = 'messages'\n \"\"\"An agent optimized for using open AI functions.\"\"\""
},
{
"identifier": "LLM",
"path": "modelscope_agent/llm/base.py",
"snippet": "class LLM:\n name = ''\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.agent_type = None\n self.model = None\n self.model_id = self.model\n\n def set_agent_type(self, agent_type):\n self.agent_type = agent_type\n\n @abstractmethod\n def generate(self, prompt: str, functions: list = [], **kwargs) -> str:\n \"\"\"each llm should implement this function to generate response\n\n Args:\n prompt (str): prompt\n functions (list): list of functions object including: name, description, parameters\n Returns:\n str: response\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def stream_generate(self,\n prompt: str,\n functions: list = [],\n **kwargs) -> str:\n \"\"\"stream generate response, which yields a generator of response in each step\n\n Args:\n prompt (str): prompt\n functions (list): list of functions object including: name, description, parameters\n Yields:\n Iterator[str]: iterator of step response\n \"\"\"\n raise NotImplementedError\n\n def tokenize(self, input_text: str) -> List[int]:\n \"\"\"tokenize is used to calculate the length of the text to meet the model's input length requirements\n\n Args:\n input_text (str): input text\n Returns:\n list[int]: token_ids\n \"\"\"\n raise NotImplementedError\n\n def detokenize(self, input_ids: List[int]) -> str:\n \"\"\"detokenize\n\n Args:\n input_ids (list[int]): input token_ids\n Returns:\n str: text\n \"\"\"\n raise NotImplementedError"
},
{
"identifier": "DEFAULT_MESSAGE",
"path": "modelscope_agent/llm/utils.py",
"snippet": "DEFAULT_MESSAGE = {\n 'role': 'user',\n 'content': 'No entry from user - please suggest something to enter'\n}"
}
] | import os
import json
import requests
import traceback
from modelscope_agent.agent_types import AgentType
from .base import LLM
from .utils import DEFAULT_MESSAGE | 809 |
class CustomLLM(LLM):
'''
This method is for the service that provide llm serving through http.
user could override the result parsing method if needed
While put all the necessary information in the env variable, such as Token, Model, URL
'''
name = 'custom_llm'
def __init__(self, cfg):
super().__init__(cfg)
self.token = os.getenv('HTTP_LLM_TOKEN', None)
self.model = os.getenv('HTTP_LLM_MODEL', None)
self.model_id = self.model
self.url = os.getenv('HTTP_LLM_URL', None)
if self.token is None:
raise ValueError('HTTP_LLM_TOKEN is not set')
|
class CustomLLM(LLM):
'''
This method is for the service that provide llm serving through http.
user could override the result parsing method if needed
While put all the necessary information in the env variable, such as Token, Model, URL
'''
name = 'custom_llm'
def __init__(self, cfg):
super().__init__(cfg)
self.token = os.getenv('HTTP_LLM_TOKEN', None)
self.model = os.getenv('HTTP_LLM_MODEL', None)
self.model_id = self.model
self.url = os.getenv('HTTP_LLM_URL', None)
if self.token is None:
raise ValueError('HTTP_LLM_TOKEN is not set') | self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT) | 0 | 2023-12-12 04:24:00+00:00 | 2k |
dx-dtran/gpt2-mlx | generate.py | [
{
"identifier": "GPT",
"path": "transformer.py",
"snippet": "class GPT(nn.Module):\n def __init__(self, config: GPTConfig):\n super().__init__()\n assert config.vocab_size is not None\n assert config.block_size is not None\n self.config = config\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.block_size, config.n_embd)\n self.drop = nn.Dropout(config.dropout)\n self.h = [Block(config) for _ in range(config.n_layer)]\n self.ln_f = nn.LayerNorm(config.n_embd, affine=config.bias)\n\n def _forward_transformer_blocks(\n self, x: mx.array, pos: mx.array, mask=None, cache=None, build_cache=False\n ):\n tok_emb = self.wte(x)\n pos_emb = self.wpe(pos)\n x = self.drop(tok_emb + pos_emb)\n kv_cache = []\n\n if cache is not None:\n for i in range(len(cache)):\n x, cache[i] = self.h[i](x, mask=None, cache=cache[i])\n else:\n for block in self.h:\n x, curr_cache = block(x, mask=mask)\n if build_cache:\n kv_cache.append(curr_cache)\n\n x = self.ln_f(x)\n return x, kv_cache if build_cache else cache\n\n def _create_causal_mask(self, length: int):\n mask = nn.MultiHeadAttention.create_additive_causal_mask(length)\n return mask.astype(self.wte.weight.dtype)\n\n def _sample_next_token(self, x, temperature):\n logits = mx.expand_dims(x[:, -1], axis=0) @ self.wte.weight.T\n y = logits[:, -1, :]\n y = mx.random.categorical(y * (1 / temperature))\n return y\n\n def generate(self, x: mx.array, max_new_tokens=256, temperature=0.8):\n _, t = x.shape\n pos = mx.arange(0, t, 1, dtype=x.dtype)\n mask = self._create_causal_mask(t)\n x, cache = self._forward_transformer_blocks(x, pos, mask=mask, build_cache=True)\n y = self._sample_next_token(x, temperature)\n position = t\n yield y\n\n for _ in range(max_new_tokens):\n position += 1\n x = y[:, None]\n x, cache = self._forward_transformer_blocks(x, position, cache=cache)\n y = self._sample_next_token(x, temperature)\n yield y\n\n def __call__(self, x: mx.array, targets: mx.array = None):\n b, t = x.shape\n assert (\n t <= self.config.block_size\n ), f\"Cannot forward sequence of length {t}, block size is only {self.config.block_size}\"\n pos = mx.arange(0, t, 1, dtype=x.dtype)\n\n mask = self._create_causal_mask(t)\n x, _ = self._forward_transformer_blocks(x, pos, mask=mask)\n\n return x @ self.wte.weight.T\n\n def loss(self, x, y):\n logits = self(x)\n loss = nn.losses.cross_entropy(\n logits.reshape(-1, logits.shape[-1]), y.reshape(-1)\n )\n mx.simplify(loss)\n\n return mx.mean(loss)"
},
{
"identifier": "GPTConfig",
"path": "transformer.py",
"snippet": "class GPTConfig:\n block_size: int = 1024\n vocab_size: int = 50304\n n_layer: int = 12\n n_head: int = 12\n n_embd: int = 768\n dropout: float = 0.0\n bias: bool = True"
}
] | import argparse
import tiktoken
import time
import mlx.core as mx
from mlx.utils import tree_unflatten, tree_flatten
from transformer import GPT, GPTConfig | 1,096 |
def load_model(model_name):
config_args = {
"gpt2": dict(n_layer=12, n_head=12, n_embd=768),
"gpt2-medium": dict(n_layer=24, n_head=16, n_embd=1024),
"gpt2-large": dict(n_layer=36, n_head=20, n_embd=1280),
"gpt2-xl": dict(n_layer=48, n_head=25, n_embd=1600),
}[model_name]
config_args["vocab_size"] = 50257
config_args["block_size"] = 1024
config_args["bias"] = True
config = GPTConfig(**config_args)
|
def load_model(model_name):
config_args = {
"gpt2": dict(n_layer=12, n_head=12, n_embd=768),
"gpt2-medium": dict(n_layer=24, n_head=16, n_embd=1024),
"gpt2-large": dict(n_layer=36, n_head=20, n_embd=1280),
"gpt2-xl": dict(n_layer=48, n_head=25, n_embd=1600),
}[model_name]
config_args["vocab_size"] = 50257
config_args["block_size"] = 1024
config_args["bias"] = True
config = GPTConfig(**config_args)
| model = GPT(config) | 0 | 2023-12-09 03:33:57+00:00 | 2k |
chenchenygu/watermark-learnability | kgw_watermarking/watermark_reliability_release/utils/generation.py | [
{
"identifier": "load_lfqa",
"path": "kgw_watermarking/watermark_reliability_release/utils/data/lfqa.py",
"snippet": "def load_lfqa(args=None, path=\"./utils/data/lfqa.jsonl\"):\n cols_to_load = [\"prefix\", \"gold_completion\", \"title\", \"selftext\", \"q_id\"]\n\n args.dataset_config_name = None\n args.dataset_split = None\n args.columns_to_remove = list(set(args.columns_to_remove + cols_to_load))\n\n def lfqa_generator():\n for ex in read_jsonlines(path):\n row = {k: ex[k] for k in cols_to_load}\n row[\"prefix\"] = f\"{prompts[args.prompt_id]}{row['prefix']}\"\n yield row\n\n dataset = IterableDataset.from_generator(lfqa_generator)\n return dataset"
},
{
"identifier": "load_essays",
"path": "kgw_watermarking/watermark_reliability_release/utils/data/essays.py",
"snippet": "def load_essays(args=None):\n cols_to_load = [\"instructions\", \"essays\"]\n cols_to_remove = [\"titles\", \"urls\", \"__index_level_0__\"]\n\n dataset = load_dataset(\n \"ChristophSchuhmann/essays-with-instructions\",\n streaming=True,\n split=args.dataset_split,\n )\n dataset = dataset.remove_columns(cols_to_remove)\n\n args.dataset_config_name = None\n args.dataset_split = None\n args.columns_to_remove = list(set(args.columns_to_remove + cols_to_load))\n\n return dataset"
},
{
"identifier": "load_wikitext",
"path": "kgw_watermarking/watermark_reliability_release/utils/data/wikitext.py",
"snippet": "def load_wikitext(args=None):\n assert args is not None, \"args must be provided to load_wikitext\"\n assert (\n args.dataset_config_name is not None\n ), \"args.dataset_config_name must be None to load_wikitext\"\n assert args.dataset_split is not None, \"args.dataset_split must be None to load_wikitext\"\n\n args.columns_to_remove = list(set(args.columns_to_remove + [\"text\"]))\n\n # load the regular dataset\n raw_dataset = load_dataset(\n args.dataset_name,\n args.dataset_config_name,\n split=args.dataset_split,\n streaming=False, # we're doing this conversion ourselves\n )\n\n def wikitext_generator():\n # the generator loop\n for ex in raw_dataset:\n yield ex\n\n dataset = IterableDataset.from_generator(wikitext_generator)\n return dataset"
}
] | import torch
from datasets import load_dataset, IterableDataset
from torch import Tensor
from tokenizers import Tokenizer
from transformers import (
AutoTokenizer,
LlamaTokenizer,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
DataCollatorWithPadding,
)
from .data.lfqa import load_lfqa
from .data.essays import load_essays
from .data.wikitext import load_wikitext | 1,351 | # coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# HF classes
MAX_GENERATIONS = int(10000) # Hardcoded max length to avoid infinite loop
def load_model(args):
"""Load and return the model and tokenizer"""
args.is_seq2seq_model = any(
[(model_type in args.model_name_or_path) for model_type in ["t5", "T0"]]
)
args.is_decoder_only_model = any(
[(model_type in args.model_name_or_path) for model_type in ["gpt", "opt", "bloom", "llama"]]
)
if args.is_seq2seq_model:
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)
elif args.is_decoder_only_model:
if args.load_fp16:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
else:
raise ValueError(f"Unknown model type: {args.model_name_or_path}")
if args.use_gpu:
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.load_fp16:
pass
else:
model = model.to(device)
else:
device = "cpu"
model.eval()
if args.is_decoder_only_model:
padding_side = "left"
else:
raise NotImplementedError(
"Need to check how to handle padding for seq2seq models when calling generate"
)
if "llama" in args.model_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
model.config.bos_token_id = 1
model.config.eos_token_id = 2
else:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
args.model_max_length = model.config.max_position_embeddings
return model, tokenizer, device
def add_idx(example, idx):
example.update({"idx": idx})
return example
def load_hf_dataset(args):
dataset_name, dataset_config_name = args.dataset_name, args.dataset_config_name
if dataset_name == "lfqa":
| # coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# HF classes
MAX_GENERATIONS = int(10000) # Hardcoded max length to avoid infinite loop
def load_model(args):
"""Load and return the model and tokenizer"""
args.is_seq2seq_model = any(
[(model_type in args.model_name_or_path) for model_type in ["t5", "T0"]]
)
args.is_decoder_only_model = any(
[(model_type in args.model_name_or_path) for model_type in ["gpt", "opt", "bloom", "llama"]]
)
if args.is_seq2seq_model:
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)
elif args.is_decoder_only_model:
if args.load_fp16:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
else:
raise ValueError(f"Unknown model type: {args.model_name_or_path}")
if args.use_gpu:
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.load_fp16:
pass
else:
model = model.to(device)
else:
device = "cpu"
model.eval()
if args.is_decoder_only_model:
padding_side = "left"
else:
raise NotImplementedError(
"Need to check how to handle padding for seq2seq models when calling generate"
)
if "llama" in args.model_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
model.config.bos_token_id = 1
model.config.eos_token_id = 2
else:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
args.model_max_length = model.config.max_position_embeddings
return model, tokenizer, device
def add_idx(example, idx):
example.update({"idx": idx})
return example
def load_hf_dataset(args):
dataset_name, dataset_config_name = args.dataset_name, args.dataset_config_name
if dataset_name == "lfqa": | dataset = load_lfqa(args) | 0 | 2023-12-07 16:45:33+00:00 | 2k |
skyoux/SemAIM | main_knn.py | [
{
"identifier": "interpolate_pos_embed",
"path": "util/pos_embed.py",
"snippet": "def interpolate_pos_embed(model, checkpoint_model):\n if 'pos_embed' in checkpoint_model:\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n checkpoint_model['pos_embed'] = new_pos_embed"
},
{
"identifier": "models_vit",
"path": "models/models_vit.py",
"snippet": "class VisionTransformer(timm.models.vision_transformer.VisionTransformer):\n def __init__(self, global_pool=False, **kwargs):\n def forward_features(self, x):\n def forward_head(self, x):\ndef vit_small_patch16(**kwargs):\ndef vit_base_patch16(**kwargs):\ndef vit_large_patch16(**kwargs):\ndef vit_huge_patch14(**kwargs):\n B = x.shape[0]"
}
] | import os
import sys
import argparse
import numpy as np
import torch
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import timm.models as timm_models
import util.misc as misc
from torch import nn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
from timm.models.layers import trunc_normal_
from util.pos_embed import interpolate_pos_embed
from models import models_vit | 887 | #!/usr/bin/env python
def extract_feature_pipeline(args):
######################## preparing data ... ########################
resize_size = 256 if args.input_size == 224 else 512
transform = pth_transforms.Compose([
pth_transforms.Resize(resize_size, interpolation=3),
pth_transforms.CenterCrop(args.input_size),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = ReturnIndexDataset(os.path.join(args.data_path, 'train'), transform)
dataset_val = ReturnIndexDataset(os.path.join(args.data_path, 'val'), transform)
train_labels = torch.tensor(dataset_train.target).long()
test_labels = torch.tensor(dataset_val.target).long()
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
######################## building network ... ########################
| #!/usr/bin/env python
def extract_feature_pipeline(args):
######################## preparing data ... ########################
resize_size = 256 if args.input_size == 224 else 512
transform = pth_transforms.Compose([
pth_transforms.Resize(resize_size, interpolation=3),
pth_transforms.CenterCrop(args.input_size),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = ReturnIndexDataset(os.path.join(args.data_path, 'train'), transform)
dataset_val = ReturnIndexDataset(os.path.join(args.data_path, 'val'), transform)
train_labels = torch.tensor(dataset_train.target).long()
test_labels = torch.tensor(dataset_val.target).long()
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
######################## building network ... ######################## | model = models_vit.__dict__[args.model]( | 1 | 2023-12-10 15:17:11+00:00 | 2k |
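A sketch of how the network-building step announced by this banner usually continues in feature-extraction scripts: instantiate the ViT named by args.model, restore pretrained weights, and adapt positional embeddings with the interpolate_pos_embed helper from the context snippet. Argument names such as args.pretrained_weights are assumptions, not necessarily this repository's flags.

    model = models_vit.__dict__[args.model](num_classes=0)
    checkpoint = torch.load(args.pretrained_weights, map_location="cpu")
    checkpoint_model = checkpoint.get("model", checkpoint)
    interpolate_pos_embed(model, checkpoint_model)          # resize pos. embeddings to the input size
    model.load_state_dict(checkpoint_model, strict=False)   # classification head may be absent
    model.cuda()
    model.eval()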
boweniac/autogan | autogan/oai/generate_utils.py | [
{
"identifier": "chat_completions",
"path": "autogan/oai/openai_utils.py",
"snippet": "def chat_completions(messages: list, api_key: Dict, request_timeout: int, max_retries: int,\n stream_mode: Optional[bool] = None):\n \"\"\"OpenAI interface and OpenAI like interface call\n\n :param messages:\n :param api_key: LLM configuration.\n :param request_timeout:\n :param max_retries:\n :param stream_mode:\n \"\"\"\n if api_key[\"api_type\"] == \"openai\" or api_key[\"api_type\"] == \"azure\":\n return openai_chat_completions(messages, api_key, request_timeout, max_retries, stream_mode)\n else:\n return openai_like_chat_completions(messages, api_key, request_timeout, max_retries, stream_mode)"
},
{
"identifier": "LLMConfig",
"path": "autogan/oai/config_utils.py",
"snippet": "class LLMConfig:\n \"\"\"LLM config object\n \"\"\"\n\n def __init__(\n self,\n api_key_list: ConfigList,\n max_messages_tokens: str,\n request_interval_time: int,\n request_timeout: int,\n max_retries: int\n ):\n self._api_key_list = api_key_list\n self._max_messages_tokens = max_messages_tokens\n self._request_interval_time = request_interval_time\n self._request_timeout = request_timeout\n self._max_retries = max_retries\n\n def api_key(self, index):\n \"\"\"Get the one configuration in the api_key_list.\n \"\"\"\n return self._api_key_list.get_config(index)\n\n @property\n def next_api_key(self):\n \"\"\"Get the next configuration in the api_key_list.\n \"\"\"\n return self._api_key_list.get_next_config\n\n @property\n def len_of_api_key_list(self) -> int:\n \"\"\"Get the first configuration in the api_key_list list.\n \"\"\"\n return self._api_key_list.len\n\n @property\n def model(self):\n \"\"\"Get the model of the first configuration in the api_key_list list.\n \"\"\"\n return self._api_key_list.get_first_config[\"model\"]\n\n @property\n def max_messages_tokens(self):\n \"\"\"Limit the maximum tokens of the context in each dialogue.\n \"\"\"\n return self._max_messages_tokens\n\n @property\n def request_interval_time(self):\n return self._request_interval_time\n\n @property\n def request_timeout(self):\n return self._request_timeout\n\n @property\n def max_retries(self):\n return self._max_retries"
},
{
"identifier": "count_text_tokens",
"path": "autogan/oai/count_tokens_utils.py",
"snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens"
},
{
"identifier": "ResponseFuncType",
"path": "autogan/utils/response.py",
"snippet": " def colored(x, *args, **kwargs):\ndef default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,\n content: Optional[str], tokens: Optional[int], response: any):\ndef obj_to_dict(obj):"
}
] | import time
from typing import Optional, List
from autogan.oai.openai_utils import chat_completions
from autogan.oai.config_utils import LLMConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.utils.response import ResponseFuncType | 1,285 |
def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\
-> tuple[Optional[str], Optional[int]]:
"""Call the LLM interface
    Currently, only OpenAI's ChatGPT models (including Azure) are supported.
:param llm_config: LLM configuration.
:param messages:
:param agent_name:
:param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries
- main: agent replies
- idea: deep thoughts
- messages_summary: context compression
- text_summary: general summaries
- clue_summary: clue summaries
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
"""
    # When one configuration in the list fails to complete the request,
    # keep trying the next configuration until every configuration in the list has been attempted.
loop = llm_config.len_of_api_key_list
for i in range(loop):
time.sleep(llm_config.request_interval_time)
api_key = llm_config.next_api_key
try:
completion_content = ""
completion_tokens = 0
index = 1
|
def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\
-> tuple[Optional[str], Optional[int]]:
"""Call the LLM interface
    Currently, only OpenAI's ChatGPT models (including Azure) are supported.
:param llm_config: LLM configuration.
:param messages:
:param agent_name:
:param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries
- main: agent replies
- idea: deep thoughts
- messages_summary: context compression
- text_summary: general summaries
- clue_summary: clue summaries
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
"""
    # When one configuration in the list fails to complete the request,
    # keep trying the next configuration until every configuration in the list has been attempted.
loop = llm_config.len_of_api_key_list
for i in range(loop):
time.sleep(llm_config.request_interval_time)
api_key = llm_config.next_api_key
try:
completion_content = ""
completion_tokens = 0
index = 1 | for message in chat_completions(messages, api_key, llm_config.request_timeout, | 0 | 2023-12-06 03:24:34+00:00 | 2k |
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | data/IDR_dataset.py | [
{
"identifier": "crop_HWC_img",
"path": "utils/data_util.py",
"snippet": "def crop_HWC_img(image, base=64):\r\n \"\"\"\r\n 裁切到multiple of base的size上\r\n :param image: H,W,C\r\n :param base: (int)\r\n :return:\r\n \"\"\"\r\n h = image.shape[0]\r\n w = image.shape[1]\r\n crop_h = h % base\r\n crop_w = w % base\r\n return image[crop_h // 2:h - crop_h + crop_h // 2, crop_w // 2:w - crop_w + crop_w // 2, :]\r"
},
{
"identifier": "random_augmentation",
"path": "utils/data_util.py",
"snippet": "def random_augmentation(*args):\r\n out = []\r\n flag_aug = random.randint(0,7)\r\n for data in args:\r\n out.append(data_augmentation(data, flag_aug).copy())\r\n return out\r"
},
{
"identifier": "padding",
"path": "utils/data_util.py",
"snippet": "def padding(img, gt_size):\r\n \"\"\"\r\n padding到指定size上\r\n img_lq (np.float32) 0-1 :\r\n img_gt (np.float32) 0-1 :\r\n gt_size (int) :\r\n cv2.BORDER_REPLICATE/cv2.BORDER_CONSTANT,value=(255,255,255)/cv2.BORDER_REFLECT/cv2.BORDER_REFLECT_101/cv2.BORDER_WRAP\"\"\"\r\n h, w, _ = img.shape\r\n\r\n h_pad = max(0, gt_size - h)\r\n w_pad = max(0, gt_size - w)\r\n\r\n if h_pad == 0 and w_pad == 0:\r\n return img\r\n\r\n img = cv2.copyMakeBorder(img, 0, h_pad, 0, w_pad, cv2.BORDER_REFLECT)\r\n # print('img_lq', img_lq.shape, img_gt.shape)\r\n if img_lq.ndim == 2:\r\n img_lq = np.expand_dims(img_lq, axis=2)\r\n if img_gt.ndim == 2:\r\n img_gt = np.expand_dims(img_gt, axis=2)\r\n return img_lq, img_gt\r"
},
{
"identifier": "onehot",
"path": "utils/data_util.py",
"snippet": "def onehot(label: int, classes: int):\r\n \"\"\"\r\n return torch.tensor\r\n \"\"\"\r\n onehot_label = np.zeros([1,classes])\r\n onehot_label[:,label] = 1\r\n onehot_label = torch.from_numpy(onehot_label)\r\n return onehot_label\r"
},
{
"identifier": "smooth_one_hot",
"path": "utils/data_util.py",
"snippet": "def smooth_one_hot(true_labels: torch.Tensor, classes: int, smoothing=0.0):\r\n \"\"\"\r\n if smoothing == 0, it's one-hot method\r\n if 0 < smoothing < 1, it's smooth method\r\n\r\n \"\"\"\r\n assert 0 <= smoothing < 1\r\n confidence = 1.0 - smoothing\r\n label_shape = torch.Size((true_labels.size(0), classes)) \r\n true_dist = torch.empty(size=label_shape) \r\n true_dist.fill_(smoothing / (classes - 1))\r\n _, index = torch.max(true_labels, 1)\r\n true_dist.scatter_(1, torch.LongTensor(index.unsqueeze(1)), confidence) \r\n return true_dist\r"
},
{
"identifier": "Degradation",
"path": "data/degradation_util.py",
"snippet": "class Degradation(object):\r\n def __init__(self, dataset_opt):\r\n super(Degradation, self).__init__()\r\n self.dataset_opt = dataset_opt\r\n self.toTensor = ToTensor()\r\n self.crop_transform = Compose([\r\n ToPILImage(),\r\n RandomCrop(dataset_opt['patch_size']),\r\n ])\r\n\r\n def _add_gaussian_noise(self, clean_patch, sigma):\r\n noise = np.random.randn(*clean_patch.shape)\r\n noisy_patch = np.clip(clean_patch + noise * sigma, 0, 255).astype(np.uint8)\r\n return noisy_patch, clean_patch\r\n\r\n def _degrade_by_type(self, clean_patch, degrade_type):\r\n if degrade_type == 0:\r\n # denoise sigma=15\r\n degraded_patch, clean_patch = self._add_gaussian_noise(clean_patch, sigma=15)\r\n elif degrade_type == 1:\r\n # denoise sigma=25\r\n degraded_patch, clean_patch = self._add_gaussian_noise(clean_patch, sigma=25)\r\n elif degrade_type == 2:\r\n # denoise sigma=50\r\n degraded_patch, clean_patch = self._add_gaussian_noise(clean_patch, sigma=50)\r\n\r\n return degraded_patch, clean_patch\r\n\r\n def degrade(self, clean_patch_1, clean_patch_2, degrade_type=None):\r\n if degrade_type == None:\r\n degrade_type = random.randint(0, 3)\r\n else:\r\n degrade_type = degrade_type\r\n\r\n degrad_patch_1, _ = self._degrade_by_type(clean_patch_1, degrade_type)\r\n degrad_patch_2, _ = self._degrade_by_type(clean_patch_2, degrade_type)\r\n return degrad_patch_1, degrad_patch_2\r\n\r\n def degrade_single(self, clean_patch, degrade_type=None):\r\n if degrade_type == None:\r\n degrade_type = random.randint(0, 3)\r\n else:\r\n degrade_type = degrade_type\r\n \r\n degrad_patch, _ = self._degrade_by_type(clean_patch, degrade_type)\r\n return degrad_patch"
}
] | import os
import random
import copy
import numpy as np
from PIL import Image, ImageFile
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.data_util import crop_HWC_img, random_augmentation, padding, onehot, smooth_one_hot
from sklearn.preprocessing import OneHotEncoder
from data.degradation_util import Degradation
| 1,386 | ImageFile.LOAD_TRUNCATED_IMAGES = True
class IDR_dataset(Dataset):
def __init__(self, dataset_opt):
super(IDR_dataset, self).__init__()
self.dataset_opt = dataset_opt
self.rs_ids = []
self.hazy_ids = []
| ImageFile.LOAD_TRUNCATED_IMAGES = True
class IDR_dataset(Dataset):
def __init__(self, dataset_opt):
super(IDR_dataset, self).__init__()
self.dataset_opt = dataset_opt
self.rs_ids = []
self.hazy_ids = []
| self.D = Degradation(dataset_opt)
| 5 | 2023-12-07 10:58:34+00:00 | 2k |
TACJu/Compositor | Compositor_Mask2Former/mask2former/modeling/meta_arch/mask_former_head.py | [
{
"identifier": "build_transformer_decoder",
"path": "Compositor_Mask2Former/mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py",
"snippet": "def build_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME\n return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)"
},
{
"identifier": "build_pixel_decoder",
"path": "Compositor_Mask2Former/mask2former/modeling/pixel_decoder/fpn.py",
"snippet": "def build_pixel_decoder(cfg, input_shape):\n \"\"\"\n Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.\n \"\"\"\n name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME\n model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)\n forward_features = getattr(model, \"forward_features\", None)\n if not callable(forward_features):\n raise ValueError(\n \"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. \"\n f\"Please implement forward_features for {name} to only return mask features.\"\n )\n return model"
}
] | import logging
import fvcore.nn.weight_init as weight_init
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder
from ..pixel_decoder.fpn import build_pixel_decoder | 1,245 | # Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
| # Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE, | "transformer_predictor": build_transformer_decoder( | 0 | 2023-12-12 11:49:28+00:00 | 2k |
Mirascope/mirascope | cookbook/api_example/api_example.py | [
{
"identifier": "OpenAIChat",
"path": "mirascope/chat/models.py",
"snippet": "class OpenAIChat:\n \"\"\"A convenience wrapper for the OpenAI Chat client.\"\"\"\n\n def __init__(self, model: str = \"gpt-3.5-turbo\", api_key: Optional[str] = None):\n \"\"\"Initializes an instance of `OpenAIChat.\"\"\"\n self.client = OpenAI(api_key=api_key)\n self.model = model\n\n def create(self, prompt: Prompt, **kwargs) -> OpenAIChatCompletion:\n \"\"\"Makes a call to the model using `prompt`.\n\n Args:\n prompt: The `Prompt` to use for the call.\n **kwargs: Additional keyword arguments to pass to the API call. You can\n find available keyword arguments here:\n https://platform.openai.com/docs/api-reference/chat/create\n\n Returns:\n A `OpenAIChatCompletion` instance.\n\n Raises:\n Re-raises any exceptions thrown by the openai chat completions create call.\n \"\"\"\n try:\n return OpenAIChatCompletion(\n completion=self.client.chat.completions.create(\n model=self.model,\n messages=get_openai_chat_messages(prompt),\n stream=False,\n **kwargs,\n )\n )\n except:\n raise\n\n def stream(\n self, prompt: Prompt, **kwargs\n ) -> Generator[OpenAIChatCompletionChunk, None, None]:\n \"\"\"Streams the response for a call to the model using `prompt`.\n\n Args:\n prompt: The `Prompt` to use for the call.\n **kwargs: Additional keyword arguments to pass to the API call. You can\n find available keyword arguments here:\n https://platform.openai.com/docs/api-reference/chat/create\n\n Yields:\n A `OpenAIChatCompletionChunk` for each chunk of the response.\n\n Raises:\n Re-raises any exceptions thrown by the openai chat completions create call.\n \"\"\"\n completion_stream = self.client.chat.completions.create(\n model=self.model,\n messages=get_openai_chat_messages(prompt),\n stream=True,\n **kwargs,\n )\n for chunk in completion_stream:\n yield OpenAIChatCompletionChunk(chunk=chunk)"
},
{
"identifier": "Prompt",
"path": "mirascope/prompts.py",
"snippet": "class Prompt(BaseModel):\n \"\"\"A Pydantic model for prompts.\"\"\"\n\n @classmethod\n def template(cls) -> str:\n \"\"\"Custom parsing functionality for docstring prompt.\n\n This function is the first step in formatting the prompt template docstring.\n For the default `Prompt`, this function dedents the docstring and replaces all\n repeated sequences of newlines with one fewer newline character. This enables\n writing blocks of text instead of really long single lines. To include any\n number of newline characters, simply include one extra.\n\n Raises:\n ValueError: If the class docstring is empty.\n \"\"\"\n if cls.__doc__ is None:\n raise ValueError(\"`Prompt` must have a prompt template docstring.\")\n\n return re.sub(\n \"(\\n+)\",\n lambda x: x.group(0)[:-1] if len(x.group(0)) > 1 else \" \",\n dedent(cls.__doc__).strip(\"\\n\"),\n )\n\n def __str__(self) -> str:\n \"\"\"Returns the docstring prompt template formatted with template variables.\"\"\"\n template = self.template()\n template_vars = [\n var for _, var, _, _ in Formatter().parse(template) if var is not None\n ]\n return template.format(**{var: getattr(self, var) for var in template_vars})\n\n @property\n def messages(self) -> list[tuple[str, str]]:\n \"\"\"Returns the docstring as a list of messages.\"\"\"\n return [(\"user\", str(self))]\n\n def save(self, filepath: str):\n \"\"\"Saves the prompt to the given filepath.\"\"\"\n with open(filepath, \"wb\") as f:\n pickle.dump(self, f)\n\n @classmethod\n def load(cls, filepath: str) -> Prompt:\n \"\"\"Loads the prompt from the given filepath.\"\"\"\n with open(filepath, \"rb\") as f:\n return pickle.load(f)"
}
] | import os
from fastapi import FastAPI
from mirascope import OpenAIChat, Prompt | 1,168 | """A FastAPI app integrated with a multi-chain prompt for recommending books on a topic
and then asking which one is the best for beginners.
How to Run:
uvicorn api_example:app --reload
"""
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
app = FastAPI()
class BookRecommendationPrompt(Prompt):
"""
Can you recommend some books on {topic} in a list format?
"""
topic: str
class BestForBeginnersPrompt(Prompt):
"""
Given this list {book_list}, which one is the best for beginners?
"""
book_list: str
@app.post("/")
def root(book_recommendation: BookRecommendationPrompt):
"""Generates the best book for beginners on the given topic."""
| """A FastAPI app integrated with a multi-chain prompt for recommending books on a topic
and then asking which one is the best for beginners.
How to Run:
uvicorn api_example:app --reload
"""
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
app = FastAPI()
class BookRecommendationPrompt(Prompt):
"""
Can you recommend some books on {topic} in a list format?
"""
topic: str
class BestForBeginnersPrompt(Prompt):
"""
Given this list {book_list}, which one is the best for beginners?
"""
book_list: str
@app.post("/")
def root(book_recommendation: BookRecommendationPrompt):
"""Generates the best book for beginners on the given topic.""" | model = OpenAIChat() | 0 | 2023-12-05 01:22:34+00:00 | 2k |
allisson/pysqsx | sqsx/queue.py | [
{
"identifier": "NoRetry",
"path": "sqsx/exceptions.py",
"snippet": "class NoRetry(Exception):\n \"\"\"\n This exception must be used when we need that the message will be removed from the queue\n \"\"\"\n\n pass"
},
{
"identifier": "Retry",
"path": "sqsx/exceptions.py",
"snippet": "class Retry(Exception):\n \"\"\"\n This exception must be used when we need a custom backoff config\n \"\"\"\n\n def __init__(self, min_backoff_seconds: int, max_backoff_seconds: int):\n self.min_backoff_seconds = min_backoff_seconds\n self.max_backoff_seconds = max_backoff_seconds"
},
{
"identifier": "backoff_calculator_seconds",
"path": "sqsx/helper.py",
"snippet": "def backoff_calculator_seconds(retries: int, minimum: int, maximum: int) -> int:\n maximum = min(maximum, 43200)\n return min(minimum * 2**retries, maximum)"
},
{
"identifier": "base64_to_dict",
"path": "sqsx/helper.py",
"snippet": "def base64_to_dict(data: str) -> dict:\n return json.loads(base64.urlsafe_b64decode(data).decode())"
},
{
"identifier": "dict_to_base64",
"path": "sqsx/helper.py",
"snippet": "def dict_to_base64(data: dict) -> str:\n return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()"
}
] | import logging
import signal
import time
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, Field, PrivateAttr
from sqsx.exceptions import NoRetry, Retry
from sqsx.helper import backoff_calculator_seconds, base64_to_dict, dict_to_base64 | 1,453 | logger = logging.getLogger(__name__)
queue_url_regex = r"(http|https)[:][\/]{2}[a-zA-Z0-9-_:.]+[\/][0-9]{12}[\/]{1}[a-zA-Z0-9-_]{0,80}"
class BaseQueueMixin:
def consume_messages(
self, max_messages: int = 1, max_threads: int = 1, wait_seconds: int = 10, run_forever: bool = True
) -> None:
logger.info(f"Starting consuming tasks, queue_url={self.url}")
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
while True:
if self._should_consume_tasks_stop:
logger.info(f"Stopping consuming tasks, queue_url={self.url}")
break
response = self.sqs_client.receive_message(
QueueUrl=self.url,
AttributeNames=["All"],
MaxNumberOfMessages=min(max_messages, 10),
MessageAttributeNames=["All"],
)
sqs_messages = response.get("Messages", [])
if not sqs_messages:
logger.debug(
f"Waiting some seconds because no message was received, seconds={wait_seconds}, queue_url={self.url}"
)
time.sleep(wait_seconds)
continue
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = []
for sqs_message in sqs_messages:
futures.append(executor.submit(self._consume_message, sqs_message))
wait(futures)
if not run_forever:
break
def _exit_gracefully(self, signal_num, current_stack_frame) -> None:
logger.info("Starting graceful shutdown process")
self._should_consume_tasks_stop = True
def _message_ack(self, sqs_message: dict) -> None:
receipt_handle = sqs_message["ReceiptHandle"]
self.sqs_client.delete_message(QueueUrl=self.url, ReceiptHandle=receipt_handle)
def _message_nack(
self,
sqs_message: dict,
min_backoff_seconds: Optional[int] = None,
max_backoff_seconds: Optional[int] = None,
) -> None:
min_backoff_seconds = min_backoff_seconds if min_backoff_seconds else self.min_backoff_seconds
max_backoff_seconds = max_backoff_seconds if max_backoff_seconds else self.max_backoff_seconds
receipt_handle = sqs_message["ReceiptHandle"]
receive_count = int(sqs_message["Attributes"]["ApproximateReceiveCount"]) - 1
timeout = backoff_calculator_seconds(receive_count, min_backoff_seconds, max_backoff_seconds)
self.sqs_client.change_message_visibility(
QueueUrl=self.url, ReceiptHandle=receipt_handle, VisibilityTimeout=timeout
)
class Queue(BaseModel, BaseQueueMixin):
url: str = Field(pattern=queue_url_regex)
sqs_client: Any
min_backoff_seconds: int = Field(default=30)
max_backoff_seconds: int = Field(default=900)
_handlers: Dict[str, Callable] = PrivateAttr(default={})
_should_consume_tasks_stop: bool = PrivateAttr(default=False)
def add_task(self, task_name: str, **task_kwargs) -> dict:
return self.sqs_client.send_message(
QueueUrl=self.url,
MessageAttributes={"TaskName": {"DataType": "String", "StringValue": task_name}},
MessageBody=dict_to_base64({"kwargs": task_kwargs}),
)
def add_task_handler(self, task_name: str, task_handler_function: Callable) -> None:
self._handlers.update({task_name: task_handler_function})
def _consume_message(self, sqs_message: dict) -> None:
message_id = sqs_message["MessageId"]
task_name_attribute = sqs_message["MessageAttributes"].get("TaskName")
if task_name_attribute is None:
logger.warning(f"Message without TaskName attribute, message_id={message_id}")
return self._message_nack(sqs_message)
task_name = task_name_attribute["StringValue"]
task_handler_function = self._handlers.get(task_name)
if task_handler_function is None:
logger.warning(f"Task handler not found, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
try:
message_data = base64_to_dict(sqs_message["Body"])
except Exception:
logger.exception(f"Invalid message body, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
kwargs = message_data["kwargs"]
context = {
"queue_url": self.url,
"task_name": task_name,
"sqs_message": sqs_message,
}
try:
task_handler_function(context, **kwargs)
except Retry as exc:
logger.info(
f"Received an sqsx.Retry, setting a custom backoff policy, message_id={message_id}, task_name={task_name}"
)
return self._message_nack(
sqs_message,
min_backoff_seconds=exc.min_backoff_seconds,
max_backoff_seconds=exc.max_backoff_seconds,
)
|
logger = logging.getLogger(__name__)
queue_url_regex = r"(http|https)[:][\/]{2}[a-zA-Z0-9-_:.]+[\/][0-9]{12}[\/]{1}[a-zA-Z0-9-_]{0,80}"
class BaseQueueMixin:
def consume_messages(
self, max_messages: int = 1, max_threads: int = 1, wait_seconds: int = 10, run_forever: bool = True
) -> None:
logger.info(f"Starting consuming tasks, queue_url={self.url}")
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
while True:
if self._should_consume_tasks_stop:
logger.info(f"Stopping consuming tasks, queue_url={self.url}")
break
response = self.sqs_client.receive_message(
QueueUrl=self.url,
AttributeNames=["All"],
MaxNumberOfMessages=min(max_messages, 10),
MessageAttributeNames=["All"],
)
sqs_messages = response.get("Messages", [])
if not sqs_messages:
logger.debug(
f"Waiting some seconds because no message was received, seconds={wait_seconds}, queue_url={self.url}"
)
time.sleep(wait_seconds)
continue
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = []
for sqs_message in sqs_messages:
futures.append(executor.submit(self._consume_message, sqs_message))
wait(futures)
if not run_forever:
break
def _exit_gracefully(self, signal_num, current_stack_frame) -> None:
logger.info("Starting graceful shutdown process")
self._should_consume_tasks_stop = True
def _message_ack(self, sqs_message: dict) -> None:
receipt_handle = sqs_message["ReceiptHandle"]
self.sqs_client.delete_message(QueueUrl=self.url, ReceiptHandle=receipt_handle)
def _message_nack(
self,
sqs_message: dict,
min_backoff_seconds: Optional[int] = None,
max_backoff_seconds: Optional[int] = None,
) -> None:
min_backoff_seconds = min_backoff_seconds if min_backoff_seconds else self.min_backoff_seconds
max_backoff_seconds = max_backoff_seconds if max_backoff_seconds else self.max_backoff_seconds
receipt_handle = sqs_message["ReceiptHandle"]
receive_count = int(sqs_message["Attributes"]["ApproximateReceiveCount"]) - 1
timeout = backoff_calculator_seconds(receive_count, min_backoff_seconds, max_backoff_seconds)
self.sqs_client.change_message_visibility(
QueueUrl=self.url, ReceiptHandle=receipt_handle, VisibilityTimeout=timeout
)
class Queue(BaseModel, BaseQueueMixin):
url: str = Field(pattern=queue_url_regex)
sqs_client: Any
min_backoff_seconds: int = Field(default=30)
max_backoff_seconds: int = Field(default=900)
_handlers: Dict[str, Callable] = PrivateAttr(default={})
_should_consume_tasks_stop: bool = PrivateAttr(default=False)
def add_task(self, task_name: str, **task_kwargs) -> dict:
return self.sqs_client.send_message(
QueueUrl=self.url,
MessageAttributes={"TaskName": {"DataType": "String", "StringValue": task_name}},
MessageBody=dict_to_base64({"kwargs": task_kwargs}),
)
def add_task_handler(self, task_name: str, task_handler_function: Callable) -> None:
self._handlers.update({task_name: task_handler_function})
def _consume_message(self, sqs_message: dict) -> None:
message_id = sqs_message["MessageId"]
task_name_attribute = sqs_message["MessageAttributes"].get("TaskName")
if task_name_attribute is None:
logger.warning(f"Message without TaskName attribute, message_id={message_id}")
return self._message_nack(sqs_message)
task_name = task_name_attribute["StringValue"]
task_handler_function = self._handlers.get(task_name)
if task_handler_function is None:
logger.warning(f"Task handler not found, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
try:
message_data = base64_to_dict(sqs_message["Body"])
except Exception:
logger.exception(f"Invalid message body, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
kwargs = message_data["kwargs"]
context = {
"queue_url": self.url,
"task_name": task_name,
"sqs_message": sqs_message,
}
try:
task_handler_function(context, **kwargs)
except Retry as exc:
logger.info(
f"Received an sqsx.Retry, setting a custom backoff policy, message_id={message_id}, task_name={task_name}"
)
return self._message_nack(
sqs_message,
min_backoff_seconds=exc.min_backoff_seconds,
max_backoff_seconds=exc.max_backoff_seconds,
) | except NoRetry: | 0 | 2023-12-13 10:48:29+00:00 | 2k |
turbopuffer/turbopuffer-python | turbopuffer/backend.py | [
{
"identifier": "TurbopufferError",
"path": "turbopuffer/error.py",
"snippet": "class TurbopufferError(Exception):\n pass"
},
{
"identifier": "AuthenticationError",
"path": "turbopuffer/error.py",
"snippet": "class AuthenticationError(TurbopufferError):\n pass"
},
{
"identifier": "APIError",
"path": "turbopuffer/error.py",
"snippet": "class APIError(TurbopufferError):\n def __init__(self, status_code: int, status_name: str, message: str):\n self.status_code = status_code\n self.status_name = status_name\n super().__init__(f'{status_name}: {message}')"
}
] | import json
import time
import traceback
import requests
import turbopuffer as tpuf
import gzip
from turbopuffer.error import TurbopufferError, AuthenticationError, APIError
from typing import Optional, List | 839 |
def find_api_key(api_key: Optional[str] = None) -> str:
if api_key is not None:
return api_key
elif tpuf.api_key is not None:
return tpuf.api_key
else:
raise AuthenticationError("No turbopuffer API key was provided.\n"
"Set the TURBOPUFFER_API_KEY environment variable, "
"or pass `api_key=` when creating a Namespace.")
class Backend:
api_key: str
api_base_url: str
session: requests.Session
def __init__(self, api_key: Optional[str] = None):
self.api_key = find_api_key(api_key)
self.api_base_url = tpuf.api_base_url
self.session = requests.Session()
self.session.headers.update({
'Authorization': f'Bearer {self.api_key}',
'User-Agent': f'tpuf-python/{tpuf.VERSION} {requests.utils.default_headers()["User-Agent"]}',
})
def make_api_request(self,
*args: List[str],
method: Optional[str] = None,
query: Optional[dict] = None,
payload: Optional[dict] = None) -> dict:
start = time.monotonic()
if method is None and payload is not None:
method = 'POST'
request = requests.Request(method or 'GET', self.api_base_url + '/' + '/'.join(args))
if query is not None:
request.params = query
if payload is not None:
# before = time.monotonic()
if isinstance(payload, dict):
# before = time.monotonic()
json_payload = tpuf.dump_json_bytes(payload)
# print('Json time:', time.monotonic() - before)
else:
raise ValueError(f'Unsupported POST payload type: {type(payload)}')
gzip_payload = gzip.compress(json_payload, compresslevel=1)
# json_mebibytes = len(json_payload) / 1024 / 1024
# gzip_mebibytes = len(gzip_payload) / 1024 / 1024
# print(f'Gzip time ({json_mebibytes} MiB json / {gzip_mebibytes} MiB gzip):', time.monotonic() - before)
request.headers.update({
'Content-Type': 'application/json',
'Content-Encoding': 'gzip',
})
request.data = gzip_payload
prepared = self.session.prepare_request(request)
retry_attempts = 0
while retry_attempts < 3:
# before = time.monotonic()
try:
# print(f'Sending request:', prepared.path_url, prepared.headers)
response = self.session.send(prepared, allow_redirects=False)
# print(f'Request time (HTTP {response.status_code}):', time.monotonic() - before)
if response.status_code > 500:
response.raise_for_status()
content_type = response.headers.get('Content-Type', 'text/plain')
if content_type == 'application/json':
try:
content = response.json()
except json.JSONDecodeError as err:
|
def find_api_key(api_key: Optional[str] = None) -> str:
if api_key is not None:
return api_key
elif tpuf.api_key is not None:
return tpuf.api_key
else:
raise AuthenticationError("No turbopuffer API key was provided.\n"
"Set the TURBOPUFFER_API_KEY environment variable, "
"or pass `api_key=` when creating a Namespace.")
class Backend:
api_key: str
api_base_url: str
session: requests.Session
def __init__(self, api_key: Optional[str] = None):
self.api_key = find_api_key(api_key)
self.api_base_url = tpuf.api_base_url
self.session = requests.Session()
self.session.headers.update({
'Authorization': f'Bearer {self.api_key}',
'User-Agent': f'tpuf-python/{tpuf.VERSION} {requests.utils.default_headers()["User-Agent"]}',
})
def make_api_request(self,
*args: List[str],
method: Optional[str] = None,
query: Optional[dict] = None,
payload: Optional[dict] = None) -> dict:
start = time.monotonic()
if method is None and payload is not None:
method = 'POST'
request = requests.Request(method or 'GET', self.api_base_url + '/' + '/'.join(args))
if query is not None:
request.params = query
if payload is not None:
# before = time.monotonic()
if isinstance(payload, dict):
# before = time.monotonic()
json_payload = tpuf.dump_json_bytes(payload)
# print('Json time:', time.monotonic() - before)
else:
raise ValueError(f'Unsupported POST payload type: {type(payload)}')
gzip_payload = gzip.compress(json_payload, compresslevel=1)
# json_mebibytes = len(json_payload) / 1024 / 1024
# gzip_mebibytes = len(gzip_payload) / 1024 / 1024
# print(f'Gzip time ({json_mebibytes} MiB json / {gzip_mebibytes} MiB gzip):', time.monotonic() - before)
request.headers.update({
'Content-Type': 'application/json',
'Content-Encoding': 'gzip',
})
request.data = gzip_payload
prepared = self.session.prepare_request(request)
retry_attempts = 0
while retry_attempts < 3:
# before = time.monotonic()
try:
# print(f'Sending request:', prepared.path_url, prepared.headers)
response = self.session.send(prepared, allow_redirects=False)
# print(f'Request time (HTTP {response.status_code}):', time.monotonic() - before)
if response.status_code > 500:
response.raise_for_status()
content_type = response.headers.get('Content-Type', 'text/plain')
if content_type == 'application/json':
try:
content = response.json()
except json.JSONDecodeError as err: | raise APIError(response.status_code, traceback.format_exception_only(err), response.text) | 2 | 2023-12-12 06:52:27+00:00 | 2k |
neu-spiral/multi-label-emg | scripts/run_experiment_2.py | [
{
"identifier": "run_one",
"path": "multi_label_emg/slurm_utils.py",
"snippet": "def run_one(job: str, running_job_count: int, dry_run: bool):\n if ON_SLURM_CLUSTER:\n _run_one_slurm(job, running_job_count, slurm_logs_dir, dry_run)\n else:\n _run_one_local(job, running_job_count, dry_run)"
},
{
"identifier": "PROJECT_ROOT",
"path": "multi_label_emg/utils.py",
"snippet": "PROJECT_ROOT = Path(__file__).resolve().parent"
}
] | import itertools
import numpy as np
from run_experiment_1 import Setting
from multi_label_emg.slurm_utils import run_one
from multi_label_emg.utils import PROJECT_ROOT | 675 | """
Experiment 2:
Using previous best parallel model type and classifier,
Vary method of subsetting synthetic doubles and how many to use.
"""
DRY_RUN = True
script = PROJECT_ROOT / "train.py"
python = PROJECT_ROOT.parent / "venv" / "bin" / "python"
assert script.exists()
assert python.exists()
subjects = [f"Subj{i}" for i in range(11)]
parallel_model_type = "ParallelA"
clf = "mlp"
doubles_methods = [
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
]
settings = []
for subj, seed, doubles_method, doubles_frac in itertools.product(
subjects,
np.arange(3),
doubles_methods,
[0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5],
):
if doubles_method.startswith("subsetInput"):
frac = np.round(np.sqrt(doubles_frac), 4)
else:
frac = doubles_frac
settings.append(
Setting(
subject=subj,
seed=seed,
parallel_model_type=parallel_model_type,
clf_name=clf,
doubles_method=doubles_method,
fraction_doubles_per_class=frac,
singles_method="none",
rel_fraction_singles_per_class=1.0,
include_doubles_in_train=False,
feature_combine_type="avg",
)
)
if __name__ == "__main__":
if DRY_RUN:
print("#" * 80)
print("DRY RUN")
running_job_count = 0
for setting in settings:
job = f"{python} {script} "
job += f"--subject {setting.subject} "
job += f"--seed {setting.seed} "
job += f"--parallel_model_type {setting.parallel_model_type} "
job += f"--clf_name {setting.clf_name} "
job += f"--doubles_method {setting.doubles_method} "
job += f"--fraction_doubles_per_class {setting.fraction_doubles_per_class} "
job += f"--singles_method {setting.singles_method} "
job += f"--rel_fraction_singles_per_class {setting.rel_fraction_singles_per_class} "
job += f"--include_doubles_in_train {setting.include_doubles_in_train} "
| """
Experiment 2:
Using previous best parallel model type and classifier,
Vary method of subsetting synthetic doubles and how many to use.
"""
DRY_RUN = True
script = PROJECT_ROOT / "train.py"
python = PROJECT_ROOT.parent / "venv" / "bin" / "python"
assert script.exists()
assert python.exists()
subjects = [f"Subj{i}" for i in range(11)]
parallel_model_type = "ParallelA"
clf = "mlp"
doubles_methods = [
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
]
settings = []
for subj, seed, doubles_method, doubles_frac in itertools.product(
subjects,
np.arange(3),
doubles_methods,
[0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5],
):
if doubles_method.startswith("subsetInput"):
frac = np.round(np.sqrt(doubles_frac), 4)
else:
frac = doubles_frac
settings.append(
Setting(
subject=subj,
seed=seed,
parallel_model_type=parallel_model_type,
clf_name=clf,
doubles_method=doubles_method,
fraction_doubles_per_class=frac,
singles_method="none",
rel_fraction_singles_per_class=1.0,
include_doubles_in_train=False,
feature_combine_type="avg",
)
)
if __name__ == "__main__":
if DRY_RUN:
print("#" * 80)
print("DRY RUN")
running_job_count = 0
for setting in settings:
job = f"{python} {script} "
job += f"--subject {setting.subject} "
job += f"--seed {setting.seed} "
job += f"--parallel_model_type {setting.parallel_model_type} "
job += f"--clf_name {setting.clf_name} "
job += f"--doubles_method {setting.doubles_method} "
job += f"--fraction_doubles_per_class {setting.fraction_doubles_per_class} "
job += f"--singles_method {setting.singles_method} "
job += f"--rel_fraction_singles_per_class {setting.rel_fraction_singles_per_class} "
job += f"--include_doubles_in_train {setting.include_doubles_in_train} " | run_one(job, running_job_count, dry_run=DRY_RUN) | 0 | 2023-12-12 16:50:34+00:00 | 2k |
lbcb-sci/GNNome | graph_dataset.py | [
{
"identifier": "get_config",
"path": "config.py",
"snippet": "def get_config():\n return {\n 'checkpoints_path': 'checkpoints',\n 'models_path': 'models',\n \n 'tool_dir': 'vendor',\n 'raven_dir': 'vendor/raven-1.8.1',\n 'hifiasm_dir': 'vendor/hifiasm-0.18.8',\n 'pbsim3_dir': 'vendor/pbsim3',\n \n 'sample_profile_id': '',\n 'sample_file': '',\n 'sequencing_depth': 60,\n }"
},
{
"identifier": "preprocess_graph",
"path": "utils.py",
"snippet": "def preprocess_graph(g, data_path, idx):\n g = g.int()\n g.ndata['x'] = torch.ones(g.num_nodes(), 1)\n ol_len = g.edata['overlap_length'].float()\n ol_sim = g.edata['overlap_similarity']\n ol_len = (ol_len - ol_len.mean()) / ol_len.std()\n if get_hyperparameters()['use_similarities']:\n g.edata['e'] = torch.cat((ol_len.unsqueeze(-1), ol_sim.unsqueeze(-1)), dim=1)\n else:\n g.edata['e'] = ol_len.unsqueeze(-1)\n return g"
},
{
"identifier": "add_positional_encoding",
"path": "utils.py",
"snippet": "def add_positional_encoding(g):\n \"\"\"\n Initializing positional encoding with k-RW-PE\n \"\"\"\n\n g.ndata['in_deg'] = g.in_degrees().float()\n g.ndata['out_deg'] = g.out_degrees().float()\n \n pe_dim = get_hyperparameters()['nb_pos_enc']\n pe_type = get_hyperparameters()['type_pos_enc']\n \n if pe_dim == 0:\n return g\n\n if pe_type == 'RW':\n # Geometric diffusion features with Random Walk\n A = g.adjacency_matrix(scipy_fmt=\"csr\")\n Dinv = sp.diags(dgl.backend.asnumpy(g.in_degrees()).clip(1) ** -1.0, dtype=float) # D^-1\n RW = A @ Dinv \n M = RW\n # Iterate\n PE = [torch.from_numpy(M.diagonal()).float()]\n M_power = M\n for _ in range(pe_dim-1):\n M_power = M_power @ M\n PE.append(torch.from_numpy(M_power.diagonal()).float())\n PE = torch.stack(PE,dim=-1)\n g.ndata['pe'] = PE \n\n if pe_type == 'PR':\n # k-step PageRank features\n A = g.adjacency_matrix(scipy_fmt=\"csr\")\n D = A.sum(axis=1) # out degree\n Dinv = 1./ (D+1e-9); Dinv[D<1e-9] = 0 # take care of nodes without outgoing edges\n Dinv = sp.diags(np.squeeze(np.asarray(Dinv)), dtype=float) # D^-1 \n P = (Dinv @ A).T \n n = A.shape[0]\n One = np.ones([n])\n x = One/ n\n PE = [] \n alpha = 0.95 \n for _ in range(pe_dim): \n x = alpha* P.dot(x) + (1.0-alpha)/n* One \n PE.append(torch.from_numpy(x).float())\n PE = torch.stack(PE,dim=-1)\n g.ndata['pe'] = PE \n\n return g"
},
{
"identifier": "extract_contigs",
"path": "utils.py",
"snippet": "def extract_contigs(path, idx):\n gfa_path = os.path.join(path, f'{idx}_asm.bp.p_ctg.gfa')\n asm_path = os.path.join(path, f'{idx}_assembly.fasta')\n contigs = []\n with open(gfa_path) as f:\n n = 0\n for line in f.readlines():\n line = line.strip()\n if line[0] != 'S':\n continue\n seq=Seq.Seq(line.split()[2])\n ctg = SeqIO.SeqRecord(seq, description=f'contig_{n}', id=f'contig_{n}')\n contigs.append(ctg)\n n += 1\n SeqIO.write(contigs, asm_path, 'fasta')\n subprocess.run(f'rm {path}/{idx}_asm*', shell=True)\n # subprocess.run(f'rm {path}/output.csv', shell=True)"
}
] | import re
import os
import pickle
import subprocess
import dgl
import graph_parser
from dgl.data import DGLDataset
from config import get_config
from utils import preprocess_graph, add_positional_encoding, extract_contigs | 1,513 |
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx)
|
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx) | graph = add_positional_encoding(graph) | 2 | 2023-12-08 04:45:45+00:00 | 2k |
altfoxie/ha-sberdevices | custom_components/sberdevices/light.py | [
{
"identifier": "DeviceAPI",
"path": "custom_components/sberdevices/api.py",
"snippet": "class DeviceAPI:\n def __init__(self, home: HomeAPI, device_id: str) -> None:\n self._home = home\n self._id = device_id\n\n @property\n def device(self) -> dict[str, any]:\n return self._home.get_cached_device(self._id)\n\n async def update(self) -> None:\n await self._home.update_devices_cache()\n\n def get_state(self, key: str) -> dict[str, any]:\n return find_from_list(self.device[\"desired_state\"], key)\n\n def get_attribute(self, key: str) -> dict[str, any]:\n return find_from_list(self.device[\"attributes\"], key)\n\n async def set_states(self, states: [dict[str, any]]) -> None:\n await self._home.set_device_state(self._id, states)\n\n async def set_state(self, state: dict[str, any]) -> None:\n await self.set_states([state])\n\n async def set_state_bool(self, key: str, value: bool) -> None:\n await self.set_state({\"key\": key, \"bool_value\": value})\n\n async def set_state_integer(self, key: str, value: int) -> None:\n await self.set_state({\"key\": key, \"integer_value\": value})\n\n async def set_on_off(self, state: bool) -> None:\n await self.set_state_bool(\"on_off\", state)"
},
{
"identifier": "HomeAPI",
"path": "custom_components/sberdevices/api.py",
"snippet": "class HomeAPI:\n def __init__(self, sber: SberAPI) -> None:\n self._sber = sber\n self._client = AsyncClient(\n base_url=\"https://gateway.iot.sberdevices.ru/gateway/v1\",\n )\n self._token_alive = False\n self._devices = {}\n\n async def update_token(self) -> None:\n if self._token_alive:\n return\n\n token = await self._sber.fetch_home_token()\n if token is not None:\n self._client.headers.update({\"X-AUTH-jwt\": token})\n\n async def request(\n self, method: str, url: str, retry: bool = True, **kwargs\n ) -> dict[str, any]:\n await self.update_token()\n\n res = await self._client.request(method, url, **kwargs)\n obj = res.json()\n if res.status_code != 200:\n code = obj[\"code\"]\n # dead token xd\n if code == 16:\n self._token_alive = False\n if retry:\n return await self.request(method, url, retry=False, **kwargs)\n\n raise Exception(f\"{code} ({res.status_code}): {obj['message']}\")\n return obj\n\n async def get_device_tree(self) -> dict[str, any]:\n return (await self.request(\"GET\", \"/device_groups/tree\"))[\"result\"]\n\n # Cache\n async def update_devices_cache(self) -> list[dict[str, any]]:\n self._devices = extract_devices(await self.get_device_tree())\n\n def get_cached_devices(self) -> list[dict[str, any]]:\n return self._devices\n\n def get_cached_device(self, device_id: str) -> dict[str, any]:\n return self._devices[device_id]\n\n async def set_device_state(self, device_id: str, state: [dict[str, any]]) -> None:\n await self._client.request(\n \"PUT\",\n f\"/devices/{device_id}/state\",\n json={\n \"device_id\": device_id,\n \"desired_state\": state,\n \"timestamp\": datetime.now().isoformat()\n + \"Z\", # 2023-12-01T17:00:35.537Z\n },\n )\n\n # Merge\n for state_val in state:\n for attribute in self._devices[device_id][\"desired_state\"]:\n if attribute[\"key\"] == state_val[\"key\"]:\n attribute.update(state_val)\n break"
},
{
"identifier": "DOMAIN",
"path": "custom_components/sberdevices/const.py",
"snippet": "DOMAIN = \"sberdevices\""
}
] | import math
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP_KELVIN,
ATTR_HS_COLOR,
ATTR_WHITE,
ColorMode,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.color import brightness_to_value, value_to_brightness
from homeassistant.util.scaling import scale_ranged_value_to_int_range
from .api import DeviceAPI, HomeAPI
from .const import DOMAIN | 1,211 | """Support for Abode Security System lights."""
from __future__ import annotations
# hardcode xd
COLOR_TEMP_MIN = 2700
COLOR_TEMP_MAX = 6500
COLOR_TEMP_RANGE = (COLOR_TEMP_MIN, COLOR_TEMP_MAX)
H_RANGE = (0, 360)
S_RANGE = (0, 100)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
| """Support for Abode Security System lights."""
from __future__ import annotations
# hardcode xd
COLOR_TEMP_MIN = 2700
COLOR_TEMP_MAX = 6500
COLOR_TEMP_RANGE = (COLOR_TEMP_MIN, COLOR_TEMP_MAX)
H_RANGE = (0, 360)
S_RANGE = (0, 100)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None: | home: HomeAPI = hass.data[DOMAIN][entry.entry_id]["home"] | 2 | 2023-12-09 15:27:27+00:00 | 2k |
amadad/agentcy3 | agency_swarm/tools/tool_factory.py | [
{
"identifier": "BaseTool",
"path": "agency_swarm/tools/base_tool.py",
"snippet": "class BaseTool(OpenAISchema, ABC):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @abstractmethod\n def run(self, **kwargs):\n pass"
},
{
"identifier": "reference_schema",
"path": "agency_swarm/util/schema.py",
"snippet": "def reference_schema(schema):\n # Enhanced function to only extract nested properties into $defs\n\n def find_and_extract_defs(node, defs, parent_key=None, path_prefix=\"#/$defs/\"):\n if isinstance(node, dict):\n # Extract nested properties into $defs\n if parent_key == 'properties' and 'properties' in node and isinstance(node['properties'], dict):\n def_name = node.get('title', None)\n if def_name:\n defs[def_name] = node\n return {\"$ref\": path_prefix + def_name}\n\n # Recursively process the dictionary\n return {k: find_and_extract_defs(v, defs, parent_key=k) for k, v in node.items()}\n elif isinstance(node, list):\n # Recursively process the list\n return [find_and_extract_defs(element, defs, parent_key) for element in node]\n else:\n return node\n\n defs = {}\n # Extract definitions and update the schema\n new_schema = {k: find_and_extract_defs(v, defs) for k, v in schema.items()}\n if defs:\n new_schema['parameters'] = new_schema.get('parameters', {})\n new_schema['parameters']['$defs'] = defs\n return new_schema"
}
] | import inspect
from typing import Any, Dict, List, Type
from pydantic import create_model, Field
from .base_tool import BaseTool
from ..util.schema import reference_schema
from langchain.tools import format_tool_to_openai_function | 1,523 | except ImportError:
raise ImportError("You must install langchain to use this method.")
if inspect.isclass(tool):
tool = tool()
def callback(self):
tool_input = self.model_dump()
try:
return tool.run(tool_input)
except TypeError:
if len(tool_input) == 1:
return tool.run(list(tool_input.values())[0])
else:
raise TypeError(f"Error parsing input for tool '{tool.__class__.__name__}' Please open an issue "
f"on github.")
return ToolFactory.from_openai_schema(
format_tool_to_openai_function(tool),
callback
)
@staticmethod
def from_openai_schema(schema: Dict[str, Any], callback: Any):
"""
        Converts an OpenAI schema into a BaseTool. Nested properties without refs are not supported yet.
:param schema:
:param callback:
:return:
"""
def resolve_ref(ref: str, defs: Dict[str, Any]) -> Any:
# Extract the key from the reference
key = ref.split('/')[-1]
if key in defs:
return defs[key]
else:
raise ValueError(f"Reference '{ref}' not found in definitions")
def create_fields(schema: Dict[str, Any], type_mapping: Dict[str, Type[Any]], required_fields: List[str],
defs: Dict[str, Any]) -> Dict[str, Any]:
fields = {}
for prop, details in schema.items():
alias = None
if prop.startswith('_'):
alias = prop
prop = prop.lstrip('_')
json_type = details['type']
if json_type in type_mapping:
field_type = type_mapping[json_type]
field_description = details.get('description', '')
is_required = prop in required_fields
field_default = ... if is_required else None
if json_type == 'array':
items_schema = details.get('items', {})
if 'type' in items_schema:
item_type = type_mapping[items_schema['type']]
field_type = List[item_type]
elif 'properties' in items_schema: # Handling direct nested object in array
nested_properties = items_schema['properties']
nested_required = items_schema.get('required', [])
nested_model_name = items_schema.get('title', f"{prop}Item")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
nested_model = create_model(nested_model_name, **nested_fields)
field_type = List[nested_model]
elif '$ref' in items_schema:
ref_model = resolve_ref(items_schema['$ref'], defs)
field_type = List[ref_model]
else:
raise ValueError("Array items must have a 'type', 'properties', or '$ref'")
elif json_type == 'object':
if 'properties' in details:
nested_properties = details['properties']
nested_required = details.get('required', [])
nested_model_name = details.get('title', f"{prop}Model")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
field_type = create_model(nested_model_name, **nested_fields)
elif '$ref' in details:
ref_model = resolve_ref(details['$ref'], defs)
field_type = ref_model
else:
raise ValueError("Object must have 'properties' or '$ref'")
fields[prop] = (
field_type, Field(default=field_default, description=field_description, alias=alias))
else:
raise ValueError(f"Unsupported type '{json_type}' for property '{prop}'")
return fields
type_mapping = {
'string': str,
'integer': int,
'number': float,
'boolean': bool,
'array': List,
'object': dict,
'null': type(None),
}
schema = reference_schema(schema)
name = schema['name']
description = schema['description']
properties = schema['parameters']['properties']
required_fields = schema['parameters'].get('required', [])
# Add definitions ($defs) to type_mapping
defs = {k: create_model(k, **create_fields(v['properties'], type_mapping, v.get('required', []), {})) for k, v
in schema['parameters'].get('$defs', {}).items()}
type_mapping.update(defs)
fields = create_fields(properties, type_mapping, required_fields, defs)
# Dynamically creating the Pydantic model
model = create_model(name, **fields)
|
class ToolFactory:
@staticmethod
def from_langchain_tools(tools: List):
"""
Converts a list of langchain tools into a list of BaseTools.
:param tools: A list of langchain tools.
:return: A list of BaseTools.
"""
converted_tools = []
for tool in tools:
converted_tools.append(ToolFactory.from_langchain_tool(tool))
return converted_tools
@staticmethod
def from_langchain_tool(tool):
"""
Converts a langchain tool into a BaseTool.
:param tool: A langchain tool.
:return: A BaseTool.
"""
        try:
            # NOTE: the import originally inside this try block was stripped by the dataset cropper;
            # format_tool_to_openai_function (used below) presumably comes from langchain, e.g.:
            from langchain.tools.render import format_tool_to_openai_function
except ImportError:
raise ImportError("You must install langchain to use this method.")
if inspect.isclass(tool):
tool = tool()
def callback(self):
tool_input = self.model_dump()
try:
return tool.run(tool_input)
except TypeError:
if len(tool_input) == 1:
return tool.run(list(tool_input.values())[0])
else:
raise TypeError(f"Error parsing input for tool '{tool.__class__.__name__}' Please open an issue "
f"on github.")
return ToolFactory.from_openai_schema(
format_tool_to_openai_function(tool),
callback
)
@staticmethod
def from_openai_schema(schema: Dict[str, Any], callback: Any):
"""
        Converts an OpenAI schema into a BaseTool. Nested properties without refs are not supported yet.
:param schema:
:param callback:
:return:
"""
def resolve_ref(ref: str, defs: Dict[str, Any]) -> Any:
# Extract the key from the reference
key = ref.split('/')[-1]
if key in defs:
return defs[key]
else:
raise ValueError(f"Reference '{ref}' not found in definitions")
def create_fields(schema: Dict[str, Any], type_mapping: Dict[str, Type[Any]], required_fields: List[str],
defs: Dict[str, Any]) -> Dict[str, Any]:
fields = {}
for prop, details in schema.items():
alias = None
if prop.startswith('_'):
alias = prop
prop = prop.lstrip('_')
json_type = details['type']
if json_type in type_mapping:
field_type = type_mapping[json_type]
field_description = details.get('description', '')
is_required = prop in required_fields
field_default = ... if is_required else None
if json_type == 'array':
items_schema = details.get('items', {})
if 'type' in items_schema:
item_type = type_mapping[items_schema['type']]
field_type = List[item_type]
elif 'properties' in items_schema: # Handling direct nested object in array
nested_properties = items_schema['properties']
nested_required = items_schema.get('required', [])
nested_model_name = items_schema.get('title', f"{prop}Item")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
nested_model = create_model(nested_model_name, **nested_fields)
field_type = List[nested_model]
elif '$ref' in items_schema:
ref_model = resolve_ref(items_schema['$ref'], defs)
field_type = List[ref_model]
else:
raise ValueError("Array items must have a 'type', 'properties', or '$ref'")
elif json_type == 'object':
if 'properties' in details:
nested_properties = details['properties']
nested_required = details.get('required', [])
nested_model_name = details.get('title', f"{prop}Model")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
field_type = create_model(nested_model_name, **nested_fields)
elif '$ref' in details:
ref_model = resolve_ref(details['$ref'], defs)
field_type = ref_model
else:
raise ValueError("Object must have 'properties' or '$ref'")
fields[prop] = (
field_type, Field(default=field_default, description=field_description, alias=alias))
else:
raise ValueError(f"Unsupported type '{json_type}' for property '{prop}'")
return fields
type_mapping = {
'string': str,
'integer': int,
'number': float,
'boolean': bool,
'array': List,
'object': dict,
'null': type(None),
}
schema = reference_schema(schema)
name = schema['name']
description = schema['description']
properties = schema['parameters']['properties']
required_fields = schema['parameters'].get('required', [])
# Add definitions ($defs) to type_mapping
defs = {k: create_model(k, **create_fields(v['properties'], type_mapping, v.get('required', []), {})) for k, v
in schema['parameters'].get('$defs', {}).items()}
type_mapping.update(defs)
fields = create_fields(properties, type_mapping, required_fields, defs)
# Dynamically creating the Pydantic model
model = create_model(name, **fields)
| tool = type(name, (BaseTool, model), { | 0 | 2023-12-14 01:40:32+00:00 | 2k |
Deltares/imod-python | imod/tests/test_flow/test_flow_dis.py | [
{
"identifier": "TimeDiscretization",
"path": "imod/flow/dis.py",
"snippet": "class TimeDiscretization(Package):\n \"\"\"\n Time discretisation package class.\n\n Parameters\n ----------\n timestep_duration: xr.DataArray\n is the length of the current stress period (PERLEN). If the flow\n solution is transient, timestep_duration specified here must be equal\n to that specified for the flow model. If the flow solution is\n steady-state, timestep_duration can be set to any desired length.\n n_timesteps: int, optional\n is the number of time steps for the transient flow solution in the\n current stress period (NSTP). If the flow solution is steady-state,\n n_timestep=1. Default value is 1.\n transient: bool, optional\n Flag indicating wether the flow simulation is transient (True) or False\n (Steady State). Default is True.\n timestep_multiplier: float, optional\n is the multiplier for the length of successive time steps used in the\n transient flow solution (TSMULT); it is used only if n_timesteps>1.\n timestep_multiplier>0, the length of each flow time step within the\n current stress period is calculated using the geometric progression as\n in MODFLOW. Note that both n_timesteps and timestep_multiplier\n specified here must be identical to those specified in the flow model\n if the flow model is transient. If timestep_multiplier ≤ 0, the length\n of each flow time step within the current stress period is read from\n the record TSLNGH. This option is needed in case the length of time\n steps for the flow solution is not based on a geometric progression in\n a flow model, unlike MODFLOW. Default is 1.0.\n \"\"\"\n\n _pkg_id = \"dis\"\n _variable_order = [\n \"timestep_duration\",\n \"n_timesteps\",\n \"transient\",\n \"timestep_multiplier\",\n ]\n\n def __init__(\n self,\n timestep_duration,\n endtime,\n n_timesteps=1,\n transient=True,\n timestep_multiplier=1.0,\n ):\n super().__init__()\n self.dataset[\"timestep_duration\"] = timestep_duration\n self.dataset[\"n_timesteps\"] = n_timesteps\n self.dataset[\"transient\"] = transient\n self.dataset[\"timestep_multiplier\"] = timestep_multiplier\n self.endtime = endtime\n\n def _render(self):\n \"\"\"Render iMOD TIM file, which is the time discretization of the iMODFLOW model\"\"\"\n _template = jinja2.Template(\n \"{% for time in timestrings%}\"\n \"{{time}},1,{{n_timesteps}},{{timestep_multiplier}}\\n\"\n \"{% endfor %}\\n\"\n )\n times = self.dataset[\"time\"].values\n timestrings = [imod.util._compose_timestring(time) for time in times]\n timestrings.append(imod.util._compose_timestring(self.endtime))\n\n d = dict(\n timestrings=timestrings,\n n_timesteps=self.dataset[\"n_timesteps\"].item(),\n timestep_multiplier=self.dataset[\"timestep_multiplier\"].item(),\n )\n\n return _template.render(**d)\n\n def save(self, path):\n tim_content = self._render()\n\n with open(path, \"w\") as f:\n f.write(tim_content)\n\n def _pkgcheck(self, **kwargs):\n to_check = [\n \"timestep_duration\",\n \"n_timesteps\",\n ]\n\n self._check_positive(to_check)"
},
{
"identifier": "timeutil",
"path": "imod/wq/timeutil.py",
"snippet": "def _check_year(year):\ndef to_datetime(time, use_cftime):\ndef timestep_duration(times, use_cftime):\ndef forcing_starts_ends(package_times, globaltimes):"
}
] | import cftime
import numpy as np
import pytest
import xarray as xr
from imod.flow import TimeDiscretization
from imod.wq import timeutil | 973 |
@pytest.fixture(scope="module")
def time_discretization(three_days):
times = three_days
|
@pytest.fixture(scope="module")
def time_discretization(three_days):
times = three_days | duration = timeutil.timestep_duration(times, False) | 1 | 2023-12-08 13:57:59+00:00 | 2k |
Dong142857/Live3DPortrait | models/eg3d/volumetric_rendering/renderer.py | [
{
"identifier": "MipRayMarcher2",
"path": "models/eg3d/volumetric_rendering/ray_marcher.py",
"snippet": "class MipRayMarcher2(nn.Module):\n def __init__(self):\n super().__init__()\n\n\n def run_forward(self, colors, densities, depths, rendering_options):\n deltas = depths[:, :, 1:] - depths[:, :, :-1]\n colors_mid = (colors[:, :, :-1] + colors[:, :, 1:]) / 2\n densities_mid = (densities[:, :, :-1] + densities[:, :, 1:]) / 2\n depths_mid = (depths[:, :, :-1] + depths[:, :, 1:]) / 2\n\n\n if rendering_options['clamp_mode'] == 'softplus':\n densities_mid = F.softplus(densities_mid - 1) # activation bias of -1 makes things initialize better\n else:\n assert False, \"MipRayMarcher only supports `clamp_mode`=`softplus`!\"\n\n density_delta = densities_mid * deltas\n\n alpha = 1 - torch.exp(-density_delta)\n\n alpha_shifted = torch.cat([torch.ones_like(alpha[:, :, :1]), 1-alpha + 1e-10], -2)\n weights = alpha * torch.cumprod(alpha_shifted, -2)[:, :, :-1]\n\n composite_rgb = torch.sum(weights * colors_mid, -2)\n weight_total = weights.sum(2)\n composite_depth = torch.sum(weights * depths_mid, -2) / weight_total\n\n # clip the composite to min/max range of depths\n composite_depth = torch.nan_to_num(composite_depth, float('inf'))\n composite_depth = torch.clamp(composite_depth, torch.min(depths), torch.max(depths))\n\n if rendering_options.get('white_back', False):\n composite_rgb = composite_rgb + 1 - weight_total\n\n composite_rgb = composite_rgb * 2 - 1 # Scale to (-1, 1)\n\n return composite_rgb, composite_depth, weights\n\n\n def forward(self, colors, densities, depths, rendering_options):\n composite_rgb, composite_depth, weights = self.run_forward(colors, densities, depths, rendering_options)\n\n return composite_rgb, composite_depth, weights"
},
{
"identifier": "math_utils",
"path": "models/eg3d/volumetric_rendering/math_utils.py",
"snippet": "def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor:\ndef normalize_vecs(vectors: torch.Tensor) -> torch.Tensor:\ndef torch_dot(x: torch.Tensor, y: torch.Tensor):\ndef get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length):\ndef linspace(start: torch.Tensor, stop: torch.Tensor, num: int):"
}
] | import math
import torch
import torch.nn as nn
from models.eg3d.volumetric_rendering.ray_marcher import MipRayMarcher2
from models.eg3d.volumetric_rendering import math_utils | 1,563 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The renderer is a module that takes in rays, decides where to sample along each
ray, and computes pixel colors using the volume rendering equation.
"""
def generate_planes():
"""
Defines planes by the three vectors that form the "axes" of the
plane. Should work with arbitrary number of planes and planes of
arbitrary orientation.
"""
return torch.tensor([[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
[[1, 0, 0],
[0, 0, 1],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]], dtype=torch.float32)
def project_onto_planes(planes, coordinates):
"""
Does a projection of a 3D point onto a batch of 2D planes,
returning 2D plane coordinates.
Takes plane axes of shape n_planes, 3, 3
# Takes coordinates of shape N, M, 3
# returns projections of shape N*n_planes, M, 2
"""
N, M, C = coordinates.shape
n_planes, _, _ = planes.shape
coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)
inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)
projections = torch.bmm(coordinates, inv_planes)
return projections[..., :2]
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
assert padding_mode == 'zeros'
N, n_planes, C, H, W = plane_features.shape
_, M, _ = coordinates.shape
plane_features = plane_features.view(N*n_planes, C, H, W)
coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds
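    # Project the (box-normalized) 3D points onto every plane and bilinearly sample that plane's feature map.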
projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
return output_features
def sample_from_3dgrid(grid, coordinates):
"""
Expects coordinates in shape (batch_size, num_points_per_batch, 3)
Expects grid in shape (1, channels, H, W, D)
(Also works if grid has batch size)
Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
"""
batch_size, n_coords, n_dims = coordinates.shape
sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),
coordinates.reshape(batch_size, 1, 1, -1, n_dims),
mode='bilinear', padding_mode='zeros', align_corners=False)
N, C, H, W, D = sampled_features.shape
sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)
return sampled_features
class ImportanceRenderer(torch.nn.Module):
def __init__(self):
super().__init__()
| # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The renderer is a module that takes in rays, decides where to sample along each
ray, and computes pixel colors using the volume rendering equation.
"""
def generate_planes():
"""
Defines planes by the three vectors that form the "axes" of the
plane. Should work with arbitrary number of planes and planes of
arbitrary orientation.
"""
return torch.tensor([[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
[[1, 0, 0],
[0, 0, 1],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]], dtype=torch.float32)
def project_onto_planes(planes, coordinates):
"""
Does a projection of a 3D point onto a batch of 2D planes,
returning 2D plane coordinates.
Takes plane axes of shape n_planes, 3, 3
# Takes coordinates of shape N, M, 3
# returns projections of shape N*n_planes, M, 2
"""
N, M, C = coordinates.shape
n_planes, _, _ = planes.shape
coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)
inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)
projections = torch.bmm(coordinates, inv_planes)
return projections[..., :2]
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
assert padding_mode == 'zeros'
N, n_planes, C, H, W = plane_features.shape
_, M, _ = coordinates.shape
plane_features = plane_features.view(N*n_planes, C, H, W)
coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds
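    # Project the (box-normalized) 3D points onto every plane and bilinearly sample that plane's feature map.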
projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
return output_features
def sample_from_3dgrid(grid, coordinates):
"""
Expects coordinates in shape (batch_size, num_points_per_batch, 3)
Expects grid in shape (1, channels, H, W, D)
(Also works if grid has batch size)
Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
"""
batch_size, n_coords, n_dims = coordinates.shape
sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),
coordinates.reshape(batch_size, 1, 1, -1, n_dims),
mode='bilinear', padding_mode='zeros', align_corners=False)
N, C, H, W, D = sampled_features.shape
sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)
return sampled_features
class ImportanceRenderer(torch.nn.Module):
def __init__(self):
super().__init__() | self.ray_marcher = MipRayMarcher2() | 0 | 2023-12-09 15:18:53+00:00 | 2k |
lumi-ua/goit-project2-django-assistant | personal_assistant/app_contacts/views.py | [
{
"identifier": "ContactForm",
"path": "personal_assistant/app_contacts/forms.py",
"snippet": "class ContactForm(ModelForm):\n fullname = CharField(max_length=255, \n widget=forms.TextInput(attrs={'placeholder': 'Name Lastname', \"class\": \"form-control\"}))\n address = CharField(max_length=255, required=False, \n widget=forms.TextInput(attrs={'placeholder': 'City, Street, House number', \"class\": \"form-control\"}))\n birthday = DateField(required=False, input_formats=[\"%d.%m.%Y\"], \n widget=forms.DateInput(attrs={'placeholder': 'DD.MM.YYYY', 'class': 'form-control'}))\n class Meta:\n model = Contact\n fields = [\"fullname\", \"address\", \"birthday\"]\n exclude = [\"user\"]"
},
{
"identifier": "PhoneNumberForm",
"path": "personal_assistant/app_contacts/forms.py",
"snippet": "class PhoneNumberForm(forms.ModelForm):\n phone_number = PhoneNumberField(\n widget=PhoneNumberPrefixWidget(attrs={'placeholder': '+380', 'class': 'form-control'})\n )\n class Meta:\n model = PhoneNumber\n fields = [\"phone_number\"]\n exclude = [\"contact\"]"
},
{
"identifier": "EmailAddressForm",
"path": "personal_assistant/app_contacts/forms.py",
"snippet": "class EmailAddressForm(forms.ModelForm):\n email = EmailField(max_length=100, required=False, widget=forms.EmailInput(attrs={'placeholder': '[email protected]', 'class': 'form-control'}))\n\n class Meta:\n model = EmailAddress\n fields = [\"email\"]\n exclude = [\"contact\"]"
},
{
"identifier": "Contact",
"path": "personal_assistant/app_contacts/models.py",
"snippet": "class Contact(models.Model):\n fullname = models.CharField(max_length=255)\n address = models.CharField(max_length=255, blank=True, null=True)\n birthday = models.DateField(blank=True, null=True)\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)"
},
{
"identifier": "PhoneNumber",
"path": "personal_assistant/app_contacts/models.py",
"snippet": "class PhoneNumber(models.Model):\n phone_number = PhoneNumberField(null=True,)\n contact = models.ForeignKey(\n Contact, on_delete=models.CASCADE, default=None, null=True, related_name='phone_numbers'\n )"
},
{
"identifier": "EmailAddress",
"path": "personal_assistant/app_contacts/models.py",
"snippet": "class EmailAddress(models.Model):\n email = models.EmailField(max_length=100, null=True)\n contact = models.ForeignKey(\n Contact, on_delete=models.CASCADE, default=None, null=True, related_name='email_addresses'\n )"
}
] | from datetime import date
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.urls import reverse_lazy
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from datetime import date, timedelta
from .forms import ContactForm, PhoneNumberForm, EmailAddressForm
from .models import Contact, PhoneNumber, EmailAddress | 682 | # from django.db.models import Q
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"})
@login_required
def contact(request):
contact_form = ContactForm()
| # from django.db.models import Q
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"})
@login_required
def contact(request):
contact_form = ContactForm() | phone_number_form = PhoneNumberForm() | 1 | 2023-12-08 17:26:59+00:00 | 2k |
SubConv/SubConv | modules/convert/converter.py | [
{
"identifier": "RandUserAgent",
"path": "modules/convert/util.py",
"snippet": "def RandUserAgent() -> str:\n return userAgents[random.randint(0, len(userAgents) - 1)]"
},
{
"identifier": "get",
"path": "modules/convert/util.py",
"snippet": "def get(content):\n if content is None:\n return \"\"\n else:\n return content"
},
{
"identifier": "uniqueName",
"path": "modules/convert/util.py",
"snippet": "def uniqueName(names: dict, name):\n\tindex = names.get(name)\n\tif index is None:\n\t\tindex = 0\n\t\tnames[name] = index\n\telse:\n\t\tindex += 1\n\t\tnames[name] = index\n\t\tname = \"%s-%02d\" % (name, index)\n\treturn name"
},
{
"identifier": "urlSafe",
"path": "modules/convert/util.py",
"snippet": "def urlSafe(string):\n\treturn string.replace(\"+\", \"-\").replace(\"/\", \"_\")"
},
{
"identifier": "base64RawStdDecode",
"path": "modules/convert/util.py",
"snippet": "def base64RawStdDecode(encoded):\n\treturn base64.b64decode(\n encoded + \"=\"*(-len(encoded)%4)\n\t).decode(\"utf-8\")"
},
{
"identifier": "base64RawURLDecode",
"path": "modules/convert/util.py",
"snippet": "def base64RawURLDecode(encoded):\n\treturn base64.urlsafe_b64decode(\n\t\t\t encoded + \"=\"*(-len(encoded)%4)\n\t).decode(\"utf-8\")"
},
{
"identifier": "handleVShareLink",
"path": "modules/convert/v.py",
"snippet": "def handleVShareLink(names: dict, url: urlparse.ParseResult, scheme: str, proxy: dict):\n query = dict(urlparse.parse_qsl(url.query))\n proxy[\"name\"] = uniqueName(names, urlparse.unquote(url.fragment))\n if url.hostname == \"\":\n raise\n if url.port == \"\":\n raise\n proxy[\"type\"] = scheme\n proxy[\"server\"] = url.hostname\n proxy[\"port\"] = url.port\n proxy[\"uuid\"] = url.username\n proxy[\"udp\"] = True\n tls = get(query.get(\"security\")).lower()\n if tls.endswith(\"tls\") or tls == \"reality\":\n proxy[\"tls\"] = True\n fingerprint = get(query.get(\"fp\"))\n if fingerprint == \"\":\n proxy[\"client-fingerprint\"] = \"chrome\"\n else:\n proxy[\"client-fingerprint\"] = fingerprint\n alpn = get(query.get(\"alpn\"))\n if alpn != \"\":\n proxy[\"alpn\"] = alpn.split(\",\")\n sni = get(query.get(\"sni\"))\n if sni != \"\":\n proxy[\"servername\"] = sni\n realityPublicKey = get(query.get(\"pbk\"))\n if realityPublicKey != \"\":\n proxy[\"reality-opts\"] = {\n \"public-key\": realityPublicKey,\n \"short-id\": get(query.get(\"sid\"))\n }\n \n switch = get(query.get(\"packetEncoding\"))\n if switch == \"none\" or switch == \"\":\n pass\n elif switch == \"packet\":\n proxy[\"packet-addr\"] = True\n else:\n proxy[\"xudp\"] = True\n\n network = get(query.get(\"type\")).lower()\n if network == \"\":\n network = \"tcp\"\n fakeType = get(query.get(\"headerType\")).lower()\n if fakeType == \"http\":\n network = \"http\"\n elif network == \"http\":\n network = \"h2\"\n proxy[\"network\"] = network\n if network == \"tcp\":\n if fakeType != \"none\" and fakeType != \"\":\n headers = {}\n httpOpts = {}\n httpOpts[\"path\"] = \"/\"\n\n host = get(query.get(\"host\"))\n if host != \"\":\n headers[\"Host\"] = str(host)\n\n method = get(query.get(\"method\"))\n if method != \"\":\n httpOpts[\"method\"] = method\n\n path = get(query.get(\"path\"))\n if path != \"\":\n httpOpts[\"path\"] = str(path)\n \n httpOpts[\"headers\"] = headers\n proxy[\"http-opts\"] = httpOpts\n\n elif network == \"http\":\n headers = {}\n h2Opts = {}\n h2Opts[\"path\"] = \"/\"\n path = get(query.get(\"path\"))\n if path != \"\":\n h2Opts[\"path\"] = str(path)\n host = get(query.get(\"host\"))\n if host != \"\":\n h2Opts[\"host\"] = str(host)\n h2Opts[\"headers\"] = headers\n proxy[\"h2-opts\"] = h2Opts\n \n elif network == \"ws\":\n headers = {}\n wsOpts = {}\n headers[\"User-Agent\"] = RandUserAgent()\n headers[\"Host\"] = get(query.get(\"host\"))\n wsOpts[\"path\"] = get(query.get(\"path\"))\n wsOpts[\"headers\"] = headers\n\n earlyData = get(query.get(\"ed\"))\n if earlyData != \"\":\n try:\n med = int(earlyData)\n except:\n raise\n wsOpts[\"max-early-data\"] = med\n earlyDataHeader = get(query.get(\"edh\"))\n if earlyDataHeader != \"\":\n wsOpts[\"early-data-header-name\"] = earlyDataHeader\n\n proxy[\"ws-opts\"] = wsOpts\n\n elif network == \"grpc\":\n grpcOpts = {}\n grpcOpts[\"grpc-service-name\"] = get(query.get(\"serviceName\"))\n proxy[\"grpc-opts\"] = grpcOpts"
}
] | from modules.convert.util import RandUserAgent
from modules.convert.util import get
from modules.convert.util import uniqueName
from modules.convert.util import urlSafe
from modules.convert.util import base64RawStdDecode
from modules.convert.util import base64RawURLDecode
from modules.convert.v import handleVShareLink
import json
import base64
import urllib.parse as urlparse
import distutils.util | 1,548 |
async def ConvertsV2Ray(buf):
try:
data = base64.b64decode(buf).decode("utf-8")
except:
try:
data = buf.decode("utf-8")
except:
data = buf
arr = data.splitlines()
proxies = []
names = {}
for line in arr:
if line == "":
continue
if -1 == line.find("://"):
continue
else:
scheme, body = line.split("://", 1)
scheme = scheme.lower()
if scheme == "hysteria":
try:
urlHysteria = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria.query))
name = uniqueName(names, urlparse.unquote(urlHysteria.fragment))
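                # Build the proxy entry: name/type/server/port come from the URL itself,
                # the remaining options are read from its query parameters.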
hysteria = {}
hysteria["name"] = name
hysteria["type"] = scheme
hysteria["server"] = urlHysteria.hostname
hysteria["port"] = urlHysteria.port
|
async def ConvertsV2Ray(buf):
try:
data = base64.b64decode(buf).decode("utf-8")
except:
try:
data = buf.decode("utf-8")
except:
data = buf
arr = data.splitlines()
proxies = []
names = {}
for line in arr:
if line == "":
continue
if -1 == line.find("://"):
continue
else:
scheme, body = line.split("://", 1)
scheme = scheme.lower()
if scheme == "hysteria":
try:
urlHysteria = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria.query))
name = uniqueName(names, urlparse.unquote(urlHysteria.fragment))
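                # Build the proxy entry: name/type/server/port come from the URL itself,
                # the remaining options are read from its query parameters.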
hysteria = {}
hysteria["name"] = name
hysteria["type"] = scheme
hysteria["server"] = urlHysteria.hostname
hysteria["port"] = urlHysteria.port | hysteria["sni"] = query.get("peer") | 1 | 2023-12-06 12:57:11+00:00 | 2k |
Opt-Mucca/PySCIPOpt-ML | src/pyscipopt_ml/add_predictor.py | [
{
"identifier": "NotRegistered",
"path": "src/pyscipopt_ml/exceptions.py",
"snippet": "class NotRegistered(Exception):\n \"\"\"Predictor is not supported by pyscipopt-ml.\"\"\"\n\n def __init__(self, predictor):\n super().__init__(\n f\"Object of type {predictor} is not registered/supported with pyscipopt-ml\"\n )"
},
{
"identifier": "get_convertor",
"path": "src/pyscipopt_ml/modelling/get_convertor.py",
"snippet": "def get_convertor(predictor, convertors):\n \"\"\"Return the convertor for a given predictor.\"\"\"\n convertor = None\n try:\n convertor = convertors[type(predictor)]\n except KeyError:\n pass\n if convertor is None:\n for parent in type(predictor).mro():\n try:\n convertor = convertors[parent]\n break\n except KeyError:\n pass\n if convertor is None:\n name = type(predictor).__name__\n try:\n convertor = convertors[name]\n except KeyError:\n pass\n return convertor"
},
{
"identifier": "registered_predictors",
"path": "src/pyscipopt_ml/registered_predictors.py",
"snippet": "def registered_predictors():\n \"\"\"Return the list of registered predictors.\"\"\"\n convertors = {\n **sklearn_convertors(),\n **pytorch_convertors(),\n **xgboost_convertors(),\n **lightgbm_convertors(),\n }\n return convertors"
}
] | from warnings import warn
from .exceptions import NotRegistered
from .modelling.get_convertor import get_convertor
from .registered_predictors import registered_predictors | 820 |
def add_predictor_constr(
scip_model, predictor, input_vars, output_vars=None, unique_naming_prefix="p_", **kwargs
):
"""Formulate predictor in PySCIPOpt model.
The formulation predicts the values of output_vars using input_vars according to
predictor.
Parameters
----------
scip_model : PySCIPOpt Model
The pyscipopt model where the predictor should be inserted.
predictor:
The predictor to insert.
input_vars : list or np.ndarray
Decision variables used as input for predictor in scip_model.
output_vars : list or np.ndarray, optional
Decision variables used as output for predictor in scip_model.
unique_naming_prefix : str, optional
A unique naming prefix that is used before all variable and constraint names. This parameter is important if
the SCIP model is later printed to file and many predictors are added to the same SCIP model.
Returns
-------
AbstractPredictorConstr
Object containing information about what was added to scip_model to insert the
predictor in it
Note
----
The parameters `input_vars` and `output_vars` can be either
* Lists of variables (List of lists etc. for higher dimensional input)
* np.ndarray of variables
For internal use in the package they are cast into a np.ndarray of variables
They should have dimensions that conform with the input/output of the predictor.
We denote by `n_samples` the number of samples (or objects) that we want to predict with our predictor.
We denote by `n_features` the dimension of the input of the predictor.
We denote by `n_output` the dimension of the output.
The `input_vars` are therefore of shape `(n_samples, n_features)` and the `output_vars` of
shape `(n_samples, n_outputs)`. In the case of `output_vars` not being passed, appropriate variables will
be automatically created.
In the case of `n_samples == 1` the first dimension can simply be removed from the input.
"""
convertors = registered_predictors()
|
def add_predictor_constr(
scip_model, predictor, input_vars, output_vars=None, unique_naming_prefix="p_", **kwargs
):
"""Formulate predictor in PySCIPOpt model.
The formulation predicts the values of output_vars using input_vars according to
predictor.
Parameters
----------
scip_model : PySCIPOpt Model
The pyscipopt model where the predictor should be inserted.
predictor:
The predictor to insert.
input_vars : list or np.ndarray
Decision variables used as input for predictor in scip_model.
output_vars : list or np.ndarray, optional
Decision variables used as output for predictor in scip_model.
unique_naming_prefix : str, optional
A unique naming prefix that is used before all variable and constraint names. This parameter is important if
the SCIP model is later printed to file and many predictors are added to the same SCIP model.
Returns
-------
AbstractPredictorConstr
Object containing information about what was added to scip_model to insert the
predictor in it
Note
----
The parameters `input_vars` and `output_vars` can be either
* Lists of variables (List of lists etc. for higher dimensional input)
* np.ndarray of variables
For internal use in the package they are cast into a np.ndarray of variables
They should have dimensions that conform with the input/output of the predictor.
We denote by `n_samples` the number of samples (or objects) that we want to predict with our predictor.
We denote by `n_features` the dimension of the input of the predictor.
We denote by `n_output` the dimension of the output.
The `input_vars` are therefore of shape `(n_samples, n_features)` and the `output_vars` of
shape `(n_samples, n_outputs)`. In the case of `output_vars` not being passed, appropriate variables will
be automatically created.
In the case of `n_samples == 1` the first dimension can simply be removed from the input.
"""
convertors = registered_predictors() | convertor = get_convertor(predictor, convertors) | 1 | 2023-12-10 20:28:22+00:00 | 2k |
DongqiShen/qwen-fast | generate.py | [
{
"identifier": "Transformer",
"path": "model.py",
"snippet": "class Transformer(nn.Module):\n def __init__(self, config: ModelArgs) -> None:\n super().__init__()\n self.config = config\n\n self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)\n self.layers = nn.ModuleList(TransformerBlock(config) for _ in range(config.n_layer))\n self.norm = RMSNorm(config.dim, eps=config.norm_eps)\n self.output = nn.Linear(config.dim, config.vocab_size, bias=False)\n\n self.freqs_cis: Optional[Tensor] = None\n self.mask_cache: Optional[Tensor] = None\n self.max_batch_size = -1\n self.max_seq_length = -1\n\n def setup_caches(self, max_batch_size, max_seq_length):\n if self.max_seq_length >= max_seq_length and self.max_batch_size >= max_batch_size:\n return\n head_dim = self.config.dim // self.config.n_head\n max_seq_length = find_multiple(max_seq_length, 8)\n self.max_seq_length = max_seq_length\n self.max_batch_size = max_batch_size\n for b in self.layers:\n b.attention.kv_cache = KVCache(max_batch_size, max_seq_length, self.config.n_local_heads, head_dim)\n\n self.freqs_cis = precompute_freqs_cis(self.config.block_size, self.config.dim // self.config.n_head, self.config.rope_base)\n self.causal_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool))\n\n def forward(self, idx: Tensor, input_pos: Optional[Tensor] = None) -> Tensor:\n assert self.freqs_cis is not None, \"Caches must be initialized first\"\n mask = self.causal_mask[None, None, input_pos]\n freqs_cis = self.freqs_cis[input_pos]\n x = self.tok_embeddings(idx)\n\n for i, layer in enumerate(self.layers):\n x = layer(x, input_pos, freqs_cis, mask)\n x = self.norm(x)\n logits = self.output(x)\n return logits\n\n @classmethod\n def from_name(cls, name: str):\n return cls(ModelArgs.from_name(name))"
},
{
"identifier": "maybe_init_dist",
"path": "tp.py",
"snippet": "def maybe_init_dist() -> Optional[int]:\n try:\n # provided by torchrun\n rank = _get_rank()\n world_size = _get_world_size()\n\n if world_size < 2:\n # too few gpus to parallelize, tp is no-op\n return None\n except KeyError:\n # not run via torchrun, no-op\n return None\n\n dist.init_process_group(backend=\"nccl\", rank=rank, world_size=world_size)\n return rank"
}
] | import sys
import time
import itertools
import torch
import torch._inductor.config
import torch._dynamo.config
import contextlib
import argparse
from pathlib import Path
from typing import Optional, Tuple
from model import Transformer
from tp import maybe_init_dist
from sentencepiece import SentencePieceProcessor
from quantize import WeightOnlyInt8QuantHandler
from quantize import WeightOnlyInt4QuantHandler
from tp import apply_tp | 1,107 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization
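    # Exponential-race trick: with E_i ~ Exp(1), argmax_i(p_i / E_i) picks index i with probability
    # proportional to p_i, so this draws a categorical sample without a host-device sync.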
q = torch.empty_like(probs_sort).exponential_(1)
return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
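    # Scale logits by temperature, optionally mask everything below the k-th largest logit to -inf,
    # then normalize with softmax.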
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
pivot = v.select(-1, -1).unsqueeze(-1)
logits = torch.where(logits < pivot, -float("Inf"), logits)
probs = torch.nn.functional.softmax(logits, dim=-1)
return probs
def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
probs = logits_to_probs(logits[0, -1], temperature, top_k)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization
q = torch.empty_like(probs_sort).exponential_(1)
return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
pivot = v.select(-1, -1).unsqueeze(-1)
logits = torch.where(logits < pivot, -float("Inf"), logits)
probs = torch.nn.functional.softmax(logits, dim=-1)
return probs
def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
probs = logits_to_probs(logits[0, -1], temperature, top_k)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
| def prefill(model: Transformer, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> torch.Tensor: | 0 | 2023-12-05 14:07:19+00:00 | 2k |
Yanyutin753/CowAndPandoraNext | channel/chat_channel.py | [
{
"identifier": "Channel",
"path": "channel/channel.py",
"snippet": "class Channel(object):\n NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE]\n\n def startup(self):\n \"\"\"\n init channel\n \"\"\"\n raise NotImplementedError\n\n def handle_text(self, msg):\n \"\"\"\n process received msg\n :param msg: message object\n \"\"\"\n raise NotImplementedError\n\n # 统一的发送函数,每个Channel自行实现,根据reply的type字段发送不同类型的消息\n def send(self, reply: Reply, context: Context):\n \"\"\"\n send message to user\n :param msg: message content\n :param receiver: receiver channel account\n :return:\n \"\"\"\n raise NotImplementedError\n\n def build_reply_content(self, query, context: Context = None) -> Reply:\n return Bridge().fetch_reply_content(query, context)\n\n def build_voice_to_text(self, voice_file) -> Reply:\n return Bridge().fetch_voice_to_text(voice_file)\n\n def build_text_to_voice(self, text) -> Reply:\n return Bridge().fetch_text_to_voice(text)"
},
{
"identifier": "Dequeue",
"path": "common/dequeue.py",
"snippet": "class Dequeue(Queue):\n def putleft(self, item, block=True, timeout=None):\n with self.not_full:\n if self.maxsize > 0:\n if not block:\n if self._qsize() >= self.maxsize:\n raise Full\n elif timeout is None:\n while self._qsize() >= self.maxsize:\n self.not_full.wait()\n elif timeout < 0:\n raise ValueError(\"'timeout' must be a non-negative number\")\n else:\n endtime = time() + timeout\n while self._qsize() >= self.maxsize:\n remaining = endtime - time()\n if remaining <= 0.0:\n raise Full\n self.not_full.wait(remaining)\n self._putleft(item)\n self.unfinished_tasks += 1\n self.not_empty.notify()\n\n def putleft_nowait(self, item):\n return self.putleft(item, block=False)\n\n def _putleft(self, item):\n self.queue.appendleft(item)"
},
{
"identifier": "logger",
"path": "common/log.py",
"snippet": "def _reset_logger(log):\ndef _get_logger():"
},
{
"identifier": "conf",
"path": "config.py",
"snippet": "def conf():\n return config"
}
] | import os
import re
import threading
import time
from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
from bridge.context import *
from bridge.reply import *
from channel.channel import Channel
from common.dequeue import Dequeue
from common.log import logger
from config import conf
from plugins import *
from voice.audio_convert import any_to_wav | 1,113 |
try:
    from voice.audio_convert import any_to_wav  # optional dependency; import failures are ignored below
except Exception as e:
pass
# Abstract class; holds the generic message-handling logic that does not depend on any specific channel
class ChatChannel(Channel):
    name = None  # username of the logged-in account
    user_id = None  # user id of the logged-in account
    futures = {}  # per-session_id future objects submitted to the thread pool; used to cancel not-yet-started futures when a session is reset (running ones are not cancelled)
    sessions = {}  # concurrency control: each session_id may have only one context being processed at a time
    lock = threading.Lock()  # guards access to sessions
    handler_pool = ThreadPoolExecutor(max_workers=8)  # thread pool for handling messages
def __init__(self):
_thread = threading.Thread(target=self.consume)
_thread.setDaemon(True)
_thread.start()
    # Build the context from the message; triggers that depend on the message content go here
def _compose_context(self, ctype: ContextType, content, **kwargs):
context = Context(ctype, content)
context.kwargs = kwargs
        # When the context is first passed in, origin_ctype is None.
        # It was introduced because a voice input generates two nested contexts: step one converts speech to text, step two generates a text reply from that text.
        # origin_ctype is used in the second (text-reply) step to decide whether a prefix match is needed; voice from a private chat does not need one.
if "origin_ctype" not in context:
context["origin_ctype"] = ctype
        # When the context is first passed in, receiver is None; set it according to the type
first_in = "receiver" not in context
        # Group-name matching: set session_id and receiver
        if first_in:  # receiver is None on first entry; set it according to the type
|
try:
    from voice.audio_convert import any_to_wav  # optional dependency; import failures are ignored below
except Exception as e:
pass
# Abstract class; holds the generic message-handling logic that does not depend on any specific channel
class ChatChannel(Channel):
    name = None  # username of the logged-in account
    user_id = None  # user id of the logged-in account
    futures = {}  # per-session_id future objects submitted to the thread pool; used to cancel not-yet-started futures when a session is reset (running ones are not cancelled)
    sessions = {}  # concurrency control: each session_id may have only one context being processed at a time
    lock = threading.Lock()  # guards access to sessions
    handler_pool = ThreadPoolExecutor(max_workers=8)  # thread pool for handling messages
def __init__(self):
_thread = threading.Thread(target=self.consume)
_thread.setDaemon(True)
_thread.start()
    # Build the context from the message; triggers that depend on the message content go here
def _compose_context(self, ctype: ContextType, content, **kwargs):
context = Context(ctype, content)
context.kwargs = kwargs
        # When the context is first passed in, origin_ctype is None.
        # It was introduced because a voice input generates two nested contexts: step one converts speech to text, step two generates a text reply from that text.
        # origin_ctype is used in the second (text-reply) step to decide whether a prefix match is needed; voice from a private chat does not need one.
if "origin_ctype" not in context:
context["origin_ctype"] = ctype
        # When the context is first passed in, receiver is None; set it according to the type
first_in = "receiver" not in context
        # Group-name matching: set session_id and receiver
if first_in: # context首次传入时,receiver是None,根据类型设置receiver | config = conf() | 3 | 2023-12-14 15:21:17+00:00 | 2k |
nerdslab/bams | bams/models/bams.py | [
{
"identifier": "MLP",
"path": "bams/models/mlp.py",
"snippet": "class MLP(nn.Module):\n r\"\"\"Flexible Multi-layer perceptron model, with optional batchnorm layers.\n\n Args:\n hidden_layers (list): List of layer dimensions, from input layer to output\n layer. If first input size is -1, will use a lazy layer.\n bias (boolean, optional): If set to :obj:`True`, bias will be used in linear\n layers. (default: :obj:`True`).\n activation (torch.nn.Module, optional): Activation function. (default:\n :obj:`nn.ReLU`).\n batchnorm (boolean, optional): If set to :obj:`True`, batchnorm layers are\n added after each linear layer, before the activation (default:\n :obj:`False`).\n drop_last_nonlin (boolean, optional): If set to :obj:`True`, the last layer\n won't have activations or batchnorm layers. (default: :obj:`True`)\n\n Examples:\n >>> m = MLP([-1, 16, 64])\n MLP(\n (layers): Sequential(\n (0): LazyLinear(in_features=0, out_features=16, bias=True)\n (1): ReLU(inplace=True)\n (2): Linear(in_features=16, out_features=64, bias=True)\n )\n )\n \"\"\"\n\n def __init__(\n self,\n hidden_layers,\n *,\n bias=True,\n activation=nn.ReLU(True),\n batchnorm=False,\n drop_last_nonlin=True\n ):\n super().__init__()\n\n # build the layers\n layers = []\n for in_dim, out_dim in zip(hidden_layers[:-1], hidden_layers[1:]):\n if in_dim == -1:\n layers.append(nn.LazyLinear(out_dim, bias=bias and not batchnorm))\n else:\n layers.append(nn.Linear(in_dim, out_dim, bias=bias and not batchnorm))\n if batchnorm:\n layers.append(nn.BatchNorm1d(num_features=out_dim, momentum=0.99))\n # ayers.append(nn.LayerNorm(out_dim))\n if activation is not None:\n activation = nn.PReLU(1)\n layers.append(activation)\n\n # remove activation and/or batchnorm layers from the last block\n if drop_last_nonlin:\n remove_layers = -(int(activation is not None) + int(batchnorm))\n if remove_layers:\n layers = layers[:remove_layers]\n\n self.layers = nn.Sequential(*layers)\n self.out_dim = hidden_layers[-1]\n\n def forward(self, x):\n x = self.layers(x)\n return x\n\n def reset_parameters(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n m.reset_parameters()"
},
{
"identifier": "TemporalConvNet",
"path": "bams/models/tcn.py",
"snippet": "class TemporalConvNet(nn.Module):\n def __init__(\n self,\n num_inputs,\n num_channels,\n kernel_size=2,\n num_layers_per_block=2,\n dropout=0.2,\n shift=0,\n dilation=2,\n ):\n super(TemporalConvNet, self).__init__()\n\n self.num_levels = len(num_channels)\n self.kernel_size = prepare_argument(kernel_size, self.num_levels)\n self.num_layers_per_block = prepare_argument(\n num_layers_per_block, self.num_levels\n )\n self.dilation = dilation\n self.feat_dim = num_channels[-1]\n layers = []\n for i in range(self.num_levels):\n dilation_size = dilation**i\n shift_ = shift if i == (self.num_levels - 1) else 0\n in_channels = num_inputs if i == 0 else num_channels[i - 1]\n out_channels = num_channels[i]\n kernel_size = self.kernel_size[i]\n num_layers_per_block = self.num_layers_per_block[i]\n layers += [\n TemporalBlock(\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=dilation_size,\n padding=(kernel_size - 1) * dilation_size,\n n_layers=num_layers_per_block,\n dropout=dropout,\n shift=shift_ * dilation_size,\n )\n ]\n\n self.network = nn.Sequential(*layers)\n\n @property\n def receptive_field(self):\n return compute_receiptive_field(\n kernel_size=self.kernel_size,\n num_blocks=self.num_levels,\n num_layers_per_block=self.num_layers_per_block,\n dilation=self.dilation,\n )\n\n def forward(self, x):\n x = rearrange(x, \"b l k -> b k l\")\n ret = self.network(x)\n ret = rearrange(ret, \"b k l -> b l k\")\n return ret"
}
] | from collections import OrderedDict
from bams.models import TemporalConvNet, MLP
import torch
import torch.nn as nn | 1,395 |
class BAMS(nn.Module):
r"""BAMS model.
Args:
input_size (int): Number of input features.
predictor (dict): Parameters for the predictor MLP.
        encoders (dict[dict]): A dictionary of encoders, where each key is the name of
            the encoder, and each value is a dictionary of parameters for the encoder.
Each encoder is a TemporalConvNet.
"""
def __init__(
self,
input_size,
*,
predictor=None,
**encoder_kwargs,
):
super().__init__()
self.input_size = input_size
self.representation_size = 0
encoders = dict()
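        # One TemporalConvNet encoder per keyword argument; the representation size is the sum of
        # every encoder's final channel count.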
for name, tcn_kwargs in encoder_kwargs.items():
assert "num_inputs" not in tcn_kwargs
encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)
self.representation_size += tcn_kwargs["num_channels"][-1]
self.encoders = torch.nn.ModuleDict(encoders)
# hoa predictor (first layer is a lazy linear layer)
|
class BAMS(nn.Module):
r"""BAMS model.
Args:
input_size (int): Number of input features.
predictor (dict): Parameters for the predictor MLP.
        encoders (dict[dict]): A dictionary of encoders, where each key is the name of
            the encoder, and each value is a dictionary of parameters for the encoder.
Each encoder is a TemporalConvNet.
"""
def __init__(
self,
input_size,
*,
predictor=None,
**encoder_kwargs,
):
super().__init__()
self.input_size = input_size
self.representation_size = 0
encoders = dict()
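        # One TemporalConvNet encoder per keyword argument; the representation size is the sum of
        # every encoder's final channel count.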
for name, tcn_kwargs in encoder_kwargs.items():
assert "num_inputs" not in tcn_kwargs
encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)
self.representation_size += tcn_kwargs["num_channels"][-1]
self.encoders = torch.nn.ModuleDict(encoders)
# hoa predictor (first layer is a lazy linear layer) | self.predictor = MLP(**predictor) | 0 | 2023-12-05 16:26:57+00:00 | 2k |
FF14CN/Sarean-arsenal | Utility/sqMall/sqMallDoSign.py | [
{
"identifier": "Daoyu",
"path": "Utility/sdoLogin/Daoyu.py",
"snippet": "def dykey_encrypt(self):\ndef config_handler():\ndef initialize():\ndef get_guid(device_id, manuid):\ndef get_flowid(manuid, deviceid, sessionid, show_username):\ndef get_account_id_list(flowid, deviceid, manuid, sessionid, show_username):\ndef make_confirm(account_id, flowid, deviceid, manuid, sessionid, show_username):\ndef get_sub_account_key(flowid, manuid, deviceid, sessionid, show_username):\ndef get_temp_sessionid(main_key):\ndef get_sub_account_session(sub_account_key, temp_account_sessionid):"
},
{
"identifier": "daoyumall_sign",
"path": "Utility/sqMall/daoyuBuildinMallSign.py",
"snippet": "def daoyumall_sign(sub_session_id, account_id):\n \"\"\"\n 仅适用于叨鱼内的盛趣商城签到操作 PC端不适用\n :param sub_session_id: 子账号的Daoyukey值\n :param account_id: 子账号的AccountID\n :return: 0: 签到成功 1: 重复签到 2: 签到失败\n \"\"\"\n sign_url = 'https://sqmallservice.u.sdo.com/api/us/integration/checkIn'\n sign_data = {'merchantId': 1}\n sign_header = {\n 'authority': 'sqmallservice.u.sdo.com',\n 'method': 'PUT',\n 'scheme': 'https',\n 'qu-web-host': 'https://m.qu.sdo.com',\n 'qu-hardware-platform': '1',\n 'qu-software-platform': '2',\n 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DaoYu/9.3.3',\n 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'accept': 'application/json, text/javascript, */*; q=0.01',\n 'qu-deploy-platform': '4',\n 'qu-merchant-id': '1',\n 'origin': 'https://m.qu.sdo.com',\n 'x-requested-with': 'com.sdo.sdaccountkey',\n 'sec-fetch-site': 'same-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://m.qu.sdo.com/',\n }\n sign_cookies = {\n 'sessionId': sub_session_id,\n 'direbmemllam': account_id,\n }\n sign_response = requests.put(sign_url, headers=sign_header, cookies=sign_cookies, data=sign_data, verify=False)\n sign_json = sign_response.json()\n if sign_json['resultMsg'] == 'SUCCESS':\n return 0\n elif sign_json['resultMsg'] == '今日已签到,请勿重复签到':\n return 1\n else:\n return 2"
},
{
"identifier": "daoyu_mall_balance",
"path": "Utility/sqMall/daoyuBuildinMallBalance.py",
"snippet": "def daoyu_mall_balance(session_id):\n \"\"\"\n 仅适用于叨鱼内部商城的查询签到积分 PC端不适用\n :param session_id: 子账号的Daoyukey值\n :return: 返回签到积分余额\n \"\"\"\n get_balance_url = 'https://sqmallservice.u.sdo.com/api/rs/member/integral/balance?merchantId=1'\n get_balance_header = {\n 'authority': 'sqmallservice.u.sdo.com',\n 'method': 'GET',\n 'scheme': 'https',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'qu-deploy-platform': '4',\n 'accept': 'application/json, text/javascript, */*; q=0.01',\n 'qu-merchant-id': '1',\n 'qu-hardware-platform': '1',\n 'qu-software-platform': '2',\n 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 DaoYu/9.3.3',\n 'qu-web-host': 'https://m.qu.sdo.com',\n 'origin': 'https://m.qu.sdo.com',\n 'x-requested-with': 'com.sdo.sdaccountkey',\n 'sec-fetch-site': 'same-site',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://m.qu.sdo.com/',\n 'accept-encoding': 'gzip, deflate',\n 'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',\n }\n get_balance_cookies = {\n 'sessionId': session_id\n }\n get_balance_response = requests.get(get_balance_url, headers=get_balance_header, cookies=get_balance_cookies,\n verify=False)\n get_balance_json = get_balance_response.json()\n balance = get_balance_json['data']['balance']\n return balance"
}
] | from Utility.sdoLogin import Daoyu
from Utility.sqMall.daoyuBuildinMallSign import daoyumall_sign
from Utility.sqMall.daoyuBuildinMallBalance import daoyu_mall_balance
import Utility.Notifications.push as pusher | 1,368 | """
Author: KuliPoi
Contact: [email protected]
Created: 2023-12-21
File: sqMallDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main():
| """
Author: KuliPoi
Contact: [email protected]
Created: 2023-12-21
File: sqMallDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main(): | if Daoyu.initialize(): | 0 | 2023-12-06 08:48:02+00:00 | 2k |
janmartchouk/vidgen | src/content_getter.py | [
{
"identifier": "SUBREDDITS",
"path": "config/dicts.py",
"snippet": "SUBREDDITS = {\n 'tifu': 'rss',\n 'confession': 'rss',\n 'relationship_advice': 'web',\n 'amitheasshole': 'rss'\n}"
},
{
"identifier": "setup_logger",
"path": "utils/logger.py",
"snippet": "def setup_logger(name, level=logging.INFO, emoji='⚙️'):\n \"\"\"To setup as many loggers as you want\"\"\"\n\n # Create handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(level)\n\n # Create formatters and add it to handlers\n c_format = ColoredFormatter(emoji + ' | %(name)s | %(message)s')\n c_handler.setFormatter(c_format)\n\n # Add handlers to the logger\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(c_handler)\n\n return logger"
},
{
"identifier": "Post",
"path": "models/post.py",
"snippet": "class Post:\n \"\"\"\n A class representing a Reddit post.\n \"\"\"\n def __init__(self, title, author, subreddit, content, crawl_date):\n \"\"\"\n Initialize a Post object.\n\n :param title: The title of the post.\n :type title: str\n :param author: The author of the post.\n :type author: str\n :param subreddit: The subreddit of the post.\n :type subreddit: str\n :param content: The content of the post.\n :type content: str\n :param crawl_date: The date the post was crawled.\n :type crawl_date: datetime.datetime\n \n \"\"\"\n # Simple data stores\n self.author = author\n self.subreddit = subreddit\n self.crawl_date = crawl_date\n\n # Replace Reddit slang in title and content\n self.title = replace_words(title, REDDIT_SLANG)\n self.content = replace_words(content, REDDIT_SLANG)\n\n # Remove Age/Gender Reddit-typical tuples\n self.title = re.sub(r\"\\(?\\d{1,3}[mfMF]\\)?\", '', self.title).strip()\n self.content = re.sub(r\"\\(?\\d{1,3}[mfMF]\\)?\", '', self.content).strip()\n\n # Clean up potentially spammy fields\n self.author = self.author.replace('\\n', ' ').replace('\\t', ' ')\n self.author = re.sub(' +', ' ', self.author).strip()\n self.title = self.title.replace('\\n', ' ').replace('\\t', ' ')\n self.title = re.sub(' +', ' ', self.title).strip()\n self.content = self.content.replace('\\n', ' ').replace('\\t', ' ')\n self.content = re.sub(' +', ' ', self.content).strip()\n\n # Calculate hash from title + author + post\n self.hash = hashlib.sha256(\n str.encode(self.title) + str.encode(self.author) +\n str.encode(self.subreddit)\n ).hexdigest()\n\n # Shorten title and hash\n self.short_title = shorten_string(self.title)\n self.short_hash = shorten_hash(self.hash)\n\n # By default, we don't have a generated audio, subtitles or video yet\n self.audio = False\n self.subtitles = False\n self.video = False\n self.uploaded_youtube = False\n\n # Used for storing which platforms the post has been uploaded to\n self.posted_to = []\n\n def __str__(self, short=True) -> str:\n return f\"\"\"{self.hash}\n├── title: {self.title},\n├── author: {self.author},\n├── subreddit: {self.subreddit},\n├── content: {shorten_string(self.content, max_length=50) if short else self.content},\n└── crawl_date: {self.crawl_date})\"\"\""
}
] | import feedparser
import logging
import time
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from config.dicts import SUBREDDITS
from utils.logger import setup_logger
from models.post import Post | 1,182 |
class ContentGetter:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🌍')
# Get a list of Reddit Posts from an RSS feed
def from_subreddit(self, subreddit):
if not subreddit in SUBREDDITS:
self.logger.error(f"{subreddit} is not configured")
exit(1)
if SUBREDDITS[subreddit] == 'rss':
return self.from_rss_subreddit(subreddit)
elif SUBREDDITS[subreddit] == 'web':
return self.from_web(subreddit)
else:
self.logger.error(f"{subreddit} is not configured properly")
exit(1)
def from_rss_subreddit(self, subreddit):
data = feedparser.parse(f'https://reddit.com/r/{subreddit}/top.rss')
posts = []
failed_number = 0
if data.entries:
try:
for entry in data.entries:
paragraphs = BeautifulSoup(entry.content[0].value, 'html.parser').find_all('p')
content = ''.join([p.get_text() for p in paragraphs])
|
class ContentGetter:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🌍')
# Get a list of Reddit Posts from an RSS feed
def from_subreddit(self, subreddit):
if not subreddit in SUBREDDITS:
self.logger.error(f"{subreddit} is not configured")
exit(1)
if SUBREDDITS[subreddit] == 'rss':
return self.from_rss_subreddit(subreddit)
elif SUBREDDITS[subreddit] == 'web':
return self.from_web(subreddit)
else:
self.logger.error(f"{subreddit} is not configured properly")
exit(1)
def from_rss_subreddit(self, subreddit):
data = feedparser.parse(f'https://reddit.com/r/{subreddit}/top.rss')
posts = []
failed_number = 0
if data.entries:
try:
for entry in data.entries:
paragraphs = BeautifulSoup(entry.content[0].value, 'html.parser').find_all('p')
content = ''.join([p.get_text() for p in paragraphs]) | post_obj = Post( | 2 | 2023-12-14 13:00:22+00:00 | 2k |
asdfghjil/XMUCourseCheckin | checkin.py | [
{
"identifier": "getCheckinList",
"path": "checkinList.py",
"snippet": "def getCheckinList(session, http_header, userInfo, today=True):\n try:\n url = serverUrl + \"/getQdKbList\"\n data = {\n 'sign': userInfo['sign'],\n 'userType': userInfo['userType'],\n 'userCode': userInfo['userCode'],\n 'unitCode': userInfo['unitCode'],\n 'userName': userInfo['userName'],\n 'roleCode': userInfo['roleCode'],\n 'bm': None,\n 'xyMc': userInfo['xy'],\n 'zy': userInfo['zy'],\n 'bj': userInfo['bj'],\n 'xsCc': userInfo['xsCc'],\n 'scene': 1,\n 'key': 1 if today else 2\n }\n res = session.post(url, data=data, headers=http_header).text\n res = json.loads(res)\n if res['status'] != 1:\n print('get Checkin list failed')\n raise Exception('get Checkin list failed')\n # print(res)\n return res['Rows']\n\n except:\n print(json.dumps({\n \"status\": \"failed\",\n \"reason\": \"Get checkin list failed\"\n }, indent=4))\n raise"
},
{
"identifier": "printCheckinList",
"path": "checkinList.py",
"snippet": "def printCheckinList(session, http_header, userInfo, today=True, type=\"签到\"):\n rows = getCheckinList(session, http_header, userInfo, today)\n for id, lesson in enumerate(rows):\n print(id)\n print('课程名称:', lesson['kcMc'])\n print('上课时间:', lesson['skSj'])\n print('签到发起情况:', lesson['qdQkMc'])\n print(\"签到情况:\", lesson['xsQdQkMc'] + ('' if lesson['xsQdQk'] == '0' else f\"({lesson['skXsStr']})\"))\n # print('\\n')\n try:\n ckid = int(input(\"请输入\" + type + \"课程的序号:\"))\n except:\n print('输入错误')\n raise Exception('输入错误')\n if ckid < 0 or ckid >= len(rows):\n print('输入错误')\n raise Exception('输入错误')\n return rows[ckid]"
}
] | import json
import requests
import sys
import time
import random
from checkinList import getCheckinList, printCheckinList | 1,515 |
serverUrl = "https://tingke.xmu.edu.cn/app"
def getCheckinInfo(session, http_header, userInfo, lesson):
try:
url = serverUrl + "/getXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'xkKh': lesson['xkKh'],
'qdRq': lesson['qdRq'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'qdId': lesson['qdId'],
'isFz': lesson['isFz'],
'fzMc': lesson['fzMc']
}
res = session.post(url, data=data, headers=http_header)
if res.status_code != 200:
raise Exception('get Checkin info failed')
res = json.loads(res.text)
return res['Rows']
except:
print(json.dumps({
"status": "failed",
"reason": "Get checkin info failed"
}, indent=4))
raise
def checkin(session, http_header, userInfo, lesson, tips=True):
checkinInfo = getCheckinInfo(session, http_header, userInfo, lesson)
print('签到口令:', checkinInfo['klHm'])
# print(lesson['xsQdQk'], lesson['skXs'], lesson['bqMode'], lesson['qdNum'])
if tips:
if lesson['xsQdQk'] != '0' and lesson['skXs'] == '2' and (lesson['bqMode'] != '2' or lesson['qdNum'] != 1):
choice = input('您似乎已经线下签到过了,是否继续签到?(y/n)')
if choice != 'y':
return
if input('是否进行自动签到?(y/n)') != 'y':
return
try:
url = serverUrl + "/saveXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'bjMc': userInfo['bj'],
'zyMc': userInfo['zy'],
'xyMc': userInfo['xy'],
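            # add a small random offset (within ±0.0001 degrees) to the longitude/latitude from the check-in info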
'wzJd': str(float(checkinInfo['wzJd']) + (random.random() - 0.5) * 2 * 0.0001),
'wzWd': str(float(checkinInfo['wzWd']) + (random.random() - 0.5) * 2 * 0.0001),
'qdId': checkinInfo['uniqueCode'],
'xkKh': checkinInfo['xkKh'],
'skDd': lesson['skDd'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'isFace': None,
# 'isFace': checkinInfo['xsIsFace'],
'wzAcc': 0,
'bqMode': lesson['bqMode'],
'isFz': checkinInfo['isFz'],
'fzMc': lesson['fzMc'],
'djc': lesson['djc'],
'qdJc': lesson['qdJc']
}
# print("**********")
res = session.post(url, data=data, headers=http_header).text
res = json.loads(res)
if res['status'] == 1:
print('签到成功!')
return True
elif res['status'] == 6:
print('签到异常提醒:', res['msg'])
return False
else:
print('签到失败!', res['msg'])
raise Exception('签到失败:' + res['msg'])
except:
print(json.dumps({
"status": "failed",
"reason": "Checkin failed"
}, indent=4))
return False
def courseCheckin(session, http_header, userInfo):
|
serverUrl = "https://tingke.xmu.edu.cn/app"
def getCheckinInfo(session, http_header, userInfo, lesson):
try:
url = serverUrl + "/getXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'xkKh': lesson['xkKh'],
'qdRq': lesson['qdRq'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'qdId': lesson['qdId'],
'isFz': lesson['isFz'],
'fzMc': lesson['fzMc']
}
res = session.post(url, data=data, headers=http_header)
if res.status_code != 200:
raise Exception('get Checkin info failed')
res = json.loads(res.text)
return res['Rows']
except:
print(json.dumps({
"status": "failed",
"reason": "Get checkin info failed"
}, indent=4))
raise
def checkin(session, http_header, userInfo, lesson, tips=True):
checkinInfo = getCheckinInfo(session, http_header, userInfo, lesson)
print('签到口令:', checkinInfo['klHm'])
# print(lesson['xsQdQk'], lesson['skXs'], lesson['bqMode'], lesson['qdNum'])
if tips:
if lesson['xsQdQk'] != '0' and lesson['skXs'] == '2' and (lesson['bqMode'] != '2' or lesson['qdNum'] != 1):
choice = input('您似乎已经线下签到过了,是否继续签到?(y/n)')
if choice != 'y':
return
if input('是否进行自动签到?(y/n)') != 'y':
return
try:
url = serverUrl + "/saveXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'bjMc': userInfo['bj'],
'zyMc': userInfo['zy'],
'xyMc': userInfo['xy'],
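            # add a small random offset (within ±0.0001 degrees) to the longitude/latitude from the check-in info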
'wzJd': str(float(checkinInfo['wzJd']) + (random.random() - 0.5) * 2 * 0.0001),
'wzWd': str(float(checkinInfo['wzWd']) + (random.random() - 0.5) * 2 * 0.0001),
'qdId': checkinInfo['uniqueCode'],
'xkKh': checkinInfo['xkKh'],
'skDd': lesson['skDd'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'isFace': None,
# 'isFace': checkinInfo['xsIsFace'],
'wzAcc': 0,
'bqMode': lesson['bqMode'],
'isFz': checkinInfo['isFz'],
'fzMc': lesson['fzMc'],
'djc': lesson['djc'],
'qdJc': lesson['qdJc']
}
# print("**********")
res = session.post(url, data=data, headers=http_header).text
res = json.loads(res)
if res['status'] == 1:
print('签到成功!')
return True
elif res['status'] == 6:
print('签到异常提醒:', res['msg'])
return False
else:
print('签到失败!', res['msg'])
raise Exception('签到失败:' + res['msg'])
except:
print(json.dumps({
"status": "failed",
"reason": "Checkin failed"
}, indent=4))
return False
def courseCheckin(session, http_header, userInfo): | lesson = printCheckinList(session, http_header, userInfo, today=True) | 1 | 2023-12-13 10:42:20+00:00 | 2k |
Kanaries/kanaries-track | kanaries_track/client.py | [
{
"identifier": "config",
"path": "kanaries_track/config.py",
"snippet": "class Config:"
},
{
"identifier": "RequestClient",
"path": "kanaries_track/request.py",
"snippet": "class RequestClient:\n \"\"\"Client for sending events to kanaries-track server\"\"\"\n def __init__(\n self,\n *,\n host: str,\n auth_token: str,\n max_retries: int,\n timeout: int,\n verify: bool,\n proxy: Any\n ) -> None:\n self.host = host\n self.auth_token = auth_token\n self.max_retries = max_retries\n self.timeout = timeout\n self.verify = verify\n self.proxy = proxy\n self.session = Session()\n\n def _post(self, path: str, data: Dict[str, Any]) -> Response:\n \"\"\"Post data to url\"\"\"\n url = f\"{self.host}{path}\"\n\n @backoff.on_exception(\n backoff.expo,\n Exception,\n max_tries=self.max_retries,\n )\n def __post():\n return self.session.post(\n url,\n headers={\"Track-Key\": self.auth_token},\n json=data,\n timeout=self.timeout,\n verify=self.verify,\n proxies=self.proxy\n )\n\n return __post()\n\n def track(self, events: List[Dict[str, Any]]):\n \"\"\"Send events to kanaries-track server\"\"\"\n logger.debug(\"send requests to server, event count: %s\", len(events))\n try:\n resp = self._post(\"/ingest/track\", events)\n logger.debug(\"track resp: %s\", resp.text)\n except Exception as e:\n logger.error(\"Failed to send events to server: %s\", str(e))"
}
] | from typing import Dict, Any
from datetime import datetime
from threading import Thread
from functools import lru_cache
from dateutil.tz import tzlocal
from .config import config
from .request import RequestClient
import queue
import uuid
import logging
import time
import atexit | 1,388 | self.ruuning = False
def _upload(self):
"""Upload events"""
start_time = time.monotonic()
events = []
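        # gather up to upload_size events, stopping early once the upload interval has elapsed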
while len(events) < self.upload_size:
elapsed_seconds = time.monotonic() - start_time
if elapsed_seconds >= self.upload_interval_seconds:
break
try:
event = self.event_queue.get(block=True, timeout=self.upload_interval_seconds - elapsed_seconds)
events.append(event)
except queue.Empty:
break
except Exception as e:
logger.error("Failed to get event from queue: %s", str(e))
logger.debug("invoke uploading events, event count: %s", len(events))
if events:
self.request_client.track(events)
class Client:
"""Client for sending events to kanaries-track server"""
def __init__(
self,
*,
host: str,
auth_token: str,
debug: bool,
send: bool,
sync_send: bool,
max_queue_size: int,
timeout_seconds: int,
max_retries: int,
proxies: Dict[str, Any],
thread_count: int,
verify: bool,
upload_interval_seconds: int,
upload_size: int
):
self.host = host
self.auth_token = auth_token
self.debug = debug
self.send = send
self.sync_send = sync_send
self.max_queue_size = max_queue_size
self.timeout_seconds = timeout_seconds
self.max_retries = max_retries
self.proxies = proxies
self.thread_count = thread_count
self.verify = verify
self.upload_interval_seconds = upload_interval_seconds
self.upload_size = upload_size
self._consumers = []
self._request_client = RequestClient(
host=self.host,
auth_token=self.auth_token,
max_retries=self.max_retries,
timeout=self.timeout_seconds,
verify=self.verify,
proxy=self.proxies
)
self._event_queue = queue.Queue(self.max_queue_size)
if not self.sync_send and self.send:
for _ in range(self.thread_count):
consumer = _Consumer(
event_queue=self._event_queue,
request_client=self._request_client,
upload_size=self.upload_size,
upload_interval_seconds=self.upload_interval_seconds
)
consumer.start()
self._consumers.append(consumer)
atexit.register(self._end)
if self.debug:
logger.setLevel(logging.DEBUG)
def track(self, event: Dict[str, Any]):
"""Track an event"""
event = self._fill_data(event)
if not self.send:
return
if self.sync_send:
self._request_client.track([event])
else:
self._enqueue(event)
def _fill_data(self, event: Dict[str, Any]) -> Dict[str, Any]:
"""Fill data for an event"""
event["timestamp"] = datetime.now().replace(tzinfo=tzlocal()).isoformat()
event["message_id"] = str(uuid.uuid4())
return event
def _enqueue(self, event: Dict[str, Any]):
"""Enqueue an event"""
logger.debug("enqueue event: %s", event)
try:
self._event_queue.put(event, block=False)
except queue.Full:
logger.warning("Event queue is full, dropping event")
def _end(self):
"""End the client when the main thread exits"""
for consumer in self._consumers:
consumer.pause()
consumer.join()
@lru_cache(maxsize=1)
def get_client():
"""Get a client"""
return Client(
|
logger = logging.getLogger("kanaries_track")
class _Consumer(Thread):
def __init__(
self,
*,
event_queue: queue.Queue,
request_client: RequestClient,
upload_size: int,
upload_interval_seconds: int
) -> None:
super().__init__()
self.event_queue = event_queue
self.request_client = request_client
self.upload_size = upload_size
self.upload_interval_seconds = upload_interval_seconds
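        # daemon thread: it will not keep the interpreter alive once the main thread exits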
self.daemon = True
self.ruuning = True
def run(self):
"""Run the consumer"""
logger.debug("Consumer started")
while self.ruuning:
self._upload()
logger.debug("Consumer stopped")
def pause(self):
"""Pause the consumer"""
self.ruuning = False
def _upload(self):
"""Upload events"""
start_time = time.monotonic()
events = []
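        # gather up to upload_size events, stopping early once the upload interval has elapsed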
while len(events) < self.upload_size:
elapsed_seconds = time.monotonic() - start_time
if elapsed_seconds >= self.upload_interval_seconds:
break
try:
event = self.event_queue.get(block=True, timeout=self.upload_interval_seconds - elapsed_seconds)
events.append(event)
except queue.Empty:
break
except Exception as e:
logger.error("Failed to get event from queue: %s", str(e))
logger.debug("invoke uploading events, event count: %s", len(events))
if events:
self.request_client.track(events)
class Client:
"""Client for sending events to kanaries-track server"""
def __init__(
self,
*,
host: str,
auth_token: str,
debug: bool,
send: bool,
sync_send: bool,
max_queue_size: int,
timeout_seconds: int,
max_retries: int,
proxies: Dict[str, Any],
thread_count: int,
verify: bool,
upload_interval_seconds: int,
upload_size: int
):
self.host = host
self.auth_token = auth_token
self.debug = debug
self.send = send
self.sync_send = sync_send
self.max_queue_size = max_queue_size
self.timeout_seconds = timeout_seconds
self.max_retries = max_retries
self.proxies = proxies
self.thread_count = thread_count
self.verify = verify
self.upload_interval_seconds = upload_interval_seconds
self.upload_size = upload_size
self._consumers = []
self._request_client = RequestClient(
host=self.host,
auth_token=self.auth_token,
max_retries=self.max_retries,
timeout=self.timeout_seconds,
verify=self.verify,
proxy=self.proxies
)
self._event_queue = queue.Queue(self.max_queue_size)
if not self.sync_send and self.send:
for _ in range(self.thread_count):
consumer = _Consumer(
event_queue=self._event_queue,
request_client=self._request_client,
upload_size=self.upload_size,
upload_interval_seconds=self.upload_interval_seconds
)
consumer.start()
self._consumers.append(consumer)
atexit.register(self._end)
if self.debug:
logger.setLevel(logging.DEBUG)
def track(self, event: Dict[str, Any]):
"""Track an event"""
event = self._fill_data(event)
if not self.send:
return
if self.sync_send:
self._request_client.track([event])
else:
self._enqueue(event)
def _fill_data(self, event: Dict[str, Any]) -> Dict[str, Any]:
"""Fill data for an event"""
event["timestamp"] = datetime.now().replace(tzinfo=tzlocal()).isoformat()
event["message_id"] = str(uuid.uuid4())
return event
def _enqueue(self, event: Dict[str, Any]):
"""Enqueue an event"""
logger.debug("enqueue event: %s", event)
try:
self._event_queue.put(event, block=False)
except queue.Full:
logger.warning("Event queue is full, dropping event")
def _end(self):
"""End the client when the main thread exits"""
for consumer in self._consumers:
consumer.pause()
consumer.join()
@lru_cache(maxsize=1)
def get_client():
"""Get a client"""
return Client( | host=config.host, | 0 | 2023-12-06 06:01:32+00:00 | 2k |
Yingyue-L/Mamba-LLaVA | llava/model/llava_arch.py | [
{
"identifier": "build_vision_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identifier": "build_vision_projector",
"path": "llava/model/multimodal_projector/builder.py",
"snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')"
},
{
"identifier": "IGNORE_INDEX",
"path": "llava/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_PATCH_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
}
] | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 715 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"): | self.vision_tower = build_vision_tower(config, delay_load=True) | 0 | 2023-12-09 09:39:13+00:00 | 2k |
Theia-4869/MoSA | src/engine/evaluator.py | [
{
"identifier": "multilabel",
"path": "src/engine/eval/multilabel.py",
"snippet": "def get_continuous_ids(probe_labels: List[int]) -> Dict[int, int]:\ndef multihot(x: List[List[int]], nb_classes: int) -> np.ndarray:\ndef compute_map(\n scores: np.ndarray, multihot_targets: np.ndarray\n) -> Tuple[np.ndarray, np.ndarray, float, float]:\ndef compute_f1(\n multihot_targets: np.ndarray, scores: np.ndarray, threshold: float = 0.5\n) -> Tuple[float, float, float]:\ndef get_best_f1_scores(\n multihot_targets: np.ndarray, scores: np.ndarray, threshold_end: int\n) -> Dict[str, float]:"
},
{
"identifier": "singlelabel",
"path": "src/engine/eval/singlelabel.py",
"snippet": "def accuracy(y_probs, y_true):\ndef top_n_accuracy(y_probs, truths, n=1):\ndef compute_acc_auc(y_probs, y_true_ids):\ndef topks_correct(preds, labels, ks):\ndef topk_errors(preds, labels, ks):\ndef topk_accuracies(preds, labels, ks):"
},
{
"identifier": "logging",
"path": "src/utils/logging.py",
"snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"MOSA\", color=True):\ndef setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):"
}
] | import numpy as np
from collections import defaultdict
from typing import List, Union
from .eval import multilabel
from .eval import singlelabel
from ..utils import logging | 898 | #!/usr/bin/env python3
logger = logging.get_logger("MOSA")
class Evaluator():
"""
An evaluator with below logics:
1. find which eval module to use.
2. store the eval results, pretty print it in log file as well.
"""
def __init__(
self,
) -> None:
self.results = defaultdict(dict)
self.iteration = -1
self.threshold_end = 0.5
def update_iteration(self, iteration: int) -> None:
"""update iteration info"""
self.iteration = iteration
def update_result(self, metric: str, value: Union[float, dict]) -> None:
if self.iteration > -1:
key_name = "epoch_" + str(self.iteration)
else:
key_name = "final"
if isinstance(value, float):
self.results[key_name].update({metric: value})
else:
if metric in self.results[key_name]:
self.results[key_name][metric].update(value)
else:
self.results[key_name].update({metric: value})
def classify(self, probs, targets, test_data, multilabel=False):
"""
Evaluate classification result.
Args:
probs: np.ndarray for num_data x num_class, predicted probabilities
targets: np.ndarray for multilabel, list of integers for single label
test_labels: map test image ids to a list of class labels
"""
if not targets:
raise ValueError(
"When evaluating classification, need at least give targets")
if multilabel:
self._eval_multilabel(probs, targets, test_data)
else:
self._eval_singlelabel(probs, targets, test_data)
def _eval_singlelabel(
self,
scores: np.ndarray,
targets: List[int],
eval_type: str
) -> None:
"""
if number of labels > 2:
top1 and topk (5 by default) accuracy
if number of labels == 2:
top1 and rocauc
"""
| #!/usr/bin/env python3
logger = logging.get_logger("MOSA")
class Evaluator():
"""
An evaluator with below logics:
1. find which eval module to use.
2. store the eval results, pretty print it in log file as well.
"""
def __init__(
self,
) -> None:
self.results = defaultdict(dict)
self.iteration = -1
self.threshold_end = 0.5
def update_iteration(self, iteration: int) -> None:
"""update iteration info"""
self.iteration = iteration
def update_result(self, metric: str, value: Union[float, dict]) -> None:
if self.iteration > -1:
key_name = "epoch_" + str(self.iteration)
else:
key_name = "final"
if isinstance(value, float):
self.results[key_name].update({metric: value})
else:
if metric in self.results[key_name]:
self.results[key_name][metric].update(value)
else:
self.results[key_name].update({metric: value})
def classify(self, probs, targets, test_data, multilabel=False):
"""
Evaluate classification result.
Args:
probs: np.ndarray for num_data x num_class, predicted probabilities
targets: np.ndarray for multilabel, list of integers for single label
test_labels: map test image ids to a list of class labels
"""
if not targets:
raise ValueError(
"When evaluating classification, need at least give targets")
if multilabel:
self._eval_multilabel(probs, targets, test_data)
else:
self._eval_singlelabel(probs, targets, test_data)
def _eval_singlelabel(
self,
scores: np.ndarray,
targets: List[int],
eval_type: str
) -> None:
"""
if number of labels > 2:
top1 and topk (5 by default) accuracy
if number of labels == 2:
top1 and rocauc
""" | acc_dict = singlelabel.compute_acc_auc(scores, targets) | 1 | 2023-12-06 07:50:16+00:00 | 2k |
IBM/AI-assisted-chemical-sensing | src/chemsense/vision/cli/classification_analysis.py | [
{
"identifier": "setup_basic_logging_for_scripts",
"path": "src/chemsense/vision/logging_configuration.py",
"snippet": "def setup_basic_logging_for_scripts() -> None:\n \"\"\"Setup basic stdout logging for scripts.\"\"\"\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n )"
},
{
"identifier": "ENCODERS_REGISTRY",
"path": "src/chemsense/vision/modeling/encoders.py",
"snippet": "ENCODERS_REGISTRY = {\n \"mobilenetv2_35_96\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"google/mobilenet_v2_0.35_96\"),\n \"model\": MobileNetV2Model.from_pretrained(\"google/mobilenet_v2_0.35_96\"),\n \"size\": 96,\n },\n \"mobilenetv2_100_224\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"google/mobilenet_v2_1.0_224\"),\n \"model\": MobileNetV2Model.from_pretrained(\"google/mobilenet_v2_1.0_224\"),\n \"size\": 224,\n },\n \"mobilenetv2_140_224\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"google/mobilenet_v2_1.4_224\"),\n \"model\": MobileNetV2Model.from_pretrained(\"google/mobilenet_v2_1.4_224\"),\n \"size\": 224,\n },\n \"resnet_18\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"microsoft/resnet-18\"),\n \"model\": ResNetModel.from_pretrained(\"microsoft/resnet-18\"),\n \"size\": 224,\n },\n \"resnet_50\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"microsoft/resnet-50\"),\n \"model\": ResNetModel.from_pretrained(\"microsoft/resnet-50\"),\n \"size\": 224,\n },\n \"resnet_101\": {\n \"processor\": AutoImageProcessor.from_pretrained(\"microsoft/resnet-101\"),\n \"model\": ResNetModel.from_pretrained(\"microsoft/resnet-101\"),\n \"size\": 224,\n },\n \"vit_base_224\": {\n \"processor\": ViTImageProcessor.from_pretrained(\"google/vit-base-patch16-224\"),\n \"model\": ViTModel.from_pretrained(\"google/vit-base-patch16-224\"),\n \"size\": 224,\n },\n \"vit_base_384\": {\n \"processor\": ViTImageProcessor.from_pretrained(\"google/vit-base-patch16-384\"),\n \"model\": ViTModel.from_pretrained(\"google/vit-base-patch16-384\"),\n \"size\": 384,\n },\n \"vit_large_224\": {\n \"processor\": ViTImageProcessor.from_pretrained(\"google/vit-large-patch16-224\"),\n \"model\": ViTModel.from_pretrained(\"google/vit-large-patch16-224\"),\n \"size\": 224,\n },\n \"beit_base_224\": {\n \"processor\": BeitImageProcessor.from_pretrained(\n \"microsoft/beit-base-patch16-224-pt22k-ft22k\"\n ),\n \"model\": BeitModel.from_pretrained(\n \"microsoft/beit-base-patch16-224-pt22k-ft22k\"\n ),\n \"size\": 224,\n },\n \"beit_base_384\": {\n \"processor\": BeitImageProcessor.from_pretrained(\n \"microsoft/beit-base-patch16-384\"\n ),\n \"model\": BeitModel.from_pretrained(\"microsoft/beit-base-patch16-384\"),\n \"size\": 384,\n },\n \"beit_large_224\": {\n \"processor\": BeitImageProcessor.from_pretrained(\n \"microsoft/beit-large-patch16-224-pt22k-ft22k\"\n ),\n \"model\": BeitModel.from_pretrained(\n \"microsoft/beit-large-patch16-224-pt22k-ft22k\"\n ),\n \"size\": 224,\n },\n}"
}
] | from pathlib import Path
from chemsense.vision.modeling.classification import (
attach_classification_head_fewshots,
attach_classification_head_kfold,
attach_classification_head_loco,
attach_classification_head_loco_sugars,
)
from ..logging_configuration import setup_basic_logging_for_scripts
from ..modeling.encoders import ENCODERS_REGISTRY
import click
import numpy as np
import pandas as pd
| 1,347 | """Training and testing models with extracted features."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--validation",
type=str,
default="kfold",
help="Validation strategy. Supported types are kfold, LOCO, few_shots and Sugar_LOCO.",
)
@click.option(
"--number_of_folds",
type=int,
default=5,
help="number of folds to be used in case of kfold validation.",
)
@click.option(
"--number_of_components",
type=int,
default=30,
help="Max number of principal components to be used.",
)
@click.option(
"--features_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to directory containing extracted features.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
validation: str,
number_of_folds: int,
number_of_components: int,
features_path: Path,
output_path: Path,
) -> None:
| """Training and testing models with extracted features."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--validation",
type=str,
default="kfold",
help="Validation strategy. Supported types are kfold, LOCO, few_shots and Sugar_LOCO.",
)
@click.option(
"--number_of_folds",
type=int,
default=5,
help="number of folds to be used in case of kfold validation.",
)
@click.option(
"--number_of_components",
type=int,
default=30,
help="Max number of principal components to be used.",
)
@click.option(
"--features_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to directory containing extracted features.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
validation: str,
number_of_folds: int,
number_of_components: int,
features_path: Path,
output_path: Path,
) -> None:
| setup_basic_logging_for_scripts()
| 0 | 2023-12-05 15:56:12+00:00 | 2k |
pymike00/tinychat | tests/llms/test_google_handler.py | [
{
"identifier": "GoogleAIHandler",
"path": "tinychat/llms/google.py",
"snippet": "class GoogleAIHandler:\n \"\"\"\n Handler class to interact with the OpenAI models.\n\n Returns chat responses and stores the chat history.\n\n TODO: add chat message dataclass so that we can enforce validation of\n message format that is needed for working client requests to the API?\n \"\"\"\n\n def __init__(self):\n self._messages = []\n self._client = GoogleAIClient()\n \n def export_conversation(self) -> str:\n string_conversation = \"\"\n for message in self._messages:\n print(message)\n if message[\"role\"] == \"user\":\n if string_conversation != \"\":\n string_conversation += \"\\n\\n\"\n string_conversation += f\"You: {message['parts'][0]['text']}\"\n else:\n string_conversation += f\"LLM: {message['parts'][0]['text']}\"\n return string_conversation\n\n def stream_response(self, user_input: str) -> Generator[str, None, None]:\n \"\"\"\n Yield stream responses from the client as they are received.\n\n This method sends the user input to the client and then yields each piece\n of the language model's response as it is received in real-time. After the\n streaming is complete, it updates the message list with the user input and\n the full language model response.\n\n :param user_input: The input string from the user to be sent to the model.\n :return: A generator yielding the model's response in streamed parts.\n \"\"\"\n self._messages.append({\"parts\": [{\"text\": user_input}], \"role\": \"user\"})\n stream = self._client.perform_stream_request(self._messages)\n lm_response = \"\"\n for event in stream.events(): # type: ignore\n if event.data != \"[DONE]\":\n json_load = json.loads(event.data)[\"candidates\"][0][\"content\"][\"parts\"][0]\n response_piece = json_load[\"text\"]\n lm_response += response_piece\n yield response_piece\n self._messages.append({\"parts\": [{\"text\": lm_response}], \"role\": \"model\"})"
},
{
"identifier": "GoogleAIClient",
"path": "tinychat/llms/google.py",
"snippet": "class GoogleAIClient(BaseLLMClient):\n \"\"\"\n Simple client for interacting with the Google API.\n Currently only supports the chat completions endpoint.\n\n :param model_name: The name of the model to be used for chat requests.\n \"\"\"\n\n BASE_GEMINI_ENDPOINT = \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent\"\n SAFETY_SETTINGS = [\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n \"threshold\": \"BLOCK_NONE\",\n },\n {\n \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n \"threshold\": \"BLOCK_NONE\",\n },\n {\n \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"threshold\": \"BLOCK_NONE\",\n },\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n \"threshold\": \"BLOCK_NONE\",\n },\n ]\n\n def __init__(self) -> None:\n super().__init__(api_key_name=GOOGLE_API_KEY_NAME)\n\n @property\n def gemini_endpoint(self):\n return f\"{self.BASE_GEMINI_ENDPOINT}?alt=sse&key={self.api_key}\"\n\n @property\n def gemini_headers(self):\n return {\"Content-Type\": \"application/json\"}\n\n def perform_stream_request(self, messages: list[dict]) -> SSEClient:\n # info: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events\n data = {\"contents\": messages, \"safetySettings\": self.SAFETY_SETTINGS}\n response = requests.post(\n self.gemini_endpoint,\n headers=self.gemini_headers, # type: ignore\n json=data,\n stream=True\n )\n if response.status_code != 200:\n raise ValueError(\n f\"Server responded with an error. Status Code: {response.status_code}\"\n )\n return SSEClient(event_source=response) # type: ignore"
}
] | import json
import unittest
from unittest.mock import MagicMock, Mock, patch
from tinychat.llms.google import GoogleAIHandler, GoogleAIClient | 1,204 |
class TestGoogleGeminiHandlerStreaming(unittest.TestCase):
@patch.object(GoogleAIClient, "perform_stream_request")
def test_stream_response(self, mock_perform_stream_request):
# Create a mock SSEClient with a mock events method
mock_sse_client = MagicMock()
mock_stream = iter(
[
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 1"}]}}
]
}
)
),
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 2"}]}}
]
}
)
),
Mock(data="[DONE]"),
]
)
mock_sse_client.events.return_value = mock_stream
mock_perform_stream_request.return_value = mock_sse_client
|
class TestGoogleGeminiHandlerStreaming(unittest.TestCase):
@patch.object(GoogleAIClient, "perform_stream_request")
def test_stream_response(self, mock_perform_stream_request):
# Create a mock SSEClient with a mock events method
mock_sse_client = MagicMock()
mock_stream = iter(
[
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 1"}]}}
]
}
)
),
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 2"}]}}
]
}
)
),
Mock(data="[DONE]"),
]
)
mock_sse_client.events.return_value = mock_stream
mock_perform_stream_request.return_value = mock_sse_client
| handler = GoogleAIHandler() | 0 | 2023-12-11 20:40:02+00:00 | 2k |
nickruggeri/hypergraph-message-passing | test/model/test_sampling/test_helper_functions.py | [
{
"identifier": "_community_count_combinations",
"path": "src/model/sampling.py",
"snippet": "def _community_count_combinations(\n n_nodes: int, comm_counts: list[int]\n) -> Iterable[list[int]]:\n r\"\"\"Generate all possible community count vectors :math::`\\#`.\n\n Parameters\n ----------\n n_nodes: number of nodes in the hyperedges.\n comm_counts: list of community counts.\n The entry i of the list specifies the total number of nodes in community i in\n the full hypergraph.\n\n Yields\n -------\n All the possible vectors of community counts :math::`\\#`.\n \"\"\"\n K = len(comm_counts)\n\n yield from (\n counts\n for counts in _community_count_combinations_recursive(n_nodes, comm_counts)\n if len(counts) == K\n )"
},
{
"identifier": "_log_n_sharp",
"path": "src/model/sampling.py",
"snippet": "def _log_n_sharp(comm_counts: list[int], hye_comm_counts: list[int]) -> float:\n r\"\"\"compute the logarithm of the :math::`N_{\\#}` factor.\n\n Parameters\n ----------\n comm_counts: the number of nodes in every community of the hypergraph, as a list of\n length K, where K is the number of communities.\n hye_comm_counts: the number of nodes in the hyperedge contained in every community,\n as a list of length K.\n\n Returns\n -------\n The value of :math::`N_{\\#}`.\n \"\"\"\n if len(comm_counts) != len(hye_comm_counts):\n raise ValueError(\"The inputs have different lengths.\")\n return sum(\n log_binomial_coefficient(a, b) for a, b in zip(comm_counts, hye_comm_counts)\n )"
},
{
"identifier": "_sample_hye_from_count",
"path": "src/model/sampling.py",
"snippet": "def _sample_hye_from_count(\n comm_nodes: dict[int, np.ndarray],\n hye_comm_counts: list[int],\n rng: np.random.Generator | None,\n) -> tuple[int]:\n \"\"\"Sample a hyperedge given, for every community, the number of nodes in the\n hyperedge belonging to the community.\n\n Parameters\n ----------\n comm_nodes: dictionary specifying the nodes belonging to each community in the\n hypergraph.\n hye_comm_counts: list specifying at every entry i the number of nodes belonging to\n community i in the hyperedge to be sampled.\n rng: optional numpy random generator, to be utilized for sampling.\n\n Returns\n -------\n A hyperedge sampled satisfying hye_comm_counts.\n \"\"\"\n if rng is None:\n rng = np.random.default_rng()\n\n hye = []\n for comm, node_count in zip(comm_nodes, hye_comm_counts):\n new_nodes = list(rng.choice(comm_nodes[comm], size=node_count, replace=False))\n hye.extend(new_nodes)\n\n return tuple(sorted(map(int, hye)))"
}
] | import itertools
import numpy as np
import pytest
from collections import Counter
from typing import Dict, List
from scipy import special
from src.model.sampling import (
_community_count_combinations,
_log_n_sharp,
_sample_hye_from_count,
) | 1,425 |
n_nodes_all = [2, 5, 10, 25, 50, 100]
rng = np.random.default_rng(seed=123)
hye_comm_counts_all = [
rng.integers(low=0, high=max_val, size=q)
for _ in range(10)
for max_val in [5, 10]
for q in [2, 3, 4, 5]
]
comm_counts_all = sum(
(
[
hye_comm_count + rng.integers(low=0, high=high, size=len(hye_comm_count))
for hye_comm_count in hye_comm_counts_all
]
for high in [1, 5, 10]
),
start=[],
)
hye_comm_counts_all = [list(x) for x in hye_comm_counts_all]
comm_counts_all = [list(x) for x in comm_counts_all]
def generate_communities(comm_counts: List[int]) -> Dict[int, np.ndarray]:
N = sum(comm_counts)
K = len(comm_counts)
rng_tmp = np.random.default_rng(seed=21)
all_nodes = np.arange(N)
rng_tmp.shuffle(all_nodes)
cumcount = [0] + list(np.cumsum(comm_counts))
comm_nodes = dict()
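    # slice the shuffled node ids into consecutive blocks, one block per community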
for comm in range(K):
comm_nodes[comm] = all_nodes[cumcount[comm] : cumcount[comm + 1]]
return comm_nodes
commm_nodes_all = [generate_communities(comm_counts) for comm_counts in comm_counts_all]
########################################################################################
# Test _community_count_combinations, _log_n_sharp
@pytest.mark.parametrize(
"n_nodes, hye_comm_counts", itertools.product(n_nodes_all, hye_comm_counts_all)
)
def test_community_count_combinations_brute_force(n_nodes, hye_comm_counts):
all_combinations = itertools.product(*(range(a + 1) for a in hye_comm_counts))
all_combinations = [list(comb) for comb in all_combinations if n_nodes == sum(comb)]
assert sorted(all_combinations) == sorted(
_community_count_combinations(n_nodes, hye_comm_counts)
)
@pytest.mark.parametrize(
"comm_counts, hye_comm_counts",
zip(comm_counts_all, hye_comm_counts_all * 3),
)
def test_log_n_sharp_brute_force(comm_counts, hye_comm_counts):
brute_force = [special.binom(a, b) for a, b in zip(comm_counts, hye_comm_counts)]
brute_force = np.sum(np.log(brute_force))
assert np.allclose(brute_force, _log_n_sharp(comm_counts, hye_comm_counts))
########################################################################################
# Test _sample_hye_from_count
@pytest.fixture(
params=(
(comm_nodes, hye_comm_counts, rng)
for comm_nodes, hye_comm_counts in zip(commm_nodes_all, hye_comm_counts_all * 3)
        for rng in [None, np.random.default_rng(seed=34)]
)
)
def sampled_hye_with_info(request):
comm_nodes, hye_comm_counts, rng = request.param
node_to_comm = {node: comm for comm in comm_nodes for node in comm_nodes[comm]}
return (
|
n_nodes_all = [2, 5, 10, 25, 50, 100]
rng = np.random.default_rng(seed=123)
hye_comm_counts_all = [
rng.integers(low=0, high=max_val, size=q)
for _ in range(10)
for max_val in [5, 10]
for q in [2, 3, 4, 5]
]
comm_counts_all = sum(
(
[
hye_comm_count + rng.integers(low=0, high=high, size=len(hye_comm_count))
for hye_comm_count in hye_comm_counts_all
]
for high in [1, 5, 10]
),
start=[],
)
hye_comm_counts_all = [list(x) for x in hye_comm_counts_all]
comm_counts_all = [list(x) for x in comm_counts_all]
def generate_communities(comm_counts: List[int]) -> Dict[int, np.ndarray]:
N = sum(comm_counts)
K = len(comm_counts)
rng_tmp = np.random.default_rng(seed=21)
all_nodes = np.arange(N)
rng_tmp.shuffle(all_nodes)
cumcount = [0] + list(np.cumsum(comm_counts))
comm_nodes = dict()
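    # slice the shuffled node ids into consecutive blocks, one block per community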
for comm in range(K):
comm_nodes[comm] = all_nodes[cumcount[comm] : cumcount[comm + 1]]
return comm_nodes
commm_nodes_all = [generate_communities(comm_counts) for comm_counts in comm_counts_all]
########################################################################################
# Test _community_count_combinations, _log_n_sharp
@pytest.mark.parametrize(
"n_nodes, hye_comm_counts", itertools.product(n_nodes_all, hye_comm_counts_all)
)
def test_community_count_combinations_brute_force(n_nodes, hye_comm_counts):
all_combinations = itertools.product(*(range(a + 1) for a in hye_comm_counts))
all_combinations = [list(comb) for comb in all_combinations if n_nodes == sum(comb)]
assert sorted(all_combinations) == sorted(
_community_count_combinations(n_nodes, hye_comm_counts)
)
@pytest.mark.parametrize(
"comm_counts, hye_comm_counts",
zip(comm_counts_all, hye_comm_counts_all * 3),
)
def test_log_n_sharp_brute_force(comm_counts, hye_comm_counts):
brute_force = [special.binom(a, b) for a, b in zip(comm_counts, hye_comm_counts)]
brute_force = np.sum(np.log(brute_force))
assert np.allclose(brute_force, _log_n_sharp(comm_counts, hye_comm_counts))
########################################################################################
# Test _sample_hye_from_count
@pytest.fixture(
params=(
(comm_nodes, hye_comm_counts, rng)
for comm_nodes, hye_comm_counts in zip(commm_nodes_all, hye_comm_counts_all * 3)
for rgn in [None, np.random.default_rng(seed=34)]
)
)
def sampled_hye_with_info(request):
comm_nodes, hye_comm_counts, rng = request.param
node_to_comm = {node: comm for comm in comm_nodes for node in comm_nodes[comm]}
return ( | _sample_hye_from_count(comm_nodes, hye_comm_counts, rng), | 2 | 2023-12-06 22:01:38+00:00 | 2k |
sailfishos-chum/sailfishos-chum.github.io | chumweb/package.py | [
{
"identifier": "CONFIG",
"path": "chumweb/config.py",
"snippet": "CONFIG = init_config()"
},
{
"identifier": "RemoteImage",
"path": "chumweb/remote_image.py",
"snippet": "class RemoteImage:\n \"\"\"\n An image located on a remote computer that can be downloaded locally\n\n Attributes:\n remote_url URL to the icon on a remote server\n local_url Path to locally cached (and scaled) version of the icon\n \"\"\"\n remote_url: str\n local_path: str | None = None\n\n def __init__(self, remote_url):\n self.remote_url = remote_url"
}
] | import logging
import enum
import re
from dataclasses import dataclass, field
from datetime import datetime, UTC
from enum import StrEnum
from types import NoneType
from typing import List, Dict, Self, Set, Optional
from markupsafe import Markup
from . import CONFIG
from .remote_image import RemoteImage
from yaml import safe_load as yaml_load
from yaml.parser import ParserError
from yaml.scanner import ScannerError | 675 | """
Data classes for package metadata. It is also responsible for parsing the metadate of a single package
"""
logger = logging.getLogger(__name__)
class PackageApplicationCategory(StrEnum):
"""
Desktop application categories, from https://specifications.freedesktop.org/menu-spec/latest/apa.html
"""
accessibility = "Accessibility" # Added by Chum?
audio_video = "AudioVideo"
audio = "Audio"
video = "Video"
development = "Development"
education = "Education"
game = "Game"
graphics = "Graphics"
library = "Library" # Added by Chum?
maps = "Maps" # Added by Chum?
network = "Network"
office = "Office"
science = "Science"
settings = "Settings"
system = "System"
utility = "Utility"
other = "Other"
class PackageApplicationType(StrEnum):
"""
Type of the application that the package provides
Enums are based on https://www.freedesktop.org/software/appstream/docs/sect-AppStream-YAML.html#field-dep11-type
"""
generic = enum.auto()
console_application = "console-application"
desktop_application = "desktop-application"
addon = enum.auto()
codec = enum.auto()
inputmethod = enum.auto()
firmware = enum.auto()
@dataclass
class PackageVersion:
epoch: str
ver: str
rel: str
def __init__(self, epoch, ver, rel):
self.epoch = epoch
self.ver = ver
self.rel = rel
def to_short_str(self) -> str:
return self.ver.split('+', 2)[0]
def to_full_str(self) -> str:
return f"{self.ver}-{self.rel}"
@dataclass
class Package:
"""
Metadata of a RPM package with associated Chum metadata
"""
name: str
summary: str | None = None
description: str | Markup | None = None
title: str | None = None
| """
Data classes for package metadata. It is also responsible for parsing the metadate of a single package
"""
logger = logging.getLogger(__name__)
class PackageApplicationCategory(StrEnum):
"""
Desktop application categories, from https://specifications.freedesktop.org/menu-spec/latest/apa.html
"""
accessibility = "Accessibility" # Added by Chum?
audio_video = "AudioVideo"
audio = "Audio"
video = "Video"
development = "Development"
education = "Education"
game = "Game"
graphics = "Graphics"
library = "Library" # Added by Chum?
maps = "Maps" # Added by Chum?
network = "Network"
office = "Office"
science = "Science"
settings = "Settings"
system = "System"
utility = "Utility"
other = "Other"
class PackageApplicationType(StrEnum):
"""
Type of the application that the package provides
Enums are based on https://www.freedesktop.org/software/appstream/docs/sect-AppStream-YAML.html#field-dep11-type
"""
generic = enum.auto()
console_application = "console-application"
desktop_application = "desktop-application"
addon = enum.auto()
codec = enum.auto()
inputmethod = enum.auto()
firmware = enum.auto()
@dataclass
class PackageVersion:
epoch: str
ver: str
rel: str
def __init__(self, epoch, ver, rel):
self.epoch = epoch
self.ver = ver
self.rel = rel
def to_short_str(self) -> str:
return self.ver.split('+', 2)[0]
def to_full_str(self) -> str:
return f"{self.ver}-{self.rel}"
@dataclass
class Package:
"""
Metadata of a RPM package with associated Chum metadata
"""
name: str
summary: str | None = None
description: str | Markup | None = None
title: str | None = None | icon: RemoteImage | None = None | 1 | 2023-12-14 19:25:31+00:00 | 2k |
oVo-HxBots/URLUploadBot | Uploader/youtube.py | [
{
"identifier": "get_file_extension_from_url",
"path": "Uploader/functions/help_ytdl.py",
"snippet": "def get_file_extension_from_url(url):\n url_path = urlparse(url).path\n basename = os.path.basename(url_path)\n return basename.split(\".\")[-1]"
},
{
"identifier": "get_resolution",
"path": "Uploader/functions/help_ytdl.py",
"snippet": "def get_resolution(info_dict):\n if {\"width\", \"height\"} <= info_dict.keys():\n width = int(info_dict['width'])\n height = int(info_dict['height'])\n # https://support.google.com/youtube/answer/6375112\n elif info_dict['height'] == 1080:\n width = 1920\n height = 1080\n elif info_dict['height'] == 720:\n width = 1280\n height = 720\n elif info_dict['height'] == 480:\n width = 854\n height = 480\n elif info_dict['height'] == 360:\n width = 640\n height = 360\n elif info_dict['height'] == 240:\n width = 426\n height = 240\n return (width, height)"
}
] | import os
import wget
import asyncio
from urllib.parse import urlparse
from opencc import OpenCC
from youtube_dl import YoutubeDL
from pyrogram import Client, filters, enums
from pyrogram.types import Message
from pyrogram import Client, filters
from Uploader.config import Config
from sample_config import Config
from Uploader.functions.help_ytdl import get_file_extension_from_url, get_resolution | 979 | # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
else:
YTDL_REGEX = (r"^((?:https?:)?\/\/)")
s2tw = OpenCC('s2tw.json').convert
@Client.on_callback_query(filters.regex("^ytdl_audio$"))
async def callback_query_ytdl_audio(_, callback_query):
try:
url = callback_query.message.reply_to_message.text
ydl_opts = {
'format': 'bestaudio',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': True
}
with YoutubeDL(ydl_opts) as ydl:
message = callback_query.message
await message.reply_chat_action(enums.ChatAction.TYPING)
info_dict = ydl.extract_info(url, download=False)
# download
await callback_query.edit_message_text("**Downloading audio...**")
ydl.process_info(info_dict)
# upload
audio_file = ydl.prepare_filename(info_dict)
task = asyncio.create_task(send_audio(message, info_dict,
audio_file))
while not task.done():
await asyncio.sleep(3)
await message.reply_chat_action(enums.ChatAction.UPLOAD_DOCUMENT)
await message.reply_chat_action(enums.ChatAction.CANCEL)
await message.delete()
except Exception as e:
await message.reply_text(e)
await callback_query.message.reply_to_message.delete()
await callback_query.message.delete()
async def send_audio(message: Message, info_dict, audio_file):
basename = audio_file.rsplit(".", 1)[-2]
if info_dict['ext'] == 'webm':
audio_file_weba = f"{basename}.weba"
os.rename(audio_file, audio_file_weba)
audio_file = audio_file_weba
thumbnail_url = info_dict['thumbnail']
| # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
else:
YTDL_REGEX = (r"^((?:https?:)?\/\/)")
s2tw = OpenCC('s2tw.json').convert
@Client.on_callback_query(filters.regex("^ytdl_audio$"))
async def callback_query_ytdl_audio(_, callback_query):
try:
url = callback_query.message.reply_to_message.text
ydl_opts = {
'format': 'bestaudio',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': True
}
with YoutubeDL(ydl_opts) as ydl:
message = callback_query.message
await message.reply_chat_action(enums.ChatAction.TYPING)
info_dict = ydl.extract_info(url, download=False)
# download
await callback_query.edit_message_text("**Downloading audio...**")
ydl.process_info(info_dict)
# upload
audio_file = ydl.prepare_filename(info_dict)
task = asyncio.create_task(send_audio(message, info_dict,
audio_file))
while not task.done():
await asyncio.sleep(3)
await message.reply_chat_action(enums.ChatAction.UPLOAD_DOCUMENT)
await message.reply_chat_action(enums.ChatAction.CANCEL)
await message.delete()
except Exception as e:
await message.reply_text(e)
await callback_query.message.reply_to_message.delete()
await callback_query.message.delete()
async def send_audio(message: Message, info_dict, audio_file):
basename = audio_file.rsplit(".", 1)[-2]
if info_dict['ext'] == 'webm':
audio_file_weba = f"{basename}.weba"
os.rename(audio_file, audio_file_weba)
audio_file = audio_file_weba
thumbnail_url = info_dict['thumbnail'] | thumbnail_file = f"{basename}.{get_file_extension_from_url(thumbnail_url)}" | 0 | 2023-12-09 03:24:55+00:00 | 2k |
Jiawei-Yao0812/PixelFormer_DGR | pixelformer/networks/PQI.py | [
{
"identifier": "resize",
"path": "pixelformer/networks/utils.py",
"snippet": "def resize(input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n warning=True):\n if warning:\n if size is not None and align_corners:\n input_h, input_w = tuple(int(x) for x in input.shape[2:])\n output_h, output_w = tuple(int(x) for x in size)\n if output_h > input_h or output_w > output_h:\n if ((output_h > 1 and output_w > 1 and input_h > 1\n and input_w > 1) and (output_h - 1) % (input_h - 1)\n and (output_w - 1) % (input_w - 1)):\n warnings.warn(\n f'When align_corners={align_corners}, '\n 'the output would more aligned if '\n f'input size {(input_h, input_w)} is `x+1` and '\n f'out size {(output_h, output_w)} is `nx+1`')\n if isinstance(size, torch.Size):\n size = tuple(int(x) for x in size)\n return F.interpolate(input, size, scale_factor, mode, align_corners)"
},
{
"identifier": "normal_init",
"path": "pixelformer/networks/utils.py",
"snippet": "def normal_init(module, mean=0, std=1, bias=0):\n if hasattr(module, 'weight') and module.weight is not None:\n nn.init.normal_(module.weight, mean, std)\n if hasattr(module, 'bias') and module.bias is not None:\n nn.init.constant_(module.bias, bias)"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from .utils import resize, normal_init | 769 |
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
# == if batch size = 1, BN is not supported, change to GN
if pool_scale == 1: norm_cfg = dict(type='GN', requires_grad=True, num_groups=256)
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=norm_cfg,
act_cfg=self.act_cfg)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x)
|
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
# == if batch size = 1, BN is not supported, change to GN
if pool_scale == 1: norm_cfg = dict(type='GN', requires_grad=True, num_groups=256)
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=norm_cfg,
act_cfg=self.act_cfg)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x) | upsampled_ppm_out = resize( | 0 | 2023-12-13 20:50:32+00:00 | 2k |
kramerlab/PeerLearning | peer.py | [
{
"identifier": "SuggestionBuffer",
"path": "suggestionbuffer.py",
"snippet": "class SuggestionBuffer:\n def __init__(self, capacity):\n self.buffer = deque(maxlen=capacity)\n\n def add(self, *args):\n self.buffer.append(args)\n\n def sample(self, batch_size):\n if len(self.buffer) > batch_size:\n return random.sample(self.buffer, batch_size)\n # else return None\n\n def latest(self):\n return [self.buffer[-1]]"
},
{
"identifier": "make_env",
"path": "utils.py",
"snippet": "def make_env(env_str, n_envs=1, **env_args):\n envs = []\n for _ in range(n_envs):\n def env_func():\n env = Monitor(gym.make(env_str, **env_args))\n env.seed(new_random_seed())\n return env\n\n envs.append(env_func)\n return DummyVecEnv(envs)"
}
] | from abc import ABC
from typing import Type
from suggestionbuffer import SuggestionBuffer
from utils import make_env
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
import itertools as it
import numpy as np
import torch | 1,381 | lr=0.95, switch_ratio=0, use_advantage=False,
max_peer_epochs=1_000_000_000):
"""
:param peers: An iterable of peer agents
:param lr: The learning rate for trust and agent values
:param switch_ratio: switch_ratio == 0 means no switching
:param use_advantage: use advantage instead of value for AV updates
"""
self.peers = peers
self.lr = lr
self.switch_ratio = switch_ratio
self.active_peer = None # index of currently learning peer
self.solo_epoch = False
self.use_advantage = use_advantage
self.max_peer_epochs = max_peer_epochs
if use_agent_values:
self.agent_values = np.full(len(peers), init_agent_values,
dtype=np.float32)
key = "agent_values"
for peer in peers:
peer.n_peers = len(peers)
peer.group = self
# setup agent values
if use_agent_values:
peer.peer_values[key] = self.agent_values # noqa (Eq. 6)
peer.peer_value_functions[key] = self._update_agent_values
def _update_agent_values(self, batch_size=10):
""" Updates the agent values with samples from the peers' buffers"""
targets = np.zeros_like(self.peers, dtype=np.float32)
counts = np.zeros_like(self.peers, dtype=np.float32)
for peer in self.peers:
bs = batch_size // len(self.peers)
# reward, action, peer, new_obs, old_obs
if peer.buffer is not None:
batch = peer.buffer.sample(bs)
if batch is None: # buffer not sufficiently full
return
obs = np.array([b[3] for b in batch]).reshape(bs, -1)
v = peer.value(obs)
if self.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)
prev_v = peer.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no advantage (see Eq. 5)
for i in range(len(batch)): # Eq. 8
target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7
def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):
""" The outer peer learning routine. """
assert len(callbacks) == len(self.peers)
# more solo epochs
boost_single = 0 < self.switch_ratio < 1
if boost_single:
self.switch_ratio = 1 / self.switch_ratio
self.solo_epoch = False
peer_epochs = 0
for i in range(n_epochs):
# don't do peer learning forever
if peer_epochs < self.max_peer_epochs:
# ratio of 0 never performs a solo episode
if (i % (1 + self.switch_ratio) == 1) ^ boost_single:
self.solo_epoch = True
else:
peer_epochs += 1
else: # budget spent
self.solo_epoch = True
for p, peer, callback in zip(it.count(), self.peers, callbacks):
self.active_peer = p
peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,
callback=callback, tb_log_name=f"Peer{p}",
reset_num_timesteps=False,
log_interval=None, **kwargs)
# update epoch for temperature decay
peer.epoch += 1
self.active_peer = None
def __len__(self):
return len(self.peers)
def make_peer_class(cls: Type[OffPolicyAlgorithm]):
""" Creates a mixin with the corresponding algorithm class.
:param cls: The learning algorithm (needs to have a callable critic).
:return: The mixed in peer agent class.
"""
class Peer(cls, ABC):
""" Abstract Peer class
needs to be mixed with a suitable algorithm. """
def __init__(self, temperature, temp_decay, algo_args, env,
use_trust=False, use_critic=False, init_trust_values=200,
buffer_size=1000, follow_steps=10, seed=None,
use_trust_buffer=True, solo_training=False,
peers_sample_with_noise=False,
sample_random_actions=False, sample_from_suggestions=True,
epsilon=0.0, env_args=None, only_follow_peers=False):
if env_args is None:
env_args = {}
super(Peer, self).__init__(**algo_args,
|
class PeerGroup:
""" A group of peers who train together. """
def __init__(self, peers, use_agent_values=False, init_agent_values=200.,
lr=0.95, switch_ratio=0, use_advantage=False,
max_peer_epochs=1_000_000_000):
"""
:param peers: An iterable of peer agents
:param lr: The learning rate for trust and agent values
:param switch_ratio: switch_ratio == 0 means no switching
:param use_advantage: use advantage instead of value for AV updates
"""
self.peers = peers
self.lr = lr
self.switch_ratio = switch_ratio
self.active_peer = None # index of currently learning peer
self.solo_epoch = False
self.use_advantage = use_advantage
self.max_peer_epochs = max_peer_epochs
if use_agent_values:
self.agent_values = np.full(len(peers), init_agent_values,
dtype=np.float32)
key = "agent_values"
for peer in peers:
peer.n_peers = len(peers)
peer.group = self
# setup agent values
if use_agent_values:
peer.peer_values[key] = self.agent_values # noqa (Eq. 6)
peer.peer_value_functions[key] = self._update_agent_values
def _update_agent_values(self, batch_size=10):
""" Updates the agent values with samples from the peers' buffers"""
targets = np.zeros_like(self.peers, dtype=np.float32)
counts = np.zeros_like(self.peers, dtype=np.float32)
for peer in self.peers:
bs = batch_size // len(self.peers)
# reward, action, peer, new_obs, old_obs
if peer.buffer is not None:
batch = peer.buffer.sample(bs)
if batch is None: # buffer not sufficiently full
return
obs = np.array([b[3] for b in batch]).reshape(bs, -1)
v = peer.value(obs)
if self.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)
prev_v = peer.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no advantage (see Eq. 5)
for i in range(len(batch)): # Eq. 8
target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7
def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):
""" The outer peer learning routine. """
assert len(callbacks) == len(self.peers)
# more solo epochs
boost_single = 0 < self.switch_ratio < 1
if boost_single:
self.switch_ratio = 1 / self.switch_ratio
self.solo_epoch = False
peer_epochs = 0
for i in range(n_epochs):
# don't do peer learning forever
if peer_epochs < self.max_peer_epochs:
# ratio of 0 never performs a solo episode
if (i % (1 + self.switch_ratio) == 1) ^ boost_single:
self.solo_epoch = True
else:
peer_epochs += 1
else: # budget spent
self.solo_epoch = True
for p, peer, callback in zip(it.count(), self.peers, callbacks):
self.active_peer = p
peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,
callback=callback, tb_log_name=f"Peer{p}",
reset_num_timesteps=False,
log_interval=None, **kwargs)
# update epoch for temperature decay
peer.epoch += 1
self.active_peer = None
def __len__(self):
return len(self.peers)
def make_peer_class(cls: Type[OffPolicyAlgorithm]):
""" Creates a mixin with the corresponding algorithm class.
:param cls: The learning algorithm (needs to have a callable critic).
:return: The mixed in peer agent class.
"""
class Peer(cls, ABC):
""" Abstract Peer class
needs to be mixed with a suitable algorithm. """
def __init__(self, temperature, temp_decay, algo_args, env,
use_trust=False, use_critic=False, init_trust_values=200,
buffer_size=1000, follow_steps=10, seed=None,
use_trust_buffer=True, solo_training=False,
peers_sample_with_noise=False,
sample_random_actions=False, sample_from_suggestions=True,
epsilon=0.0, env_args=None, only_follow_peers=False):
if env_args is None:
env_args = {}
super(Peer, self).__init__(**algo_args, | env=make_env(env, **env_args), | 1 | 2023-12-13 10:40:55+00:00 | 2k |
balewgize/skimmit | url_summary/views.py | [
{
"identifier": "Preference",
"path": "users/models.py",
"snippet": "class Preference(models.Model):\n class AIModels(models.TextChoices):\n GPT_3_5 = \"gpt-3.5-turbo\", \"GPT-3.5\"\n GEMINI_PRO = \"gemini-pro\", \"Gemini Pro\"\n\n SENTENCE_COUNT_CHOICES = tuple(zip(range(3, 11), range(3, 11)))\n\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n ai_model = models.CharField(\n max_length=20, choices=AIModels, default=AIModels.GPT_3_5\n )\n sentence_count = models.IntegerField(default=5, choices=SENTENCE_COUNT_CHOICES)"
},
{
"identifier": "ArticleURLForm",
"path": "url_summary/forms.py",
"snippet": "class ArticleURLForm(forms.Form):\n url = forms.URLField(\n widget=forms.URLInput(\n attrs={\n \"type\": \"url\",\n \"class\": \"form-control\",\n \"placeholder\": \"Enter URL\",\n \"required\": True,\n \"autofocus\": True,\n \"maxlength\": 500,\n }\n ),\n )\n\n def clean_url(self):\n url = self.cleaned_data[\"url\"]\n if \"www.youtube.com\" in url:\n raise forms.ValidationError(\"Invalid Article URL.\")\n return url"
},
{
"identifier": "VideoURLForm",
"path": "url_summary/forms.py",
"snippet": "class VideoURLForm(forms.Form):\n url = forms.URLField(\n widget=forms.URLInput(\n attrs={\n \"type\": \"url\",\n \"class\": \"form-control\",\n \"placeholder\": \"Enter YouTube Video URL\",\n \"required\": True,\n \"autofocus\": True,\n \"maxlength\": 100,\n }\n ),\n )\n\n def clean_url(self):\n try:\n url = self.cleaned_data[\"url\"]\n if not url.startswith(\"https://www.youtube.com/watch?v=\"):\n raise\n\n # there may be other parameters in the URL\n video_id = url.split(\"v=\")[1].split(\"&\")[0]\n if len(video_id) != 11:\n raise\n except:\n video_id = None\n\n if not video_id:\n raise forms.ValidationError(\"Invalid YouTube URL.\")\n return url"
},
{
"identifier": "URLSummary",
"path": "url_summary/models.py",
"snippet": "class URLSummary(models.Model):\n \"\"\"A class representing URL summary results.\"\"\"\n\n url = models.URLField(max_length=500)\n title = models.CharField(max_length=250, blank=True)\n summary = models.TextField(blank=True)\n text = models.TextField(blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n bookmarks = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name=\"bookmarked_summaries\",\n blank=True,\n )\n ai_model = models.CharField(\n max_length=20, choices=Preference.AIModels.choices, blank=True, null=True\n )\n\n class Meta:\n ordering = (\"-created_at\",)\n verbose_name = \"URL Summary\"\n\n def __str__(self) -> str:\n return f\"<URLSummary - {self.url}\""
},
{
"identifier": "download_page",
"path": "url_summary/utils/downloader.py",
"snippet": "def download_page(url: str) -> tuple[requests.Response, bool]:\n \"\"\"Download HTML page from URL\"\"\"\n\n filename = \"user_agents.txt\"\n user_agents = get_user_agents(filename)\n error = False\n try:\n if user_agents:\n headers[\"user-agent\"] = user_agents[0]\n\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n except Exception as e:\n print(e, response.status_code)\n error = True\n\n return response, error"
}
] | import os
import json
import readtime
import google.generativeai as genai
from django.http import JsonResponse
from bs4 import BeautifulSoup
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from openai import OpenAI
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from pytube import YouTube
from users.models import Preference
from .forms import ArticleURLForm, VideoURLForm
from .models import URLSummary
from .utils.downloader import download_page | 1,394 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
def home(request):
context = {"article_form": ArticleURLForm(), "video_form": VideoURLForm()}
return render(request, "index.html", context=context)
def article_summary(request):
if request.method == "POST":
form = ArticleURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_article_summary(url, user_preference)
context = {"result": summary, "article_form": ArticleURLForm()}
else:
context = {"article_form": form}
context["video_form"] = VideoURLForm()
return render(request, "url_summary/article.html", context=context)
else:
return redirect("url_summary:home")
def video_summary(request):
if request.method == "POST":
form = VideoURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_video_summary(url, user_preference)
context = {"result": summary, "video_form": VideoURLForm()}
else:
context = {"video_form": form}
context["article_form"] = ArticleURLForm()
return render(request, "url_summary/video.html", context=context)
else:
return redirect("url_summary:home")
def get_article_summary(url: str, user_preference: Preference):
"""
Summarize articles by extracting HTML body text.
"""
summary_obj = URLSummary.objects.filter(url=url).first()
if summary_obj:
summary_dict = get_summary_details(summary_obj)
return summary_dict
summary_dict = {}
|
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
def home(request):
context = {"article_form": ArticleURLForm(), "video_form": VideoURLForm()}
return render(request, "index.html", context=context)
def article_summary(request):
if request.method == "POST":
form = ArticleURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_article_summary(url, user_preference)
context = {"result": summary, "article_form": ArticleURLForm()}
else:
context = {"article_form": form}
context["video_form"] = VideoURLForm()
return render(request, "url_summary/article.html", context=context)
else:
return redirect("url_summary:home")
def video_summary(request):
if request.method == "POST":
form = VideoURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_video_summary(url, user_preference)
context = {"result": summary, "video_form": VideoURLForm()}
else:
context = {"video_form": form}
context["article_form"] = ArticleURLForm()
return render(request, "url_summary/video.html", context=context)
else:
return redirect("url_summary:home")
def get_article_summary(url: str, user_preference: Preference):
"""
Summarize articles by extracting HTML body text.
"""
summary_obj = URLSummary.objects.filter(url=url).first()
if summary_obj:
summary_dict = get_summary_details(summary_obj)
return summary_dict
summary_dict = {} | response, error = download_page(url) | 4 | 2023-12-13 13:47:20+00:00 | 2k |
ZS-YANG/FemtoDet-v3 | projects/XDecoder/xdecoder/inference/texttoimage_regionretrieval_inferencer.py | [
{
"identifier": "DetInferencer",
"path": "mmdet/apis/det_inferencer.py",
"snippet": " VOID = None\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',\n '.tiff', '.webp')\nclass DetInferencer(BaseInferencer):\n def __init__(self,\n model: Optional[Union[ModelType, str]] = None,\n weights: Optional[str] = None,\n device: Optional[str] = None,\n scope: Optional[str] = 'mmdet',\n palette: str = 'none',\n show_progress: bool = True) -> None:\n def _load_weights_to_model(self, model: nn.Module,\n checkpoint: Optional[dict],\n cfg: Optional[ConfigType]) -> None:\n def _init_pipeline(self, cfg: ConfigType) -> Compose:\n def _get_transform_idx(self, pipeline_cfg: ConfigType,\n name: Union[str, Tuple[str, type]]) -> int:\n def _init_visualizer(self, cfg: ConfigType) -> Optional[Visualizer]:\n def _inputs_to_list(self, inputs: InputsType) -> list:\n def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs):\n def _get_chunk_data(self, inputs: Iterable, chunk_size: int):\n def __call__(\n self,\n inputs: InputsType,\n batch_size: int = 1,\n return_vis: bool = False,\n show: bool = False,\n wait_time: int = 0,\n no_save_vis: bool = False,\n draw_pred: bool = True,\n pred_score_thr: float = 0.3,\n return_datasamples: bool = False,\n print_result: bool = False,\n no_save_pred: bool = True,\n out_dir: str = '',\n # by open image task\n texts: Optional[Union[str, list]] = None,\n # by open panoptic task\n stuff_texts: Optional[Union[str, list]] = None,\n # by GLIP\n custom_entities: bool = False,\n **kwargs) -> dict:\n def visualize(self,\n inputs: InputsType,\n preds: PredType,\n return_vis: bool = False,\n show: bool = False,\n wait_time: int = 0,\n draw_pred: bool = True,\n pred_score_thr: float = 0.3,\n no_save_vis: bool = False,\n img_out_dir: str = '',\n **kwargs) -> Union[List[np.ndarray], None]:\n def postprocess(\n self,\n preds: PredType,\n visualization: Optional[List[np.ndarray]] = None,\n return_datasamples: bool = False,\n print_result: bool = False,\n no_save_pred: bool = False,\n pred_out_dir: str = '',\n **kwargs,\n ) -> Dict:\n def pred2dict(self,\n data_sample: DetDataSample,\n pred_out_dir: str = '') -> Dict:"
},
{
"identifier": "ConfigType",
"path": "mmdet/utils/typing_utils.py",
"snippet": ""
}
] | import copy
import torch
from typing import Iterable, Optional, Union
from mmengine.dataset import Compose
from rich.progress import track
from mmdet.apis.det_inferencer import DetInferencer, InputsType
from mmdet.utils import ConfigType | 1,193 |
class TextToImageRegionRetrievalInferencer(DetInferencer):
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
retrieval_pipeline = Compose(pipeline_cfg)
grounding_pipeline_cp = copy.deepcopy(pipeline_cfg)
grounding_pipeline_cp[1].scale = cfg.grounding_scale
grounding_pipeline = Compose(grounding_pipeline_cp)
return {
'grounding_pipeline': grounding_pipeline,
'retrieval_pipeline': retrieval_pipeline
}
def _get_chunk_data(self, inputs: Iterable, pipeline, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append(
(inputs_, pipeline(copy.deepcopy(inputs_))))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
def preprocess(self,
|
class TextToImageRegionRetrievalInferencer(DetInferencer):
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
retrieval_pipeline = Compose(pipeline_cfg)
grounding_pipeline_cp = copy.deepcopy(pipeline_cfg)
grounding_pipeline_cp[1].scale = cfg.grounding_scale
grounding_pipeline = Compose(grounding_pipeline_cp)
return {
'grounding_pipeline': grounding_pipeline,
'retrieval_pipeline': retrieval_pipeline
}
def _get_chunk_data(self, inputs: Iterable, pipeline, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append(
(inputs_, pipeline(copy.deepcopy(inputs_))))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
def preprocess(self, | inputs: InputsType, | 0 | 2023-12-11 15:23:03+00:00 | 2k |
mit-ll-ai-technology/maite | src/maite/_internals/interop/huggingface/image_classifier.py | [
{
"identifier": "BaseHFModel",
"path": "src/maite/_internals/interop/huggingface/base.py",
"snippet": "class BaseHFModel(nn.Module, BaseModel):\n def __init__(\n self,\n model_name: str,\n model: Union[HuggingFaceWithLogits, HuggingFaceWithDetection],\n processor: Optional[HuggingFaceProcessor] = None,\n post_processor: Optional[HuggingFaceObjectDetectionPostProcessor] = None,\n ) -> None:\n def get_labels(self) -> Sequence[str]:"
},
{
"identifier": "HuggingFacePredictions",
"path": "src/maite/_internals/interop/huggingface/typing.py",
"snippet": "class HuggingFacePredictions:\n scores: Tensor\n labels: Optional[Union[Tensor, Sequence[Sequence[str]]]] = None"
},
{
"identifier": "HuggingFaceProbs",
"path": "src/maite/_internals/interop/huggingface/typing.py",
"snippet": "class HuggingFaceProbs:\n probs: Tensor"
},
{
"identifier": "HuggingFaceProcessor",
"path": "src/maite/_internals/interop/huggingface/typing.py",
"snippet": "class HuggingFaceProcessor(Protocol):\n def __call__(\n self,\n images: Sequence[ArrayLike],\n return_tensors: Union[bool, str] = False,\n **kwargs: Any,\n ) -> BatchFeature:\n ..."
},
{
"identifier": "HuggingFaceWithLogits",
"path": "src/maite/_internals/interop/huggingface/typing.py",
"snippet": "class HuggingFaceWithLogits(HuggingFaceModule, Protocol):\n def __call__(\n self, pixel_values: Union[ArrayLike, Sequence[ArrayLike]], **kwargs: Any\n ) -> HasLogits:\n ..."
}
] | from typing import TYPE_CHECKING, Any, List, Optional, Union, cast
from typing_extensions import Self
from maite.errors import InvalidArgument
from maite.protocols import HasDataImage, HasLogits, SupportsArray
from .base import BaseHFModel, InteropModelMetadata
from .typing import (
HuggingFacePredictions,
HuggingFaceProbs,
HuggingFaceProcessor,
HuggingFaceWithLogits,
)
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
import torch as tr | 1,012 | # Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
__all__ = ["HuggingFaceImageClassifier"]
class HuggingFaceImageClassifier(BaseHFModel):
"""
    Wrapper for HuggingFace image classification models.
This interface uses `AutoFeatureExtractor` and `AutoModelForImageClassification`
to load the HuggingFace models.
"""
metadata: InteropModelMetadata
def __init__(
self,
model_name: str,
model: HuggingFaceWithLogits,
processor: Optional[HuggingFaceProcessor] = None,
top_k: Optional[int] = None,
) -> None:
"""
Initialize HuggingFaceImageClassifier.
Parameters
----------
model_name: str
A Huggingface model name from model id, e.g. "microsoft/resnet-50"
processor : HuggingFaceProcessor
A HuggingFace feature extractor for a given model.
model : HuggingFaceModel
A HuggingFace image classification model.
Examples
--------
>>> from transformers import AutoFeatureExtractor, AutoModelForImageClassification
>>> processor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
>>> model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
>>> hf_model = HuggingFaceImageClassifier(processor, model)
"""
super().__init__(model_name=model_name, model=model, processor=processor)
self._top_k = top_k
self.metadata = InteropModelMetadata(
model_name=model_name, provider="HuggingFace", task="Image Classification"
)
def preprocessor(
self,
data: SupportsArray,
) -> HasDataImage:
"""
        Preprocess images for a HuggingFace image classifier.
Parameters
----------
images : Sequence[ArrayLike]
The images to preprocess.
Returns
-------
tr.Tensor
The preprocessed images.
Examples
--------
"""
assert self._processor is not None, "No processor was provided."
assert isinstance(data, (list, tuple))
image_features = self._processor(images=data, return_tensors="pt")[
"pixel_values"
]
assert isinstance(image_features, tr.Tensor)
return {"image": image_features}
def post_processor(
self, outputs: HasLogits
| # Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
__all__ = ["HuggingFaceImageClassifier"]
class HuggingFaceImageClassifier(BaseHFModel):
"""
    Wrapper for HuggingFace image classification models.
This interface uses `AutoFeatureExtractor` and `AutoModelForImageClassification`
to load the HuggingFace models.
"""
metadata: InteropModelMetadata
def __init__(
self,
model_name: str,
model: HuggingFaceWithLogits,
processor: Optional[HuggingFaceProcessor] = None,
top_k: Optional[int] = None,
) -> None:
"""
Initialize HuggingFaceImageClassifier.
Parameters
----------
model_name: str
A Huggingface model name from model id, e.g. "microsoft/resnet-50"
processor : HuggingFaceProcessor
A HuggingFace feature extractor for a given model.
model : HuggingFaceModel
A HuggingFace image classification model.
Examples
--------
>>> from transformers import AutoFeatureExtractor, AutoModelForImageClassification
>>> processor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
>>> model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
>>> hf_model = HuggingFaceImageClassifier(processor, model)
"""
super().__init__(model_name=model_name, model=model, processor=processor)
self._top_k = top_k
self.metadata = InteropModelMetadata(
model_name=model_name, provider="HuggingFace", task="Image Classification"
)
def preprocessor(
self,
data: SupportsArray,
) -> HasDataImage:
"""
        Preprocess images for a HuggingFace image classifier.
Parameters
----------
images : Sequence[ArrayLike]
The images to preprocess.
Returns
-------
tr.Tensor
The preprocessed images.
Examples
--------
"""
assert self._processor is not None, "No processor was provided."
assert isinstance(data, (list, tuple))
image_features = self._processor(images=data, return_tensors="pt")[
"pixel_values"
]
assert isinstance(image_features, tr.Tensor)
return {"image": image_features}
def post_processor(
self, outputs: HasLogits | ) -> Union[HuggingFacePredictions, HuggingFaceProbs]: | 1 | 2023-12-12 15:34:16+00:00 | 2k |
djcopley/ShellOracle | src/shelloracle/providers/ollama.py | [
{
"identifier": "Provider",
"path": "src/shelloracle/provider.py",
"snippet": "class Provider(Protocol):\n \"\"\"\n LLM Provider Protocol\n\n All LLM backends must implement this interface.\n \"\"\"\n name: str\n\n @abstractmethod\n def generate(self, prompt: str) -> AsyncIterator[str]:\n \"\"\"\n This is an asynchronous generator method which defines the protocol that a provider implementation\n should adhere to. The method takes a prompt as an argument and produces an asynchronous stream\n of string results.\n\n :param prompt: A string value which serves as input to the provider's process of generating results.\n :return: An asynchronous generator yielding string results.\n \"\"\"\n # If you are wondering why the 'generate' signature doesn't include 'async', see\n # https://mypy.readthedocs.io/en/stable/more_types.html#asynchronous-iterators"
},
{
"identifier": "ProviderError",
"path": "src/shelloracle/provider.py",
"snippet": "class ProviderError(Exception):\n \"\"\"LLM providers raise this error to gracefully indicate something has gone wrong.\"\"\""
},
{
"identifier": "Setting",
"path": "src/shelloracle/config/setting.py",
"snippet": "class Setting(Generic[T]):\n def __init__(self, *, name: str | None = None, default: T | None = None) -> None:\n self.name = name\n self.default = default\n\n def __set_name__(self, owner: type[Provider], name: str) -> None:\n if not self.name:\n self.name = name\n # Set the default value in the config dictionary if it doesn't exist\n provider_table = config.global_config.get(\"provider\", {})\n provider_table.setdefault(owner.name, {}).setdefault(name, self.default)\n config.global_config[\"provider\"] = provider_table\n\n def __get__(self, instance: Provider, owner: type[Provider]) -> T:\n return config.global_config.get(\"provider\", {}).get(instance.name, {})[self.name]\n\n def __set__(self, instance: Provider, value: T) -> None:\n config.global_config.setdefault(\"provider\", {}).setdefault(instance.name, {})[self.name] = value"
}
] | import json
import httpx
from dataclasses import dataclass, asdict
from typing import Any, AsyncIterator
from ..provider import Provider, ProviderError
from ..config import Setting | 1,256 | from __future__ import annotations
def dataclass_to_json(obj: Any) -> dict[str, Any]:
"""Convert dataclass to a json dict
This function filters out 'None' values.
:param obj: the dataclass to serialize
:return: serialized dataclass
:raises TypeError: if obj is not a dataclass
"""
return {k: v for k, v in asdict(obj).items() if v is not None}
@dataclass
class GenerateRequest:
model: str
"""(required) the model name"""
prompt: str | None = None
"""the prompt to generate a response for"""
images: list[str] | None = None
"""a list of base64-encoded images (for multimodal models such as llava)"""
format: str | None = None
"""the format to return a response in. Currently the only accepted value is json"""
options: dict | None = None
"""additional model parameters listed in the documentation for the Modelfile such as temperature"""
system: str | None = None
"""system prompt to (overrides what is defined in the Modelfile)"""
template: str | None = None
"""the full prompt or prompt template (overrides what is defined in the Modelfile)"""
context: str | None = None
"""the context parameter returned from a previous request to /generate, this can be used to keep a short
conversational memory"""
stream: bool | None = None
"""if false the response will be returned as a single response object, rather than a stream of objects"""
raw: bool | None = None
"""if true no formatting will be applied to the prompt and no context will be returned. You may choose to use
the raw parameter if you are specifying a full templated prompt in your request to the API, and are managing
history yourself. JSON mode"""
class Ollama(Provider):
name = "Ollama"
host = Setting(default="localhost")
port = Setting(default=11434)
model = Setting(default="codellama:13b")
system_prompt = Setting(
default=(
"Based on the following user description, generate a corresponding Bash command. Focus solely "
"on interpreting the requirements and translating them into a single, executable Bash command. "
"Ensure accuracy and relevance to the user's description. The output should be a valid Bash "
"command that directly aligns with the user's intent, ready for execution in a command-line "
"environment. Output nothing except for the command. No code block, no English explanation, "
"no start/end tags."
)
)
@property
def endpoint(self) -> str:
# computed property because python descriptors need to be bound to an instance before access
return f"http://{self.host}:{self.port}/api/generate"
async def generate(self, prompt: str) -> AsyncIterator[str]:
request = GenerateRequest(self.model, prompt, system=self.system_prompt, stream=True)
data = dataclass_to_json(request)
try:
async with httpx.AsyncClient() as client:
async with client.stream("POST", self.endpoint, json=data, timeout=20.0) as stream:
async for line in stream.aiter_lines():
response = json.loads(line)
if "error" in response:
| from __future__ import annotations
def dataclass_to_json(obj: Any) -> dict[str, Any]:
"""Convert dataclass to a json dict
This function filters out 'None' values.
:param obj: the dataclass to serialize
:return: serialized dataclass
:raises TypeError: if obj is not a dataclass
"""
return {k: v for k, v in asdict(obj).items() if v is not None}
@dataclass
class GenerateRequest:
model: str
"""(required) the model name"""
prompt: str | None = None
"""the prompt to generate a response for"""
images: list[str] | None = None
"""a list of base64-encoded images (for multimodal models such as llava)"""
format: str | None = None
"""the format to return a response in. Currently the only accepted value is json"""
options: dict | None = None
"""additional model parameters listed in the documentation for the Modelfile such as temperature"""
system: str | None = None
"""system prompt to (overrides what is defined in the Modelfile)"""
template: str | None = None
"""the full prompt or prompt template (overrides what is defined in the Modelfile)"""
context: str | None = None
"""the context parameter returned from a previous request to /generate, this can be used to keep a short
conversational memory"""
stream: bool | None = None
"""if false the response will be returned as a single response object, rather than a stream of objects"""
raw: bool | None = None
"""if true no formatting will be applied to the prompt and no context will be returned. You may choose to use
the raw parameter if you are specifying a full templated prompt in your request to the API, and are managing
history yourself. JSON mode"""
class Ollama(Provider):
name = "Ollama"
host = Setting(default="localhost")
port = Setting(default=11434)
model = Setting(default="codellama:13b")
system_prompt = Setting(
default=(
"Based on the following user description, generate a corresponding Bash command. Focus solely "
"on interpreting the requirements and translating them into a single, executable Bash command. "
"Ensure accuracy and relevance to the user's description. The output should be a valid Bash "
"command that directly aligns with the user's intent, ready for execution in a command-line "
"environment. Output nothing except for the command. No code block, no English explanation, "
"no start/end tags."
)
)
@property
def endpoint(self) -> str:
# computed property because python descriptors need to be bound to an instance before access
return f"http://{self.host}:{self.port}/api/generate"
async def generate(self, prompt: str) -> AsyncIterator[str]:
request = GenerateRequest(self.model, prompt, system=self.system_prompt, stream=True)
data = dataclass_to_json(request)
try:
async with httpx.AsyncClient() as client:
async with client.stream("POST", self.endpoint, json=data, timeout=20.0) as stream:
async for line in stream.aiter_lines():
response = json.loads(line)
if "error" in response: | raise ProviderError(response["error"]) | 1 | 2023-12-11 20:23:31+00:00 | 2k |
juniberry/PacketIRC | packetirc.py | [
{
"identifier": "LOG_FILE",
"path": "settings.py",
"snippet": "LOG_FILE = \"packetirc.log\""
},
{
"identifier": "LOG_LEVEL",
"path": "settings.py",
"snippet": "LOG_LEVEL = logging.INFO"
},
{
"identifier": "SERVER",
"path": "settings.py",
"snippet": "SERVER = \"\""
},
{
"identifier": "PORT",
"path": "settings.py",
"snippet": "PORT = 6667"
},
{
"identifier": "PASS",
"path": "settings.py",
"snippet": "PASS = \"\""
},
{
"identifier": "CHANNEL",
"path": "settings.py",
"snippet": "CHANNEL = \"#Testing\""
},
{
"identifier": "HIDE_SERVER",
"path": "settings.py",
"snippet": "HIDE_SERVER = True"
},
{
"identifier": "MAX_RETRIES",
"path": "settings.py",
"snippet": "MAX_RETRIES = 3"
},
{
"identifier": "RETRY_DELAY",
"path": "settings.py",
"snippet": "RETRY_DELAY = 5 # seconds"
},
{
"identifier": "HELP_INFO",
"path": "settings.py",
"snippet": "HELP_INFO = \"\"\"\nPacketIRC commands:\n /quit [message] - Disconnect from the server with optional message.\n /msg <nickname> <message> - Send a private message to the specified user.\n /join <channel> - Join the specified channel.\n /names - Shows a list of users in the channel.\n /topic [new topic] - Set a new topic for the current channel or request the topic.\n /away [message] - Set an away message or clear the away status.\n /whois <nickname> - Retrieves information about the specified user.\n /help - Display this help message.\n\"\"\""
},
{
"identifier": "WELCOME_MESSAGE",
"path": "settings.py",
"snippet": "WELCOME_MESSAGE = \"\"\"\nWelcome to PacketIRC!\nType /help for a list of commands.\n\"\"\""
},
{
"identifier": "BAD_WORDS_FILE",
"path": "settings.py",
"snippet": "BAD_WORDS_FILE = \"bad_words.txt\""
},
{
"identifier": "BAD_WORDS_FILTER",
"path": "settings.py",
"snippet": "BAD_WORDS_FILTER = False"
}
] | import socket
import threading
import random
import time
import logging
import re
import irc.client
import os
import sys
from settings import LOG_FILE, LOG_LEVEL, SERVER, PORT, PASS, CHANNEL, HIDE_SERVER, MAX_RETRIES, RETRY_DELAY, HELP_INFO, WELCOME_MESSAGE, BAD_WORDS_FILE, BAD_WORDS_FILTER | 666 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
______ _ _____ ______ ______
(_____ \ | | _ (_____|_____ \ / _____)
_____) )___ ____| | _ ____| |_ _ _____) ) /
| ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| |
| | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____
|_| \_||_|\____)_| \_)____)\___|_____) |_|\______)
PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication.
It includes a client-side implementation with simplified IRC functionalities.
File: client.py
Author: Daria Juniper @juniberry
Date: 10-Dec-2023
Changes:
12-Dec-2023 - Initial version 1.0 beta.
"""
# Import settings from an external configuration file.
# Globals
VERSION = 'v1.1b'
BAD_WORDS = []
HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al.
# State
is_running = True
# Initialize logging.
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
______ _ _____ ______ ______
(_____ \ | | _ (_____|_____ \ / _____)
_____) )___ ____| | _ ____| |_ _ _____) ) /
| ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| |
| | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____
|_| \_||_|\____)_| \_)____)\___|_____) |_|\______)
PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication.
It includes a client-side implementation with simplified IRC functionalities.
File: client.py
Author: Daria Juniper @juniberry
Date: 10-Dec-2023
Changes:
12-Dec-2023 - Initial version 1.0 beta.
"""
# Import settings from an external configuration file.
# Globals
VERSION = 'v1.1b'
BAD_WORDS = []
HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al.
# State
is_running = True
# Initialize logging. | logging.basicConfig(filename=os.path.join(HOME_PATH, LOG_FILE), filemode='w', level=LOG_LEVEL, format='%(asctime)s - %(levelname)s - %(message)s') | 0 | 2023-12-13 19:08:48+00:00 | 2k |
Tps-F/rvc-onnx-test | onnxlib/attentions.py | [
{
"identifier": "commons",
"path": "onnxlib/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef slice_segments2(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:\ndef shift_1d(x):\ndef sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):"
},
{
"identifier": "modules",
"path": "onnxlib/modules.py",
"snippet": "LRELU_SLOPE = 0.1\nclass LayerNorm(nn.Module):\nclass ConvReluNorm(nn.Module):\nclass DDSConv(nn.Module):\nclass WN(torch.nn.Module):\nclass ResBlock1(torch.nn.Module):\nclass ResBlock2(torch.nn.Module):\nclass Log(nn.Module):\nclass Flip(nn.Module):\nclass ElementwiseAffine(nn.Module):\nclass ResidualCouplingLayer(nn.Module):\nclass ConvFlow(nn.Module):\n def __init__(self, channels, eps=1e-5):\n def forward(self, x):\n def __init__(\n self,\n in_channels,\n hidden_channels,\n out_channels,\n kernel_size,\n n_layers,\n p_dropout,\n ):\n def forward(self, x, x_mask):\n def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):\n def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):\n def __init__(\n self,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=0,\n p_dropout=0,\n ):\n def forward(\n self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None\n ):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):\n def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def __init__(self, channels, kernel_size=3, dilation=(1, 3)):\n def forward(self, x, x_mask: Optional[torch.Tensor] = None):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n def __init__(self, channels):\n def forward(self, x, x_mask, reverse=False, **kwargs):\n def __init__(\n self,\n channels,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n p_dropout=0,\n gin_channels=0,\n mean_only=False,\n ):\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse: bool = False,\n ):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def __init__(\n self,\n in_channels,\n filter_channels,\n kernel_size,\n n_layers,\n num_bins=10,\n tail_bound=5.0,\n ):\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse=False,\n ):"
},
{
"identifier": "LayerNorm",
"path": "onnxlib/modules.py",
"snippet": "class LayerNorm(nn.Module):\n def __init__(self, channels, eps=1e-5):\n super(LayerNorm, self).__init__()\n self.channels = channels\n self.eps = eps\n\n self.gamma = nn.Parameter(torch.ones(channels))\n self.beta = nn.Parameter(torch.zeros(channels))\n\n def forward(self, x):\n x = x.transpose(1, -1)\n x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)\n return x.transpose(1, -1)"
}
] | import math
import torch
from typing import Optional
from torch import nn
from torch.nn import functional as F
from onnxlib import commons, modules
from onnxlib.modules import LayerNorm | 1,523 |
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
super(Encoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = int(n_layers)
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
)
|
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
super(Encoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = int(n_layers)
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
) | self.norm_layers_1.append(LayerNorm(hidden_channels)) | 2 | 2023-12-09 04:08:04+00:00 | 2k |
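
The LayerNorm referenced in the context normalizes channels-first tensors by transposing to channels-last, applying F.layer_norm, and transposing back; a compact runnable version of that trick is:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ChannelLayerNorm(nn.Module):
    """LayerNorm over the channel dimension of (batch, channels, time) tensors."""
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(channels))
        self.beta = nn.Parameter(torch.zeros(channels))
        self.eps = eps

    def forward(self, x):
        x = x.transpose(1, -1)                     # (batch, time, channels)
        x = F.layer_norm(x, (x.shape[-1],), self.gamma, self.beta, self.eps)
        return x.transpose(1, -1)                  # back to (batch, channels, time)

print(ChannelLayerNorm(8)(torch.randn(2, 8, 16)).shape)  # torch.Size([2, 8, 16])
```
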
zhenqincn/FedKSeed | utils_data/load_data.py | [
{
"identifier": "DefaultToken",
"path": "utils_data/default_tokens.py",
"snippet": "class DefaultToken(Enum):\n PAD_TOKEN = \"[PAD]\"\n EOS_TOKEN = \"</s>\"\n BOS_TOKEN = \"<s>\"\n UNK_TOKEN = \"<unk>\"\n IGNORE_INDEX = -100"
},
{
"identifier": "partition_idx_labeldir",
"path": "utils_data/partition_data.py",
"snippet": "def partition_idx_labeldir(y, n_parties, alpha, num_classes):\n min_size = 0\n min_require_size = 10\n K = num_classes\n N = y.shape[0]\n net_dataidx_map = {}\n\n while min_size < min_require_size:\n idx_batch = [[] for _ in range(n_parties)]\n for k in range(K):\n idx_k = np.where(y == k)[0]\n np.random.shuffle(idx_k)\n proportions = np.random.dirichlet(np.repeat(alpha, n_parties))\n # Balance\n proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)])\n proportions = proportions / proportions.sum()\n proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1]\n idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))]\n min_size = min([len(idx_j) for idx_j in idx_batch])\n for j in range(n_parties):\n np.random.shuffle(idx_batch[j])\n net_dataidx_map[j] = idx_batch[j]\n return net_dataidx_map"
}
] | import numpy as np
import torch
from torch.utils.data import DataLoader, Subset
from transformers import AutoTokenizer
from utils_data.default_tokens import DefaultToken
from utils_data.partition_data import partition_idx_labeldir
from collections import Counter
from utils_data.llm_dataset import LLMDataset, LLMDataCollator
from utils_data.natural_instruction_loader import get_instruction_dataset | 862 |
def get_loaders(args, only_eval=False):
"""
Return: list of train_loaders, eval_loader
"""
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
tokenizer.model_max_length = args.max_length
special_tokens = dict()
if tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
tokenizer.add_special_tokens(special_tokens)
# Generation task
if args.dataset == 'dolly':
if args.eval_metric == 'loss':
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)
else:
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)
data_collator = LLMDataCollator(tokenizer=tokenizer)
# only use a subset of raw dataset
raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])
y_all = np.array([item['categories'] for item in raw_datasets])
index_eval = np.where(y_all == args.zerotask)[0]
# delete the indices of eval samples from the all set
index_train = np.delete(np.arange(len(y_all)), index_eval)
raw_datasets = np.array(raw_datasets)
train_set = raw_datasets[index_train]
eval_set = raw_datasets[index_eval]
y_train = np.array([item['categories'] for item in train_set])
counter = Counter(y_train)
noniid = args.iid
if 'dir' in noniid:
|
def get_loaders(args, only_eval=False):
"""
Return: list of train_loaders, eval_loader
"""
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
tokenizer.model_max_length = args.max_length
special_tokens = dict()
if tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
tokenizer.add_special_tokens(special_tokens)
# Generation task
if args.dataset == 'dolly':
if args.eval_metric == 'loss':
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)
else:
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)
data_collator = LLMDataCollator(tokenizer=tokenizer)
# only use a subset of raw dataset
raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])
y_all = np.array([item['categories'] for item in raw_datasets])
index_eval = np.where(y_all == args.zerotask)[0]
# delete the indices of eval samples from the all set
index_train = np.delete(np.arange(len(y_all)), index_eval)
raw_datasets = np.array(raw_datasets)
train_set = raw_datasets[index_train]
eval_set = raw_datasets[index_eval]
y_train = np.array([item['categories'] for item in train_set])
counter = Counter(y_train)
noniid = args.iid
if 'dir' in noniid: | split_dic = partition_idx_labeldir(y_train, n_parties=args.num_clients, alpha=float(noniid[3:]), num_classes=len(counter)) | 1 | 2023-12-08 02:58:31+00:00 | 2k |
merlresearch/PixPNet | pixpnet/optim.py | [
{
"identifier": "get_logger",
"path": "pixpnet/utils.py",
"snippet": "def get_logger(name):\n logging.basicConfig(\n format=\"%(asctime)s[%(process)d][%(levelname)s] %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n logger = logging.getLogger(name)\n logger.setLevel(os.environ.get(\"PIXPNET_LOG_LEVEL\", \"INFO\"))\n return logger"
},
{
"identifier": "intersect_func_and_kwargs",
"path": "pixpnet/utils.py",
"snippet": "def intersect_func_and_kwargs(func, kwargs, exclude_func_args=None, exclude_kwargs=None, return_invalid=True):\n func_args = {*get_all_func_args(func)} - (set() if exclude_func_args is None else {*exclude_func_args})\n if isinstance(kwargs, argparse.Namespace):\n kwargs = vars(kwargs)\n kwargs_keys = {*kwargs.keys()} - (set() if exclude_kwargs is None else {*exclude_kwargs})\n\n intersecting_keys = kwargs_keys & func_args\n intersected_dict = {k: kwargs[k] for k in intersecting_keys}\n if return_invalid:\n return intersected_dict, kwargs_keys - func_args\n return intersected_dict"
}
] | import argparse
import inspect
import re
import torch
from typing import Any, Dict, Optional, Set, Tuple, Type
from pytorch_warmup import ExponentialWarmup
from pytorch_warmup.base import BaseWarmup
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR, StepLR
from pixpnet.utils import get_logger, intersect_func_and_kwargs | 760 | # Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
logger = get_logger(__name__)
_OPTIMIZER_MAP = {attr: getattr(torch.optim, attr) for attr in dir(torch.optim) if attr != "Optimizer"}
_OPTIMIZER_MAP = {attr: cls for attr, cls in _OPTIMIZER_MAP.items() if inspect.isclass(cls)}
_LOOSE_OPTIMIZER_MAP = {}
for _attr, _cls in _OPTIMIZER_MAP.items():
_attr_split = re.split(r"(?=(?<!^)[A-Z][a-z]|(?<![A-Z])[A-Z]$)", _attr)
_attr_lower = "".join(map(str.lower, _attr_split))
_attr_lower_ = "_".join(map(str.lower, _attr_split))
if _attr_lower in _LOOSE_OPTIMIZER_MAP or _attr_lower_ in _LOOSE_OPTIMIZER_MAP:
_cls_existing = _LOOSE_OPTIMIZER_MAP.get(_attr_lower, _LOOSE_OPTIMIZER_MAP.get(_attr_lower_))
raise RuntimeError(
f"{_attr_lower} already in optimizers! Overlapping class names in "
f"lowercase was unexpected and cannot be resolved: "
f"{_cls_existing} and {_cls}"
)
_LOOSE_OPTIMIZER_MAP[_attr_lower] = _cls
if _attr_lower != _attr_lower_:
_LOOSE_OPTIMIZER_MAP[_attr_lower_] = _cls
def get_optimizer_cls(
config: argparse.Namespace,
ignore: Optional[Set[str]] = None,
) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:
if ignore is None:
ignore = set()
try:
optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]
except KeyError:
raise ValueError(f'No such optimizer "{config.optimizer.name}"')
| # Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
logger = get_logger(__name__)
_OPTIMIZER_MAP = {attr: getattr(torch.optim, attr) for attr in dir(torch.optim) if attr != "Optimizer"}
_OPTIMIZER_MAP = {attr: cls for attr, cls in _OPTIMIZER_MAP.items() if inspect.isclass(cls)}
_LOOSE_OPTIMIZER_MAP = {}
for _attr, _cls in _OPTIMIZER_MAP.items():
_attr_split = re.split(r"(?=(?<!^)[A-Z][a-z]|(?<![A-Z])[A-Z]$)", _attr)
_attr_lower = "".join(map(str.lower, _attr_split))
_attr_lower_ = "_".join(map(str.lower, _attr_split))
if _attr_lower in _LOOSE_OPTIMIZER_MAP or _attr_lower_ in _LOOSE_OPTIMIZER_MAP:
_cls_existing = _LOOSE_OPTIMIZER_MAP.get(_attr_lower, _LOOSE_OPTIMIZER_MAP.get(_attr_lower_))
raise RuntimeError(
f"{_attr_lower} already in optimizers! Overlapping class names in "
f"lowercase was unexpected and cannot be resolved: "
f"{_cls_existing} and {_cls}"
)
_LOOSE_OPTIMIZER_MAP[_attr_lower] = _cls
if _attr_lower != _attr_lower_:
_LOOSE_OPTIMIZER_MAP[_attr_lower_] = _cls
def get_optimizer_cls(
config: argparse.Namespace,
ignore: Optional[Set[str]] = None,
) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:
if ignore is None:
ignore = set()
try:
optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]
except KeyError:
raise ValueError(f'No such optimizer "{config.optimizer.name}"') | hparams, invalid_keys = intersect_func_and_kwargs( | 1 | 2023-12-06 23:49:31+00:00 | 2k |
dhh1995/MeGraph | megraph/args_utils.py | [
{
"identifier": "get_default_config",
"path": "megraph/io_utils.py",
"snippet": "def get_default_config(args):\n dataset_name = args.dataset_name\n dataset_subname = args.dataset_subname\n model_name = args.model\n conv_name = args.layer\n\n # Config\n cfg_file = args.config_file\n if cfg_file is not None:\n config = read_config_file(cfg_file)\n if config is None:\n cfg_file = None\n print(\n f\"[Warning] Could not found {cfg_file}, \"\n \"fall back to default config files.\"\n )\n else:\n config[\"config_file\"] = cfg_file\n if cfg_file is None:\n cfg_files = get_default_config_filenames(\n model_name, conv_name, dataset_name, dataset_subname\n )\n config = {}\n found_files = []\n for f in cfg_files:\n new_config = read_config_file(f, folder=args.configs_dir)\n if new_config is not None:\n print(f\"Overwrite default config using {f}:\")\n print(new_config)\n config.update(new_config)\n found_files.append(f)\n config[\"config_file\"] = found_files\n return config"
},
{
"identifier": "get_raw_cmdline",
"path": "megraph/io_utils.py",
"snippet": "def get_raw_cmdline():\n with open(\"/proc/self/cmdline\") as f:\n x = f.readlines()\n if x is None or len(x) == 0:\n return None\n return x[0].replace(\"\\x00\", \" \")"
}
] | import git
from .io_utils import get_default_config, get_raw_cmdline | 704 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : args.py
# Author : Honghua Dong
# Email : [email protected]
#
# Distributed under terms of the MIT license.
__all__ = ["ArgsBuilder", "add_git_and_cmd_line_info", "get_args_and_model"]
class ArgsBuilder(object):
"""A meta-class to be inherit that support args register and setup from args"""
__hyperparams__ = []
__parser__ = None
__prefix__ = "--"
@classmethod
def _set_parser_and_prefix(cls, parser, prefix):
cls.__parser__ = parser
if prefix is None:
prefix = "--"
else:
prefix = f"--{prefix}-"
cls.__prefix__ = prefix
@classmethod
def _add_argument(cls, name, *args, **kwargs):
cls.__hyperparams__.append(name)
name = name.replace("_", "-")
cls.__parser__.add_argument(cls.__prefix__ + name, *args, **kwargs)
@classmethod
def from_args(cls, args, prefix=None, **kwargs):
if prefix is None:
prefix = ""
else:
prefix = str(prefix) + "_"
print(f"From Args: {cls.__name__} with {kwargs}")
init_params = {k: getattr(args, prefix + k) for k in cls.__hyperparams__}
init_params.update(kwargs)
return cls(**init_params)
def add_git_and_cmd_line_info(args):
| #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : args.py
# Author : Honghua Dong
# Email : [email protected]
#
# Distributed under terms of the MIT license.
__all__ = ["ArgsBuilder", "add_git_and_cmd_line_info", "get_args_and_model"]
class ArgsBuilder(object):
"""A meta-class to be inherit that support args register and setup from args"""
__hyperparams__ = []
__parser__ = None
__prefix__ = "--"
@classmethod
def _set_parser_and_prefix(cls, parser, prefix):
cls.__parser__ = parser
if prefix is None:
prefix = "--"
else:
prefix = f"--{prefix}-"
cls.__prefix__ = prefix
@classmethod
def _add_argument(cls, name, *args, **kwargs):
cls.__hyperparams__.append(name)
name = name.replace("_", "-")
cls.__parser__.add_argument(cls.__prefix__ + name, *args, **kwargs)
@classmethod
def from_args(cls, args, prefix=None, **kwargs):
if prefix is None:
prefix = ""
else:
prefix = str(prefix) + "_"
print(f"From Args: {cls.__name__} with {kwargs}")
init_params = {k: getattr(args, prefix + k) for k in cls.__hyperparams__}
init_params.update(kwargs)
return cls(**init_params)
def add_git_and_cmd_line_info(args): | args.raw_cmdline = get_raw_cmdline() | 1 | 2023-12-12 04:17:13+00:00 | 2k |
SJTU-Quant/SUNNY-GNN | main.py | [
{
"identifier": "train_baseline",
"path": "train/train_baseline.py",
"snippet": "def train(cfg):\ndef train_explain(cfg):"
},
{
"identifier": "train_gnn",
"path": "train/train_gnn.py",
"snippet": "def train(cfg):"
},
{
"identifier": "train_hgn",
"path": "train/train_hgn.py",
"snippet": "def train(cfg):"
}
] | import argparse
import yaml
import os
import torch
import random
import copy
import numpy as np
from train import train_baseline, train_gnn, train_hgn
from tools.get_data import get_dataset | 847 |
def parse_args():
parser = argparse.ArgumentParser(description="Self-explainable GNN/HGN")
parser.add_argument('--method', type=str, default='snexgnn',
help='self-explainable GNN/HGN type',
choices=['snexgnn', 'snexhgn', 'gat', 'gcn', 'simplehgn'])
parser.add_argument('--encoder', type=str, default='gat',
help='GNN/HGN encoder type',
choices=['gat', 'gcn', 'simplehgn'])
parser.add_argument('--dataset', type=str, default='citeseer',
help='dataset name',
choices=['citeseer', 'cora', 'pubmed',
'amazon-photo', 'coauthor-physics', 'coauthor-cs',
'imdb', 'dblp', 'acm'])
parser.add_argument('--gpu', type=int, default=0, help='gpu id')
parser.add_argument('--num_seeds', type=int, default=1, help='number of random seeds')
parser.add_argument('--eval_explanation', type=bool, default=False,
help='whether to evaluate explanation fidelity')
return parser.parse_args()
class Config(object):
def __init__(self, args):
abs_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(abs_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
data_dir = os.path.join(abs_dir, 'dataset', args.dataset)
self.method = args.method
self.encoder_type = args.encoder
self.dataset = args.dataset
self.abs_dir = abs_dir
self.data_dir = data_dir
self.gpu = args.gpu
self.index = None
self.graph_path = f'{data_dir}/{args.dataset}_graph.bin'
self.index_path = f'{data_dir}/{args.dataset}_index.bin'
self.check_dataset()
self.ckpt_dir = os.path.join(abs_dir, 'ckpt')
self.hyparams = self.load_hyperparams(args)
self.eval_explanation = args.eval_explanation
def check_dataset(self):
if not os.path.exists(self.graph_path):
get_dataset(self.dataset, self.data_dir)
def load_hyperparams(self, args):
yml_path = os.path.join(self.abs_dir, 'configs', f'{args.dataset}.yml')
with open(yml_path, 'r') as f:
hyperparams = yaml.load(f, Loader=yaml.FullLoader)
return hyperparams
def set_seed(self, seed):
self.seed = seed
self.encoder_path = f'{self.ckpt_dir}/{self.dataset}/{self.encoder_type}-seed-{seed}-pretrain.pt'
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main():
results = {}
for seed in range(args.num_seeds):
setup_seed(seed)
cfg.set_seed(seed)
print(f'===========seed: {seed}===========')
if cfg.method in ['snexgnn', 'snexhgn']:
print(f"Dataset: {cfg.dataset}, Method: {cfg.method}-{cfg.encoder_type}")
if not os.path.exists(cfg.encoder_path):
print(f"Pretrain {cfg.encoder_type}...")
cfg_cp = copy.deepcopy(cfg)
cfg_cp.method = cfg_cp.encoder_type
|
def parse_args():
parser = argparse.ArgumentParser(description="Self-explainable GNN/HGN")
parser.add_argument('--method', type=str, default='snexgnn',
help='self-explainable GNN/HGN type',
choices=['snexgnn', 'snexhgn', 'gat', 'gcn', 'simplehgn'])
parser.add_argument('--encoder', type=str, default='gat',
help='GNN/HGN encoder type',
choices=['gat', 'gcn', 'simplehgn'])
parser.add_argument('--dataset', type=str, default='citeseer',
help='dataset name',
choices=['citeseer', 'cora', 'pubmed',
'amazon-photo', 'coauthor-physics', 'coauthor-cs',
'imdb', 'dblp', 'acm'])
parser.add_argument('--gpu', type=int, default=0, help='gpu id')
parser.add_argument('--num_seeds', type=int, default=1, help='number of random seeds')
parser.add_argument('--eval_explanation', type=bool, default=False,
help='whether to evaluate explanation fidelity')
return parser.parse_args()
class Config(object):
def __init__(self, args):
abs_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(abs_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
data_dir = os.path.join(abs_dir, 'dataset', args.dataset)
self.method = args.method
self.encoder_type = args.encoder
self.dataset = args.dataset
self.abs_dir = abs_dir
self.data_dir = data_dir
self.gpu = args.gpu
self.index = None
self.graph_path = f'{data_dir}/{args.dataset}_graph.bin'
self.index_path = f'{data_dir}/{args.dataset}_index.bin'
self.check_dataset()
self.ckpt_dir = os.path.join(abs_dir, 'ckpt')
self.hyparams = self.load_hyperparams(args)
self.eval_explanation = args.eval_explanation
def check_dataset(self):
if not os.path.exists(self.graph_path):
get_dataset(self.dataset, self.data_dir)
def load_hyperparams(self, args):
yml_path = os.path.join(self.abs_dir, 'configs', f'{args.dataset}.yml')
with open(yml_path, 'r') as f:
hyperparams = yaml.load(f, Loader=yaml.FullLoader)
return hyperparams
def set_seed(self, seed):
self.seed = seed
self.encoder_path = f'{self.ckpt_dir}/{self.dataset}/{self.encoder_type}-seed-{seed}-pretrain.pt'
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main():
results = {}
for seed in range(args.num_seeds):
setup_seed(seed)
cfg.set_seed(seed)
print(f'===========seed: {seed}===========')
if cfg.method in ['snexgnn', 'snexhgn']:
print(f"Dataset: {cfg.dataset}, Method: {cfg.method}-{cfg.encoder_type}")
if not os.path.exists(cfg.encoder_path):
print(f"Pretrain {cfg.encoder_type}...")
cfg_cp = copy.deepcopy(cfg)
cfg_cp.method = cfg_cp.encoder_type | train_gnn.train(cfg_cp) | 1 | 2023-12-12 02:46:00+00:00 | 2k |
dvmazur/mixtral-offloading | src/expert_wrapper.py | [
{
"identifier": "nested_flatten",
"path": "src/utils.py",
"snippet": "def nested_flatten(t):\n \"\"\"\n Turn nested list/tuple/dict into a flat iterator.\n \"\"\"\n if isinstance(t, (list, tuple)):\n for x in t:\n yield from nested_flatten(x)\n elif isinstance(t, dict):\n for k, v in sorted(t.items()):\n yield from nested_flatten(v)\n else:\n yield t"
},
{
"identifier": "nested_pack",
"path": "src/utils.py",
"snippet": "def nested_pack(flat, structure):\n \"\"\"\n Restore nested structure from flattened state\n :param flat: result of nested_flatten\n :param structure: used as example when recovering structure\n :returns: nested structure like :structure: filled with elements of :flat:\n \"\"\"\n return _nested_pack(iter(flat), structure)"
}
] | import typing as tp
import torch
from torch import nn
from .utils import nested_flatten, nested_pack | 742 |
class MixtralExpertWrapper(nn.Module):
def __init__(
self,
expert_module: tp.Any,
device: torch.device,
):
super().__init__()
expert_module, self.storage = self.replace_layer_storage(expert_module, device)
self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)
self._register_state_dict_hook(self._add_storage_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)
@staticmethod
def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)
return state_dict
def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())
del state_dict[prefix + 'storage']
def forward(self, *args, **kwargs):
return self.expert_module(*args, **kwargs)
@staticmethod
def replace_layer_storage(
layer: tp.Any,
device: torch.device,
):
state_dict = {
f"w{i}": {
"W_q": getattr(layer, f"w{i}").W_q,
"meta": getattr(layer, f"w{i}").meta,
"bias": getattr(layer, f"w{i}").bias,
}
for i in range(1, 4)
}
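        # First pass: add up the byte size of every tensor and record per-tensor
        # offsets so they can all live in one contiguous storage buffer.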
storage_size = 0
offsets = [0]
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
continue
storage_size += x.nbytes
offsets.append(storage_size)
storage = torch.UntypedStorage(storage_size, device=device)
i = 0
new_flattened_states = list()
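        # Second pass: copy each tensor into its slice of the shared storage and
        # keep a view that aliases that slice in place of the original tensor.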
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
new_flattened_states.append(x)
continue
start = offsets[i]
end = offsets[i + 1]
a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)
a_view[...] = x
assert a_view.data_ptr() == storage.data_ptr() + start
i += 1
new_flattened_states.append(a_view)
|
class MixtralExpertWrapper(nn.Module):
def __init__(
self,
expert_module: tp.Any,
device: torch.device,
):
super().__init__()
expert_module, self.storage = self.replace_layer_storage(expert_module, device)
self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)
self._register_state_dict_hook(self._add_storage_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)
@staticmethod
def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)
return state_dict
def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())
del state_dict[prefix + 'storage']
def forward(self, *args, **kwargs):
return self.expert_module(*args, **kwargs)
@staticmethod
def replace_layer_storage(
layer: tp.Any,
device: torch.device,
):
state_dict = {
f"w{i}": {
"W_q": getattr(layer, f"w{i}").W_q,
"meta": getattr(layer, f"w{i}").meta,
"bias": getattr(layer, f"w{i}").bias,
}
for i in range(1, 4)
}
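        # First pass: add up the byte size of every tensor and record per-tensor
        # offsets so they can all live in one contiguous storage buffer.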
storage_size = 0
offsets = [0]
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
continue
storage_size += x.nbytes
offsets.append(storage_size)
storage = torch.UntypedStorage(storage_size, device=device)
i = 0
new_flattened_states = list()
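        # Second pass: copy each tensor into its slice of the shared storage and
        # keep a view that aliases that slice in place of the original tensor.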
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
new_flattened_states.append(x)
continue
start = offsets[i]
end = offsets[i + 1]
a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)
a_view[...] = x
assert a_view.data_ptr() == storage.data_ptr() + start
i += 1
new_flattened_states.append(a_view)
| state_dict = nested_pack(new_flattened_states, state_dict) | 1 | 2023-12-15 03:32:35+00:00 | 2k |
CircleRadon/Osprey | osprey/datasets/stage2_data.py | [
{
"identifier": "preprocess",
"path": "osprey/train/train.py",
"snippet": "def preprocess(\n sources: Sequence[str],\n tokenizer: transformers.PreTrainedTokenizer,\n has_image: bool = False\n) -> Dict:\n \"\"\"\n Given a list of sources, each is a conversation list. This transform:\n 1. Add signal '### ' at the beginning each sentence, with end signal '\\n';\n 2. Concatenate conversations together;\n 3. Tokenize the concatenated conversation;\n 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.\n \"\"\"\n if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:\n return preprocess_plain(sources, tokenizer)\n if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.LLAMA_2:\n return preprocess_llama_2(sources, tokenizer, has_image=has_image)\n if conversation_lib.default_conversation.version.startswith(\"v1\"):\n return preprocess_v1(sources, tokenizer, has_image=has_image)\n # add end signal and concatenate together\n conversations = []\n for source in sources:\n header = f\"{conversation_lib.default_conversation.system}\\n\\n\"\n conversation = _add_speaker_and_signal(header, source)\n conversations.append(conversation)\n # tokenize conversations\n def get_tokenize_len(prompts):\n return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]\n\n if has_image:\n input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]\n else:\n conversations_tokenized = _tokenize_fn(conversations, tokenizer)\n input_ids = conversations_tokenized[\"input_ids\"]\n\n targets = copy.deepcopy(input_ids)\n for target, source in zip(targets, sources):\n if has_image:\n tokenized_lens = get_tokenize_len([header] + [s[\"value\"] for s in source])\n else:\n tokenized_lens = _tokenize_fn([header] + [s[\"value\"] for s in source], tokenizer)[\"input_ids_lens\"]\n speakers = [sentence[\"from\"] for sentence in source]\n _mask_targets(target, tokenized_lens, speakers)\n\n return dict(input_ids=input_ids, labels=targets)"
},
{
"identifier": "preprocess_multimodal",
"path": "osprey/train/train.py",
"snippet": "def preprocess_multimodal(\n sources: Sequence[str],\n data_args: DataArguments,\n cur_token_len: int = 0\n) -> Dict:\n\n for source in sources:\n for sentence in source:\n if DEFAULT_IMAGE_TOKEN in sentence['value']:\n sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()\n sentence['value'] = DEFAULT_IMAGE_TOKEN + '\\n' + sentence['value']\n sentence['value'] = sentence['value'].strip()\n if \"mmtag\" in conversation_lib.default_conversation.version:\n sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '<Image>' + DEFAULT_IMAGE_TOKEN + '</Image>')\n replace_token = DEFAULT_IMAGE_TOKEN\n if data_args.mm_use_im_start_end:\n replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN\n sentence[\"value\"] = sentence[\"value\"].replace(DEFAULT_IMAGE_TOKEN, replace_token)\n\n return sources"
}
] | import copy
import os
import random
import numpy as np
import torch
from osprey.train.train import preprocess, preprocess_multimodal
from torch.utils.data import Dataset
from pycocotools.coco import COCO
from pycocotools import mask as maskUtils
from PIL import Image | 1,598 |
class CustomDataset(Dataset):
def __init__(self,
tokenizer=None,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=20,
):
self.data_args = data_args
self.tokenizer = tokenizer
self.max_gt_per_img = max_gt_per_img
self.img_prefix = img_prefix
self.data_infos = self.load_annotations(ann_file)
super().__init__()
def __len__(self):
return len(self.data_infos)
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.img_ids = self.coco.getImgIds()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
info['height'] = int(info['height'])
info['width'] = int(info['width'])
ann_ids = self.coco.getAnnIds(imgIds=[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info)==0:
continue
data_infos.append(info)
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return ann_info
def annToMask(self, mask_ann, h, w):
if isinstance(mask_ann, list):
rles = maskUtils.frPyObjects(mask_ann, h, w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, h, w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_text(self, data_item):
image = data_item['img']
ori_labels = data_item['gt_labels']
ori_masks = np.array(data_item['gt_masks'])
ori_masks = torch.from_numpy(ori_masks)
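        # Shuffle the ground-truth regions and keep at most max_gt_per_img of them.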
shuffle_ids = torch.randperm(len(ori_labels))
if len(shuffle_ids) > self.max_gt_per_img:
shuffle_ids = shuffle_ids[:self.max_gt_per_img]
ori_masks = ori_masks[shuffle_ids]
ori_labels = [ori_labels[i] for i in shuffle_ids]
sources = dict()
sources['conversations'] = []
# print("num:",len(ori_labels))
for i in range(len(ori_labels)):
question = '<region>'
question = question.replace('<region>', '<mask><pos>')
if i == 0:
question = self.begin_str + question
answer = ori_labels[i]
sources['conversations'].append(
{'from': 'human', 'value': question})
sources['conversations'].append({'from': 'gpt', 'value': answer})
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)
assert image.shape[1] == image.shape[2]
# a hard code [] for sources
sources = preprocess_multimodal(
copy.deepcopy([sources['conversations']]),
self.data_args,
cur_token_len)
# print(sources)
|
class CustomDataset(Dataset):
def __init__(self,
tokenizer=None,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=20,
):
self.data_args = data_args
self.tokenizer = tokenizer
self.max_gt_per_img = max_gt_per_img
self.img_prefix = img_prefix
self.data_infos = self.load_annotations(ann_file)
super().__init__()
def __len__(self):
return len(self.data_infos)
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.img_ids = self.coco.getImgIds()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
info['height'] = int(info['height'])
info['width'] = int(info['width'])
ann_ids = self.coco.getAnnIds(imgIds=[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info)==0:
continue
data_infos.append(info)
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return ann_info
def annToMask(self, mask_ann, h, w):
if isinstance(mask_ann, list):
rles = maskUtils.frPyObjects(mask_ann, h, w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, h, w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_text(self, data_item):
image = data_item['img']
ori_labels = data_item['gt_labels']
ori_masks = np.array(data_item['gt_masks'])
ori_masks = torch.from_numpy(ori_masks)
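        # Shuffle the ground-truth regions and keep at most max_gt_per_img of them.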
shuffle_ids = torch.randperm(len(ori_labels))
if len(shuffle_ids) > self.max_gt_per_img:
shuffle_ids = shuffle_ids[:self.max_gt_per_img]
ori_masks = ori_masks[shuffle_ids]
ori_labels = [ori_labels[i] for i in shuffle_ids]
sources = dict()
sources['conversations'] = []
# print("num:",len(ori_labels))
for i in range(len(ori_labels)):
question = '<region>'
question = question.replace('<region>', '<mask><pos>')
if i == 0:
question = self.begin_str + question
answer = ori_labels[i]
sources['conversations'].append(
{'from': 'human', 'value': question})
sources['conversations'].append({'from': 'gpt', 'value': answer})
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)
assert image.shape[1] == image.shape[2]
# a hard code [] for sources
sources = preprocess_multimodal(
copy.deepcopy([sources['conversations']]),
self.data_args,
cur_token_len)
# print(sources)
| data_dict = preprocess( | 0 | 2023-12-17 16:21:45+00:00 | 2k |
open-mmlab/PIA | animatediff/data/dataset.py | [
{
"identifier": "zero_rank_print",
"path": "animatediff/utils/util.py",
"snippet": "def zero_rank_print(s):\n if (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0): print(\"### \" + s)"
},
{
"identifier": "detect_edges",
"path": "animatediff/utils/util.py",
"snippet": "def detect_edges(lum: np.ndarray) -> np.ndarray:\n \"\"\"Detect edges using the luma channel of a frame.\n\n Arguments:\n lum: 2D 8-bit image representing the luma channel of a frame.\n\n Returns:\n 2D 8-bit image of the same size as the input, where pixels with values of 255\n represent edges, and all other pixels are 0.\n \"\"\"\n # Initialize kernel.\n kernel_size = estimated_kernel_size(lum.shape[1], lum.shape[0])\n kernel = np.ones((kernel_size, kernel_size), np.uint8)\n\n # Estimate levels for thresholding.\n # TODO(0.6.3): Add config file entries for sigma, aperture/kernel size, etc.\n sigma: float = 1.0 / 3.0\n median = np.median(lum)\n low = int(max(0, (1.0 - sigma) * median))\n high = int(min(255, (1.0 + sigma) * median))\n\n # Calculate edges using Canny algorithm, and reduce noise by dilating the edges.\n # This increases edge overlap leading to improved robustness against noise and slow\n # camera movement. Note that very large kernel sizes can negatively affect accuracy.\n edges = cv2.Canny(lum, low, high)\n return cv2.dilate(edges, kernel)"
}
] | import os, io, csv, math, random
import numpy as np
import torch
import torchvision.transforms as transforms
import cv2
from einops import rearrange
from decord import VideoReader
from torch.utils.data.dataset import Dataset
from animatediff.utils.util import zero_rank_print, detect_edges | 851 |
def get_score(video_data,
cond_frame_idx,
weight=[1.0, 1.0, 1.0, 1.0],
use_edge=True):
"""
Similar to get_score under utils/util.py/detect_edges
"""
"""
the shape of video_data is f c h w, np.ndarray
"""
h, w = video_data.shape[1], video_data.shape[2]
cond_frame = video_data[cond_frame_idx]
cond_hsv_list = list(
cv2.split(
cv2.cvtColor(cond_frame.astype(np.float32), cv2.COLOR_RGB2HSV)))
if use_edge:
cond_frame_lum = cond_hsv_list[-1]
cond_frame_edge = detect_edges(cond_frame_lum.astype(np.uint8))
cond_hsv_list.append(cond_frame_edge)
score_sum = []
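    # Score each frame by the weighted sum of absolute HSV (plus edge) differences
    # from the conditioning frame, normalised by pixel count and total weight.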
for frame_idx in range(video_data.shape[0]):
frame = video_data[frame_idx]
hsv_list = list(
cv2.split(cv2.cvtColor(frame.astype(np.float32),
cv2.COLOR_RGB2HSV)))
if use_edge:
frame_img_lum = hsv_list[-1]
frame_img_edge = detect_edges(lum=frame_img_lum.astype(np.uint8))
hsv_list.append(frame_img_edge)
hsv_diff = [
np.abs(hsv_list[c] - cond_hsv_list[c]) for c in range(len(weight))
]
hsv_mse = [np.sum(hsv_diff[c]) * weight[c] for c in range(len(weight))]
score_sum.append(sum(hsv_mse) / (h * w) / (sum(weight)))
return score_sum
class WebVid10M(Dataset):
def __init__(
self,
csv_path, video_folder,
sample_size=256, sample_stride=4, sample_n_frames=16,
is_image=False,
):
|
def get_score(video_data,
cond_frame_idx,
weight=[1.0, 1.0, 1.0, 1.0],
use_edge=True):
"""
Similar to get_score under utils/util.py/detect_edges
"""
"""
the shape of video_data is f c h w, np.ndarray
"""
h, w = video_data.shape[1], video_data.shape[2]
cond_frame = video_data[cond_frame_idx]
cond_hsv_list = list(
cv2.split(
cv2.cvtColor(cond_frame.astype(np.float32), cv2.COLOR_RGB2HSV)))
if use_edge:
cond_frame_lum = cond_hsv_list[-1]
cond_frame_edge = detect_edges(cond_frame_lum.astype(np.uint8))
cond_hsv_list.append(cond_frame_edge)
score_sum = []
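    # Score each frame by the weighted sum of absolute HSV (plus edge) differences
    # from the conditioning frame, normalised by pixel count and total weight.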
for frame_idx in range(video_data.shape[0]):
frame = video_data[frame_idx]
hsv_list = list(
cv2.split(cv2.cvtColor(frame.astype(np.float32),
cv2.COLOR_RGB2HSV)))
if use_edge:
frame_img_lum = hsv_list[-1]
frame_img_edge = detect_edges(lum=frame_img_lum.astype(np.uint8))
hsv_list.append(frame_img_edge)
hsv_diff = [
np.abs(hsv_list[c] - cond_hsv_list[c]) for c in range(len(weight))
]
hsv_mse = [np.sum(hsv_diff[c]) * weight[c] for c in range(len(weight))]
score_sum.append(sum(hsv_mse) / (h * w) / (sum(weight)))
return score_sum
class WebVid10M(Dataset):
def __init__(
self,
csv_path, video_folder,
sample_size=256, sample_stride=4, sample_n_frames=16,
is_image=False,
): | zero_rank_print(f"loading annotations from {csv_path} ...") | 0 | 2023-12-21 03:29:34+00:00 | 2k |
VikParuchuri/texify | ocr_image.py | [
{
"identifier": "batch_inference",
"path": "texify/inference.py",
"snippet": "def batch_inference(images, model, processor, temperature=settings.TEMPERATURE, max_tokens=settings.MAX_TOKENS):\n images = [image.convert(\"RGB\") for image in images]\n encodings = processor(images=images, return_tensors=\"pt\", add_special_tokens=False)\n pixel_values = encodings[\"pixel_values\"].to(model.dtype)\n pixel_values = pixel_values.to(model.device)\n\n additional_kwargs = {}\n if temperature > 0:\n additional_kwargs[\"temperature\"] = temperature\n additional_kwargs[\"do_sample\"] = True\n additional_kwargs[\"top_p\"] = 0.95\n\n generated_ids = model.generate(\n pixel_values=pixel_values,\n max_new_tokens=max_tokens,\n decoder_start_token_id=processor.tokenizer.bos_token_id,\n **additional_kwargs,\n )\n\n generated_text = processor.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)\n generated_text = [postprocess(text) for text in generated_text]\n return generated_text"
},
{
"identifier": "load_model",
"path": "texify/model/model.py",
"snippet": "def load_model(checkpoint=settings.MODEL_CHECKPOINT, device=settings.TORCH_DEVICE_MODEL, dtype=settings.MODEL_DTYPE):\n config = get_config(checkpoint)\n AutoModel.register(VariableDonutSwinConfig, VariableDonutSwinModel)\n\n model = VisionEncoderDecoderModel.from_pretrained(checkpoint, config=config, torch_dtype=dtype)\n model = model.to(device)\n model = model.eval()\n print(f\"Loaded texify model to {device} with {dtype} dtype\")\n return model"
},
{
"identifier": "load_processor",
"path": "texify/model/processor.py",
"snippet": "def load_processor():\n AutoImageProcessor.register(VariableDonutSwinConfig, VariableDonutImageProcessor)\n processor = VariableDonutProcessor.from_pretrained(settings.MODEL_CHECKPOINT)\n processor.image_processor.max_size = settings.MAX_IMAGE_SIZE\n processor.image_processor.size = [settings.MAX_IMAGE_SIZE[\"height\"], settings.MAX_IMAGE_SIZE[\"width\"]]\n processor.image_processor.image_mean = IMAGE_MEAN\n processor.image_processor.image_std = IMAGE_STD\n processor.image_processor.train = False\n\n processor.tokenizer.model_max_length = settings.MAX_TOKENS\n processor.train = False\n return processor"
},
{
"identifier": "replace_katex_invalid",
"path": "texify/output.py",
"snippet": "def replace_katex_invalid(string):\n # KaTeX cannot render all LaTeX, so we need to replace some things\n string = re.sub(r'\\\\tag\\{.*?\\}', '', string)\n string = re.sub(r'\\\\(?:Bigg?|bigg?)\\{(.*?)\\}', r'\\1', string)\n string = re.sub(r'\\\\quad\\\\mbox\\{(.*?)\\}', r'\\1', string)\n string = re.sub(r'\\\\mbox\\{(.*?)\\}', r'\\1', string)\n string = remove_inner_dollars(string)\n return string"
},
{
"identifier": "settings",
"path": "texify/settings.py",
"snippet": "class Settings(BaseSettings):\n class Config:\n TORCH_DEVICE: Optional[str] = None\n MAX_TOKENS: int = 384 # Will not work well above 768, since it was not trained with more\n MAX_IMAGE_SIZE: Dict = {\"height\": 420, \"width\": 420}\n MODEL_CHECKPOINT: str = \"vikp/texify\"\n BATCH_SIZE: int = 16 # Should use ~5GB of RAM\n DATA_DIR: str = \"data\"\n TEMPERATURE: float = 0.0 # Temperature for generation, 0.0 means greedy\n def TORCH_DEVICE_MODEL(self) -> str:\n def CUDA(self) -> bool:\n def MODEL_DTYPE(self) -> torch.dtype:"
},
{
"identifier": "is_valid_image",
"path": "texify/util.py",
"snippet": "def is_valid_image(file_path):\n if not os.path.isfile(file_path):\n return False\n\n filename = os.path.basename(file_path)\n if filename.startswith(\".\"):\n return False\n\n try:\n with Image.open(file_path) as img:\n img.verify()\n return True\n except Exception:\n return False"
}
] | import argparse
import os.path
import json
from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor
from PIL import Image
from texify.output import replace_katex_invalid
from texify.settings import settings
from texify.util import is_valid_image | 1,160 |
def inference_single_image(image_path, json_path, model, processor, katex_compatible=False):
image = Image.open(image_path)
text = batch_inference([image], model, processor)
if katex_compatible:
text = [replace_katex_invalid(t) for t in text]
write_data = [{"image_path": image_path, "text": text[0]}]
with open(json_path, "w+") as f:
json_repr = json.dumps(write_data, indent=4)
f.write(json_repr)
def inference_image_dir(image_dir, json_path, model, processor, max=None, katex_compatible=False):
image_paths = [os.path.join(image_dir, image_name) for image_name in os.listdir(image_dir)]
|
def inference_single_image(image_path, json_path, model, processor, katex_compatible=False):
image = Image.open(image_path)
text = batch_inference([image], model, processor)
if katex_compatible:
text = [replace_katex_invalid(t) for t in text]
write_data = [{"image_path": image_path, "text": text[0]}]
with open(json_path, "w+") as f:
json_repr = json.dumps(write_data, indent=4)
f.write(json_repr)
def inference_image_dir(image_dir, json_path, model, processor, max=None, katex_compatible=False):
image_paths = [os.path.join(image_dir, image_name) for image_name in os.listdir(image_dir)] | image_paths = [ip for ip in image_paths if is_valid_image(ip)] | 5 | 2023-12-18 22:59:58+00:00 | 2k |
dcharatan/pixelsplat | src/visualization/drawing/points.py | [
{
"identifier": "generate_conversions",
"path": "src/visualization/drawing/coordinate_conversion.py",
"snippet": "def generate_conversions(\n shape: tuple[int, int],\n device: torch.device,\n x_range: Optional[Pair] = None,\n y_range: Optional[Pair] = None,\n) -> tuple[\n ConversionFunction, # conversion from world coordinates to pixel coordinates\n ConversionFunction, # conversion from pixel coordinates to world coordinates\n]:\n h, w = shape\n x_range = sanitize_pair((0, w) if x_range is None else x_range, device)\n y_range = sanitize_pair((0, h) if y_range is None else y_range, device)\n minima, maxima = torch.stack((x_range, y_range), dim=-1)\n wh = torch.tensor((w, h), dtype=torch.float32, device=device)\n\n def convert_world_to_pixel(\n xy: Float[Tensor, \"*batch 2\"],\n ) -> Float[Tensor, \"*batch 2\"]:\n return (xy - minima) / (maxima - minima) * wh\n\n def convert_pixel_to_world(\n xy: Float[Tensor, \"*batch 2\"],\n ) -> Float[Tensor, \"*batch 2\"]:\n return xy / wh * (maxima - minima) + minima\n\n return convert_world_to_pixel, convert_pixel_to_world"
},
{
"identifier": "render_over_image",
"path": "src/visualization/drawing/rendering.py",
"snippet": "def render_over_image(\n image: Float[Tensor, \"3 height width\"],\n color_function: ColorFunction,\n device: torch.device,\n subdivision: int = 8,\n num_passes: int = 1,\n) -> Float[Tensor, \"3 height width\"]:\n _, h, w = image.shape\n overlay = render(\n (h, w),\n color_function,\n device,\n subdivision=subdivision,\n num_passes=num_passes,\n )\n color, alpha = overlay.split((3, 1), dim=0)\n return image * (1 - alpha) + color * alpha"
},
{
"identifier": "Pair",
"path": "src/visualization/drawing/types.py",
"snippet": "def sanitize_vector(\n vector: Vector,\n dim: int,\n device: torch.device,\n) -> Float[Tensor, \"*#batch dim\"]:\ndef sanitize_scalar(scalar: Scalar, device: torch.device) -> Float[Tensor, \"*#batch\"]:\ndef sanitize_pair(pair: Pair, device: torch.device) -> Float[Tensor, \"2\"]:"
}
] | from typing import Optional
from einops import repeat
from jaxtyping import Float
from torch import Tensor
from .coordinate_conversion import generate_conversions
from .rendering import render_over_image
from .types import Pair, Scalar, Vector, sanitize_scalar, sanitize_vector
import torch | 839 |
def draw_points(
image: Float[Tensor, "3 height width"],
points: Vector,
color: Vector = [1, 1, 1],
radius: Scalar = 1,
inner_radius: Scalar = 0,
num_msaa_passes: int = 1,
x_range: Optional[Pair] = None,
y_range: Optional[Pair] = None,
) -> Float[Tensor, "3 height width"]:
device = image.device
points = sanitize_vector(points, 2, device)
color = sanitize_vector(color, 3, device)
radius = sanitize_scalar(radius, device)
inner_radius = sanitize_scalar(inner_radius, device)
(num_points,) = torch.broadcast_shapes(
points.shape[0],
color.shape[0],
radius.shape,
inner_radius.shape,
)
# Convert world-space points to pixel space.
_, h, w = image.shape
|
def draw_points(
image: Float[Tensor, "3 height width"],
points: Vector,
color: Vector = [1, 1, 1],
radius: Scalar = 1,
inner_radius: Scalar = 0,
num_msaa_passes: int = 1,
x_range: Optional[Pair] = None,
y_range: Optional[Pair] = None,
) -> Float[Tensor, "3 height width"]:
device = image.device
points = sanitize_vector(points, 2, device)
color = sanitize_vector(color, 3, device)
radius = sanitize_scalar(radius, device)
inner_radius = sanitize_scalar(inner_radius, device)
(num_points,) = torch.broadcast_shapes(
points.shape[0],
color.shape[0],
radius.shape,
inner_radius.shape,
)
# Convert world-space points to pixel space.
_, h, w = image.shape | world_to_pixel, _ = generate_conversions((h, w), device, x_range, y_range) | 0 | 2023-12-20 19:45:59+00:00 | 2k |
nianhua99/PandoraNext-Helper | share/share.py | [
{
"identifier": "db",
"path": "model.py",
"snippet": "class User(db.Model):\n def keys(self):\n def __getitem__(self, item):\n def __repr__(self):"
},
{
"identifier": "share_tools",
"path": "util/share_tools.py",
"snippet": "def get_host():\ndef get_share_token(access_token, unique_name, expires_in=0, show_conversations=False, show_userinfo=True):\ndef get_share_token_info(share_token, access_token=None):"
},
{
"identifier": "ApiResponse",
"path": "util/api_response.py",
"snippet": "class ApiResponse:\n\n @staticmethod\n def success(data):\n return jsonify({\n 'status': 0,\n 'message': '请求成功',\n 'data': data\n })\n\n @staticmethod\n def error(message, status=-1):\n return jsonify({\n 'status': status,\n 'message': message\n }), 500\n\n @staticmethod\n def unauthorized(message):\n return jsonify({\n 'status': 444,\n 'message': message\n }), 444"
},
{
"identifier": "sync_pandora",
"path": "util/pandora_tools.py",
"snippet": "def sync_pandora():\n make_json()\n fresh_setup()"
}
] | import json
from datetime import datetime
from flask import Blueprint, request
from flask_jwt_extended import jwt_required
from loguru import logger
from sqlalchemy import and_, text
from model import db, User
from util import share_tools
from util.api_response import ApiResponse
from util.pandora_tools import sync_pandora | 680 |
share_bp = Blueprint('share_bp', __name__)
def account2share(accounts):
shares = []
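    # Flatten each account's JSON-encoded share_list, tagging every share with the
    # owning account's email and id.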
for account in accounts:
_share_list = json.loads(account.share_list)
for share in _share_list:
share['email'] = account.email
share['account_id'] = account.id
shares.append(share)
return shares
@share_bp.route('/list')
@jwt_required()
def share_list():
accounts = db.session.query(User).all()
return ApiResponse.success(account2share(accounts))
@share_bp.route('/search', methods=['POST'])
@jwt_required()
def search():
    # Fuzzy-match accounts by email and unique_name
email = request.json.get('email')
unique_name = request.json.get('unique_name')
accounts = db.session.query(User).filter(and_(User.email.like(f'%{email}%') if email else text(''), User.share_list.like(f'%{unique_name}%') if unique_name else text(''))).all()
shares = account2share(accounts)
if unique_name:
shares = list(filter(lambda x: unique_name in x['unique_name'], shares))
return ApiResponse.success(shares)
@share_bp.route('/add', methods=['POST'])
@jwt_required()
def share_add():
account_id = request.json.get('account_id')
unique_name = request.json.get('unique_name')
password = request.json.get('password')
comment = request.form.get('comment')
account = db.session.query(User).filter_by(id=account_id).first()
if account:
if not account.access_token:
return ApiResponse.error('请先登录账号')
else:
try:
|
share_bp = Blueprint('share_bp', __name__)
def account2share(accounts):
shares = []
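    # Flatten each account's JSON-encoded share_list, tagging every share with the
    # owning account's email and id.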
for account in accounts:
_share_list = json.loads(account.share_list)
for share in _share_list:
share['email'] = account.email
share['account_id'] = account.id
shares.append(share)
return shares
@share_bp.route('/list')
@jwt_required()
def share_list():
accounts = db.session.query(User).all()
return ApiResponse.success(account2share(accounts))
@share_bp.route('/search', methods=['POST'])
@jwt_required()
def search():
    # Fuzzy-match accounts by email and unique_name
email = request.json.get('email')
unique_name = request.json.get('unique_name')
accounts = db.session.query(User).filter(and_(User.email.like(f'%{email}%') if email else text(''), User.share_list.like(f'%{unique_name}%') if unique_name else text(''))).all()
shares = account2share(accounts)
if unique_name:
shares = list(filter(lambda x: unique_name in x['unique_name'], shares))
return ApiResponse.success(shares)
@share_bp.route('/add', methods=['POST'])
@jwt_required()
def share_add():
account_id = request.json.get('account_id')
unique_name = request.json.get('unique_name')
password = request.json.get('password')
comment = request.form.get('comment')
account = db.session.query(User).filter_by(id=account_id).first()
if account:
if not account.access_token:
return ApiResponse.error('请先登录账号')
else:
try: | res = share_tools.get_share_token(account.access_token, unique_name) | 1 | 2023-12-18 13:18:50+00:00 | 2k |
shroominic/fastui-chat | src/fastui_chat/chat.py | [
{
"identifier": "ChatInputForm",
"path": "src/fastui_chat/components.py",
"snippet": "class ChatInputForm(c.Form):\n \"\"\"\n Component for displaying a chat input form.\n \"\"\"\n\n fire_page_event: str\n display_mode: str = \"inline\"\n class_name: str = \"row row-cols-lg-3 justify-content-center\"\n form_fields: list[c.FormFieldInput] = [\n c.FormFieldInput(\n title=\"\",\n name=\"user_msg\",\n placeholder=\"Message ChatBot...\",\n class_name=\"py-4\",\n ),\n ]\n\n def __init__(\n self,\n *,\n submit_url: str,\n fire_page_event: str,\n **data: Any,\n ) -> None:\n data[\"submit_url\"] = submit_url\n data[\"fire_page_event\"] = fire_page_event\n super().__init__(**data, footer=[])\n self.footer = [\n c.FireEvent(event=e.PageEvent(name=self.fire_page_event)),\n ]"
},
{
"identifier": "ChatMessage",
"path": "src/fastui_chat/components.py",
"snippet": "class ChatMessage(c.Div):\n \"\"\"\n Component for displaying a chat message.\n \"\"\"\n\n content: Union[str, list[Union[str, dict]]]\n msg_type: Literal[\"human\", \"ai\"]\n class_name: str = \"container col-sm-4 my-4\"\n display_alias: DisplayAlias = {\"human\": \"You\", \"ai\": \"ChatBot\"}\n\n @property\n def images(self) -> list[str]:\n \"\"\"Return a list of image URLs in the message content.\"\"\"\n if isinstance(self.content, str):\n return []\n return [\n (\n item[\"image_url\"][\"url\"]\n if isinstance(item[\"image_url\"], dict)\n else item[\"image_url\"]\n )\n for item in self.content\n if isinstance(item, dict) and item[\"type\"] == \"image_url\"\n ]\n\n @property\n def message(self) -> str:\n \"\"\"Return the message content.\"\"\"\n return (\n self.content\n if isinstance(self.content, str)\n else self.content[0]\n if isinstance(self.content[0], str)\n else self.content[0][\"text\"]\n )\n\n def __init__(\n self,\n msg_type: Literal[\"human\", \"ai\"],\n content: Union[str, list[Union[str, dict]]],\n **data: Any,\n ) -> None:\n if msg_type == \"AIMessageChunk\":\n msg_type = \"ai\"\n data[\"msg_type\"] = msg_type\n data[\"content\"] = content\n super().__init__(**data, components=[])\n self.components = [\n c.Heading(text=self.display_alias[self.msg_type], level=6),\n c.Markdown(text=self.message),\n *(\n c.Image(\n src=image_url,\n class_name=\"img-fluid\",\n )\n for image_url in self.images\n ),\n ]"
},
{
"identifier": "get_history",
"path": "src/fastui_chat/db.py",
"snippet": "async def get_history() -> AsyncGenerator[BaseChatMessageHistory, None]:\n if \"chat_history\" not in database:\n raise RuntimeError(\"Database not initialized\")\n yield database[\"chat_history\"]"
},
{
"identifier": "get_session",
"path": "src/fastui_chat/db.py",
"snippet": "async def get_session() -> AsyncGenerator[ChatSession, None]:\n if \"chat_history\" not in database or \"chat_handler\" not in database:\n raise RuntimeError(\"Database not initialized\")\n yield ChatSession(\n history=database[\"chat_history\"],\n chat_handler=database[\"chat_handler\"],\n )"
},
{
"identifier": "ChatSession",
"path": "src/fastui_chat/session.py",
"snippet": "class ChatSession:\n def __init__(\n self,\n *,\n chat_handler: Runnable[HumanMessage, AIMessage],\n history: BaseChatMessageHistory,\n ) -> None:\n self.history = history\n self.chat_handler = chat_handler\n\n async def astream(self, user_msg: str):\n async for message in self.chat_handler.astream(\n HumanMessage(content=user_msg),\n config={\n \"run_name\": \"ChatMessage\",\n \"configurable\": {\"session_id\": \"\"},\n },\n ):\n yield message"
}
] | from typing import Annotated, AsyncIterable
from fastapi import APIRouter, Depends, Form
from fastapi.responses import StreamingResponse
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import PageEvent
from langchain_core.chat_history import BaseChatMessageHistory
from .components import ChatInputForm, ChatMessage
from .db import get_history, get_session
from .session import ChatSession
import asyncio | 1,428 |
router = APIRouter()
@router.get("/", response_model=FastUI, response_model_exclude_none=True)
async def chat_ui() -> list[AnyComponent]:
"""
Main endpoint for showing the Chat UI and handling user input.
"""
return [
c.Page(
components=[
c.ServerLoad(
path="/chat/history",
load_trigger=PageEvent(name="chat-load"),
components=[],
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="chat-load",
),
],
)
]
@router.get("/chat/history", response_model=FastUI, response_model_exclude_none=True)
async def chat_history(
history: Annotated[BaseChatMessageHistory, Depends(get_history)],
) -> list[AnyComponent]:
"""
Endpoint for showing the Chat History UI.
"""
return [ChatMessage(msg.type, msg.content) for msg in history.messages]
@router.post("/chat/generate", response_model=FastUI, response_model_exclude_none=True)
async def chat_generate(user_msg: Annotated[str, Form(...)]) -> list[AnyComponent]:
"""
Endpoint for showing the Chat Generate UI.
"""
return [
ChatMessage("human", user_msg),
c.ServerLoad(
path="/chat/generate/response?user_msg=" + user_msg,
load_trigger=PageEvent(name="generate-response"),
components=[c.Text(text="...")],
sse=True,
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="generate-response",
),
]
@router.get("/chat/generate/response")
async def sse_ai_response(
user_msg: str,
|
router = APIRouter()
@router.get("/", response_model=FastUI, response_model_exclude_none=True)
async def chat_ui() -> list[AnyComponent]:
"""
Main endpoint for showing the Chat UI and handling user input.
"""
return [
c.Page(
components=[
c.ServerLoad(
path="/chat/history",
load_trigger=PageEvent(name="chat-load"),
components=[],
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="chat-load",
),
],
)
]
@router.get("/chat/history", response_model=FastUI, response_model_exclude_none=True)
async def chat_history(
history: Annotated[BaseChatMessageHistory, Depends(get_history)],
) -> list[AnyComponent]:
"""
Endpoint for showing the Chat History UI.
"""
return [ChatMessage(msg.type, msg.content) for msg in history.messages]
@router.post("/chat/generate", response_model=FastUI, response_model_exclude_none=True)
async def chat_generate(user_msg: Annotated[str, Form(...)]) -> list[AnyComponent]:
"""
Endpoint for showing the Chat Generate UI.
"""
return [
ChatMessage("human", user_msg),
c.ServerLoad(
path="/chat/generate/response?user_msg=" + user_msg,
load_trigger=PageEvent(name="generate-response"),
components=[c.Text(text="...")],
sse=True,
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="generate-response",
),
]
@router.get("/chat/generate/response")
async def sse_ai_response(
user_msg: str, | session: Annotated[ChatSession, Depends(get_session)], | 3 | 2023-12-17 15:07:48+00:00 | 2k |
SHI-Labs/VCoder | vcoder_llava/model/vcoder_ds_llava_arch.py | [
{
"identifier": "build_vision_tower",
"path": "vcoder_llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identifier": "build_vision_projector",
"path": "vcoder_llava/model/multimodal_projector/builder.py",
"snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')"
},
{
"identifier": "build_seg_projector",
"path": "vcoder_llava/model/multimodal_adapter/builder.py",
"snippet": "def build_seg_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'seg_mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.seg_mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.seg_mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown seg projector type: {projector_type}')"
},
{
"identifier": "build_depth_projector",
"path": "vcoder_llava/model/multimodal_depth_adapter/builder.py",
"snippet": "def build_depth_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'depth_mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.depth_mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.depth_mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown depth projector type: {projector_type}')"
},
{
"identifier": "IGNORE_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "SEG_TOKEN_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "SEG_TOKEN_INDEX = -300"
},
{
"identifier": "DEPTH_TOKEN_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "DEPTH_TOKEN_INDEX = -400"
}
] | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from .multimodal_adapter.builder import build_seg_projector
from .multimodal_depth_adapter.builder import build_depth_projector
from vcoder_llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX
import torch
import torch.nn as nn | 1,210 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VCoderDSLlavaMetaModel:
def __init__(self, config):
super(VCoderDSLlavaMetaModel, self).__init__(config)
self.config = config
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VCoderDSLlavaMetaModel:
def __init__(self, config):
super(VCoderDSLlavaMetaModel, self).__init__(config)
self.config = config
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True) | self.mm_projector = build_vision_projector(config) | 1 | 2023-12-17 07:46:27+00:00 | 2k |
galatolofederico/microchain | microchain/engine/engine.py | [
{
"identifier": "Function",
"path": "microchain/engine/function.py",
"snippet": "class Function:\n def __init__(self):\n self.call_signature = inspect.signature(self.__call__) \n self.call_parameters = []\n for name, parameter in self.call_signature.parameters.items():\n if parameter.annotation == inspect._empty:\n raise ValueError(f\"Parameter {name} must have an annotation\")\n \n self.call_parameters.append(dict(\n name=name,\n annotation=parameter.annotation\n ))\n self.state = None\n self.engine = None\n \n def bind(self, *, state, engine):\n self.state = state\n self.engine = engine\n\n @property\n def name(self):\n return type(self).__name__\n\n @property\n def example(self):\n if not isinstance(self.example_args, list):\n raise ValueError(\"example_args must be a list\")\n if len(self.example_args) != len(self.call_parameters):\n raise ValueError(f\"example_args must have the same length as call_parameters ({len(self.call_parameters)})\")\n\n bound = self.call_signature.bind(*self.example_args)\n \n return f\"{self.name}({', '.join([f'{name}={value}' for name, value in bound.arguments.items()])})\"\n \n @property\n def signature(self):\n arguments = [f\"{parameter['name']}: {parameter['annotation'].__name__}\" for parameter in self.call_parameters]\n return f\"{self.name}({', '.join(arguments)})\"\n\n @property\n def help(self):\n return f\"{self.signature}\\n{self.description}.\\nExample: {self.example}\\n\"\n\n @property\n def error(self):\n return f\"Error: wrong format. Use {self.signature}. Example: {self.example}. Please try again.\"\n\n def check_bind(self):\n if self.state is None:\n raise ValueError(\"You must register the function to an Engine\")\n\n def safe_call(self, args, kwargs):\n self.check_bind()\n try:\n return FunctionResult.SUCCESS, str(self.__call__(*args, **kwargs))\n except Exception as e:\n print(colored(f\"Exception in Function call {e}\", \"red\"))\n print(colored(''.join(traceback.TracebackException.from_exception(e).format()), \"red\"))\n return FunctionResult.ERROR, self.error\n\n def __call__(self, command):\n raise NotImplementedError"
},
{
"identifier": "FunctionResult",
"path": "microchain/engine/function.py",
"snippet": "class FunctionResult(enum.Enum):\n SUCCESS = 0\n ERROR = 1"
}
] | import ast
from microchain.engine.function import Function, FunctionResult | 801 |
class Engine:
def __init__(self, state=dict()):
self.state = state
self.functions = dict()
self.help_called = False
self.agent = None
def register(self, function):
self.functions[function.name] = function
function.bind(state=self.state, engine=self)
def bind(self, agent):
self.agent = agent
def stop(self):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before stopping")
self.agent.stop()
def execute(self, command):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before executing commands")
if not self.help_called:
raise ValueError("You never accessed the help property. Building a prompt without including the help string is a very bad idea.")
try:
tree = ast.parse(command)
except SyntaxError:
|
class Engine:
def __init__(self, state=dict()):
self.state = state
self.functions = dict()
self.help_called = False
self.agent = None
def register(self, function):
self.functions[function.name] = function
function.bind(state=self.state, engine=self)
def bind(self, agent):
self.agent = agent
def stop(self):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before stopping")
self.agent.stop()
def execute(self, command):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before executing commands")
if not self.help_called:
raise ValueError("You never accessed the help property. Building a prompt without including the help string is a very bad idea.")
try:
tree = ast.parse(command)
except SyntaxError: | return FunctionResult.ERROR, f"Error: syntax error in command {command}. Please try again." | 1 | 2023-12-19 10:57:56+00:00 | 2k |
OSU-NLP-Group/SeeAct | src/data_utils/format_prompt_utils.py | [
{
"identifier": "get_tree_repr",
"path": "src/data_utils/dom_utils.py",
"snippet": "def get_tree_repr(\n tree, max_value_length=5, max_length=20, id_mapping={}, keep_html_brackets=False\n):\n if isinstance(tree, str):\n tree = etree.fromstring(tree)\n else:\n tree = copy.deepcopy(tree)\n for node in tree.xpath(\"//*\"):\n if node.tag != \"text\":\n if \"backend_node_id\" in node.attrib:\n if node.attrib[\"backend_node_id\"] not in id_mapping:\n id_mapping[node.attrib[\"backend_node_id\"]] = len(id_mapping)\n node.attrib[\"backend_node_id\"] = str(\n id_mapping[node.attrib[\"backend_node_id\"]]\n )\n get_attribute_repr(node, max_value_length, max_length)\n else:\n node.text = \" \".join(node.text.split()[:max_length])\n tree_repr = etree.tostring(tree, encoding=\"unicode\")\n\n tree_repr = tree_repr.replace('\"', \" \")\n tree_repr = (\n tree_repr.replace(\"meta= \", \"\").replace(\"id= \", \"id=\").replace(\" >\", \">\")\n )\n tree_repr = re.sub(r\"<text>(.*?)</text>\", r\"\\1\", tree_repr)\n if not keep_html_brackets:\n tree_repr = tree_repr.replace(\"/>\", \"$/$>\")\n tree_repr = re.sub(r\"</(.+?)>\", r\")\", tree_repr)\n tree_repr = re.sub(r\"<(.+?)>\", r\"(\\1\", tree_repr)\n tree_repr = tree_repr.replace(\"$/$\", \")\")\n\n html_escape_table = [\n (\""\", '\"'),\n (\"&\", \"&\"),\n (\"<\", \"<\"),\n (\">\", \">\"),\n (\" \", \" \"),\n (\"–\", \"-\"),\n (\"’\", \"'\"),\n (\"‘\", \"'\"),\n (\"“\", '\"'),\n (\"”\", '\"'),\n (\"'\", \"'\"),\n (\"(\", \"(\"),\n (\")\", \")\"),\n ]\n for k, v in html_escape_table:\n tree_repr = tree_repr.replace(k, v)\n tree_repr = re.sub(r\"\\s+\", \" \", tree_repr).strip()\n\n return tree_repr, id_mapping"
},
{
"identifier": "data_prune_tree",
"path": "src/data_utils/dom_utils.py",
"snippet": "def data_prune_tree(\n dom_tree,\n candidate_set,\n max_depth=5,\n max_children=50,\n max_sibling=3,\n):\n nodes_to_keep = set()\n for candidate_id in candidate_set:\n candidate_node = dom_tree.xpath(f'//*[@backend_node_id=\"{candidate_id}\"]')[0]\n nodes_to_keep.add(candidate_node.attrib[\"backend_node_id\"])\n # get all ancestors\n nodes_to_keep.update(\n [\n x.attrib.get(\"backend_node_id\", \"\")\n for x in candidate_node.xpath(\"ancestor::*\")\n ]\n )\n # get descendants with max depth\n nodes_to_keep.update(\n [\n x.attrib.get(\"backend_node_id\", \"\")\n for x in get_descendants(candidate_node, max_depth)\n ][:max_children]\n )\n # get siblings within range\n parent = candidate_node.getparent()\n if parent is not None:\n siblings = [x for x in parent.getchildren() if x.tag != \"text\"]\n idx_in_sibling = siblings.index(candidate_node)\n nodes_to_keep.update(\n [\n x.attrib.get(\"backend_node_id\", \"\")\n for x in siblings[\n max(0, idx_in_sibling - max_sibling): idx_in_sibling\n + max_sibling\n + 1\n ]\n ]\n )\n # clone the tree\n new_tree = copy.deepcopy(dom_tree)\n # remove nodes not in nodes_to_keep\n for node in new_tree.xpath(\"//*\")[::-1]:\n if node.tag != \"text\":\n is_keep = node.attrib.get(\"backend_node_id\", \"\") in nodes_to_keep\n is_candidate = node.attrib.get(\"backend_node_id\", \"\") in candidate_set\n else:\n is_keep = (\n node.getparent().attrib.get(\"backend_node_id\", \"\") in nodes_to_keep\n )\n is_candidate = (\n node.getparent().attrib.get(\"backend_node_id\", \"\") in candidate_set\n )\n if not is_keep and node.getparent() is not None:\n node.getparent().remove(node)\n else:\n if not is_candidate or node.tag == \"text\":\n node.attrib.pop(\"backend_node_id\", None)\n if (\n len(node.attrib) == 0\n and not any([x.tag == \"text\" for x in node.getchildren()])\n and node.getparent() is not None\n and node.tag != \"text\"\n and len(node.getchildren()) <= 1\n ):\n # insert all children into parent\n for child in node.getchildren():\n node.addprevious(child)\n node.getparent().remove(node)\n return new_tree, nodes_to_keep"
}
] | import string
import lxml
from .dom_utils import get_tree_repr, data_prune_tree | 1,404 | # -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def data_format_input_multichoice(
sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False
):
# Parse html into a dom tree
dom_tree = lxml.etree.fromstring(sample["cleaned_html"])
dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids)
| # -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def data_format_input_multichoice(
sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False
):
# Parse html into a dom tree
dom_tree = lxml.etree.fromstring(sample["cleaned_html"])
dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids) | tree_repr, id_mapping = get_tree_repr( | 0 | 2023-12-21 18:22:11+00:00 | 2k |
DeepWok/mase | machop/chop/passes/graph/analysis/add_metadata/add_software_metadata.py | [
{
"identifier": "get_mase_op",
"path": "machop/chop/passes/graph/utils.py",
"snippet": "def get_mase_op(node):\n return node.meta[\"mase\"].parameters[\"common\"][\"mase_op\"]"
},
{
"identifier": "get_mase_type",
"path": "machop/chop/passes/graph/utils.py",
"snippet": "def get_mase_type(node):\n return node.meta[\"mase\"].parameters[\"common\"][\"mase_type\"]"
},
{
"identifier": "SOFTWARE_PARAM_ANALYSIS_LAYERS",
"path": "machop/chop/passes/graph/analysis/add_metadata/software_metadata_layers.py",
"snippet": "SOFTWARE_PARAM_ANALYSIS_LAYERS = {\n \"module\": {\n \"batch_norm1d\": analyze_software_meta_param_nn_module_batch_norm,\n \"batch_norm2d\": analyze_software_meta_param_nn_module_batch_norm,\n # default:\n \"default\": analyze_software_meta_param_nn_module_default,\n },\n \"module_related_func\": {\n \"adaptive_avg_pool1d\": analyze_software_meta_param_module_related_func_default,\n \"adaptive_avg_pool2d\": analyze_software_meta_param_module_related_func_default,\n \"adaptive_max_pool1d\": analyze_software_meta_param_module_related_func_default,\n \"adaptive_max_pool2d\": analyze_software_meta_param_module_related_func_default,\n \"avg_pool1d\": analyze_software_meta_param_module_related_func_default,\n \"avg_pool2d\": analyze_software_meta_param_module_related_func_default,\n \"batch_norm\": analyze_software_meta_param_module_related_func_default,\n \"conv1d\": analyze_software_meta_param_module_related_func_default,\n \"conv2d\": analyze_software_meta_param_module_related_func_default,\n \"layer_norm\": analyze_software_meta_param_module_related_func_default,\n \"linear\": analyze_software_meta_param_module_related_func_default,\n \"max_pool1d\": analyze_software_meta_param_module_related_func_default,\n \"max_pool2d\": analyze_software_meta_param_module_related_func_default,\n \"relu\": analyze_software_meta_param_module_related_func_default,\n # NOTE: These ops were added to support MobileNetV2 and MobileNetV3\n \"relu6\": analyze_software_meta_param_module_related_func_default,\n \"hardswish\": analyze_software_meta_param_module_related_func_default,\n \"hardsigmoid\": analyze_software_meta_param_module_related_func_default,\n \"dropout\": analyze_software_meta_param_module_related_func_default,\n # default:\n \"default\": analyze_software_meta_param_module_related_func_default,\n },\n # builtin func\n \"builtin_func\": {\n \"mul\": analyze_software_meta_param_builtin_func_default,\n \"sub\": analyze_software_meta_param_builtin_func_default,\n \"add\": analyze_software_meta_param_builtin_func_default,\n \"matmul\": analyze_software_meta_param_builtin_func_default,\n \"bmm\": analyze_software_meta_param_builtin_func_default,\n # default:\n \"default\": analyze_software_meta_param_builtin_func_default,\n },\n \"implicit_func\": {\n \"size\": analyze_software_meta_param_implicit_func_default,\n \"view\": analyze_software_meta_param_implicit_func_default,\n \"flatten\": analyze_software_meta_param_implicit_func_default,\n \"t\": analyze_software_meta_param_implicit_func_default,\n \"constant\": analyze_software_meta_param_implicit_func_default,\n \"default\": analyze_software_meta_param_implicit_func_default,\n \"ge\": analyze_software_meta_param_implicit_func_default,\n \"where\": analyze_software_meta_param_implicit_func_default,\n \"clamp_\": analyze_software_meta_param_implicit_func_default,\n \"abs\": analyze_software_meta_param_implicit_func_default,\n \"stack\": analyze_software_meta_param_implicit_func_default,\n \"getitem\": analyze_software_meta_param_implicit_func_default,\n \"getattr\": analyze_software_meta_param_implicit_func_default,\n },\n \"placeholder\": {\n \"placeholder\": analyze_software_meta_param_placeholder,\n },\n \"get_attr\": {\n \"get_attr\": analyze_software_meta_param_get_attr,\n },\n \"output\": {\n \"output\": analyze_software_meta_param_output,\n },\n \"patched_func\": {\n \"default\": analyze_software_meta_param_patched_func_default,\n },\n}"
}
] | import logging
from ...utils import get_mase_op, get_mase_type
from .software_metadata_layers import SOFTWARE_PARAM_ANALYSIS_LAYERS | 1,107 |
logger = logging.getLogger(__name__)
def add_software_metadata_analysis_pass(graph, pass_args=None):
"""add software metadata
:param graph: a MaseGraph
:type graph: MaseGraph
:param pass_args: this pass does not need any arguments, defaults to None
:type pass_args: _type_, optional
:return: return a tuple of a MaseGraph and an empty dict (no additional info to return)
:rtype: tuple(MaseGraph, Dict)
"""
for node in graph.fx_graph.nodes:
|
logger = logging.getLogger(__name__)
def add_software_metadata_analysis_pass(graph, pass_args=None):
"""add software metadata
:param graph: a MaseGraph
:type graph: MaseGraph
:param pass_args: this pass does not need any arguments, defaults to None
:type pass_args: _type_, optional
:return: return a tuple of a MaseGraph and an empty dict (no additional info to return)
:rtype: tuple(MaseGraph, Dict)
"""
for node in graph.fx_graph.nodes: | mase_op = get_mase_op(node) | 0 | 2023-12-18 12:50:53+00:00 | 2k |
PratikSingh121/ResearchPlot | main.py | [
{
"identifier": "GetPromptTemplates",
"path": "app/prompt_templates.py",
"snippet": "class GetPromptTemplates:\n def __init__(self, topic):\n self.topic = topic\n self.question_parser = CommaSeparatedListOutputParser()\n \n def ResearchPromptTemplate(self, questions = ''):\n if questions != '':\n research_bot_final_prompt = research_bot_prompt + \"\\n\\nQuestions Answered by the user : \" + questions + \"\\n\\n\" + \"Output :\\n\\n\";\n else:\n research_bot_final_prompt = research_bot_prompt + \"\\n\\n\" + \"Output :\\n\\n\";\n \n ResearchPromptTemplate = PromptTemplate(template= research_bot_final_prompt, input_variables=[\"Topic\"])\n # partial_variables={\"format_instructions\": self.research_parser.get_format_instructions()} \n return ResearchPromptTemplate.format_prompt(Topic = self.topic).to_string()\n \n def QuestionPromptTemplate(self):\n QuestionPromptTemplate = PromptTemplate(template= question_forming_prompt, input_variables=[\"Topic\"], partial_variables={\"format_instructions\": self.question_parser.get_format_instructions()})\n\n return QuestionPromptTemplate.format_prompt(Topic = self.topic).to_string()\n \n def MermaidPromptTemplate(self, information):\n MermaidPromptTemplate = PromptTemplate(template= mermaid_maker_prompt, input_variables=[\"information\"])\n\n return MermaidPromptTemplate.format_prompt(information = information).to_string()"
},
{
"identifier": "QuestionFraming",
"path": "app/question_framing.py",
"snippet": "class QuestionFraming:\n def __init__(self, question: list):\n self.question = question\n self.answer = []\n\n def ask_questions(self):\n print('\\033[91m' + \"Answer the following questions: (Leave blank if no answer)\" + '\\033[0m')\n for i in self.question:\n # print in blue color\n print('\\033[94m' + i + \"\\n > \" + '\\033[0m', end=\"\")\n answer = input()\n if answer == \"\":\n answer = \"No answer\"\n self.answer.append(answer)\n # Add more questions if needed\n\n def format_information(self):\n information = dict(zip(self.question, self.answer))\n return information"
},
{
"identifier": "Chains",
"path": "packages/chains.py",
"snippet": "class Chains:\n def __init__(self, *args):\n self._chains = args\n\n def chain(self, PromptTemplate, parser = None):\n message = [\n SystemMessage(content = PromptTemplate)\n ]\n output = llm(message)\n if parser is None:\n return output.content\n else:\n return parser.parse(output.content)"
}
] | from langchain.output_parsers import CommaSeparatedListOutputParser
from app.prompt_templates import GetPromptTemplates
from app.question_framing import QuestionFraming
from packages.chains import Chains
import subprocess
import os | 818 | #app
#package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Getting Topic
print('\033[93m' + "Enter the topic. You can add just a keyword or a description.\nTopic : > " + '\033[0m', end="")
topic = input()
print()
#Objects
Chain = Chains()
PromptTemplate = GetPromptTemplates(topic)
QuestionParser = CommaSeparatedListOutputParser()
# Getting Questions
print('\033[92m' + "Do you want to answer some questions? (y/n) \nAnswer : > " + '\033[0m', end="")
questions_allowed = input()
print()
if questions_allowed == 'y':
questions_allowed = True
else:
questions_allowed = False
if questions_allowed:
QuestionsList = Chain.chain(PromptTemplate = PromptTemplate.QuestionPromptTemplate(), parser = QuestionParser)
| #app
#package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Getting Topic
print('\033[93m' + "Enter the topic. You can add just a keyword or a description.\nTopic : > " + '\033[0m', end="")
topic = input()
print()
#Objects
Chain = Chains()
PromptTemplate = GetPromptTemplates(topic)
QuestionParser = CommaSeparatedListOutputParser()
# Getting Questions
print('\033[92m' + "Do you want to answer some questions? (y/n) \nAnswer : > " + '\033[0m', end="")
questions_allowed = input()
print()
if questions_allowed == 'y':
questions_allowed = True
else:
questions_allowed = False
if questions_allowed:
QuestionsList = Chain.chain(PromptTemplate = PromptTemplate.QuestionPromptTemplate(), parser = QuestionParser) | questionframing = QuestionFraming(QuestionsList) | 1 | 2023-12-17 10:23:00+00:00 | 2k |
yeyt97/AirDropPlus | AirDropPlus.py | [
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n def __init__(self, config_path):\n self.config = configparser.ConfigParser()\n self.config.read(config_path, encoding='utf-8')\n\n self.config_path = config_path\n self.key = self.config.get('config', 'key')\n self.save_path = self.config.get('config', 'save_path')\n if self.save_path == '' or self.save_path is None:\n self.save_path = os.path.join(os.path.expanduser('~'), 'Downloads')\n\n self.port = int(self.config.get('config', 'port'))\n self.basic_notifier = False if self.config.get('config', 'basic_notifier')=='0' else True\n\n self.version = self.config.get('info', 'version')"
},
{
"identifier": "create_notifier",
"path": "notifier.py",
"snippet": "def create_notifier(basic: bool = True):\n return BasicNotifier() if basic else Notifier()"
},
{
"identifier": "Server",
"path": "server.py",
"snippet": "class Server:\n def __init__(self, config: Config, notifier: INotifier):\n self.config = config\n self.notifier = notifier\n self.blueprint = Blueprint('server', __name__)\n self.register_routes()\n self.app = Flask(__name__)\n self.app.register_blueprint(self.blueprint)\n\n def run(self, host: str, port: int):\n self.app.run(host=host, port=port)\n\n def register_routes(self):\n \"\"\" ----------- 统一处理 ----------- \"\"\"\n # 统一认证\n @self.blueprint.before_request\n def check_api_key():\n if request.path == '/':\n return\n auth_header = request.headers.get(\"Authorization\")\n if auth_header != self.config.key:\n return Result.error(msg='密钥错误', code=401)\n version = request.headers.get(\"ShortcutVersion\")\n if version != self.config.version:\n msg = f'版本不匹配\\n\\nWindows版本为:{self.config.version}\\n快捷指令版本为:{version}'\n return Result.error(msg=msg, code=400)\n\n # 统一异常处理\n @self.blueprint.errorhandler(Exception)\n def handle_all_exceptions(error):\n msg = str(error)\n self.notifier.notify('错误', '遇到一个错误' + msg)\n return Result.error(msg, 500)\n\n \"\"\" ----------- 测试 ----------- \"\"\"\n @self.blueprint.route('/')\n def test():\n return 'Hello world!'\n\n \"\"\" ----------- 文件 ----------- \"\"\"\n # 手机端发送接下来要发送的文件列表\n @self.blueprint.route('/file/send/list', methods=['POST'])\n def send_file_list():\n filename_list = request.form['file_list'].splitlines()\n self.notifier.show_future_files(self.config.save_path, filename_list, to_mobile=False)\n return Result.success(msg=\"发送成功\")\n\n # 手机端发送文件\n @self.blueprint.route('/file/send', methods=['POST'])\n def send_file():\n if 'file' not in request.files:\n return Result.error(msg=\"文件不存在\")\n file = request.files['file']\n ori_filename = request.form['filename']\n notify_content = request.form['notify_content']\n filename = utils.avoid_duplicate_filename(self.config.save_path, ori_filename)\n file.save(os.path.join(self.config.save_path, filename))\n\n if notify_content != '':\n ori_filename_list = notify_content.splitlines()\n if len(ori_filename_list) == 1:\n self.notifier.show_received_file(self.config.save_path, filename, ori_filename)\n else:\n self.notifier.show_received_files(self.config.save_path, ori_filename_list)\n return Result.success(msg=\"发送成功\")\n\n # 获取电脑端复制的文件的路径列表\n @self.blueprint.route('/file/receive/list')\n def receive_file_list():\n success, res = utils.get_clipboard_files()\n if not success:\n msg = f'未复制文件: {res}'\n self.notifier.notify('错误', msg)\n return Result.error(msg=msg)\n if len(res) > 0:\n file_names = [os.path.basename(path) for path in res]\n self.notifier.show_future_files(None, file_names, to_mobile=True)\n return Result.success(data=res)\n return Result.error(msg='Windows未复制文件')\n\n # 获取电脑端文件\n @self.blueprint.route('/file/receive', methods=['POST'])\n def receive_file():\n path = request.form.get('path')\n file_name = os.path.basename(path)\n # self.notifier.notify('文件', f'发送: {file_name}')\n with open(path, 'rb') as f:\n file_content = f.read()\n return flask.send_file(io.BytesIO(file_content), as_attachment=True, download_name=file_name)\n\n \"\"\" ----------- 剪贴板 ----------- \"\"\"\n # 获取电脑端剪贴板\n @self.blueprint.route('/clipboard/receive')\n def receive_clipboard():\n success, res = utils.get_clipboard_content()\n if not success:\n msg = f'获取剪贴板出错: {res}'\n self.notifier.notify('错误', msg)\n return Result.error(msg=msg)\n if res != '':\n self.notifier.notify('剪贴板', f'发送: {res}')\n return Result.success(data=res)\n else:\n self.notifier.notify('剪贴板', '发送失败: Windows剪贴板为空')\n return 
Result.error(msg='Windows剪贴板为空')\n\n # 接收手机端剪贴板\n @self.blueprint.route('/clipboard/send', methods=['POST'])\n def send_clipboard():\n clipboard = request.form['clipboard']\n if clipboard is None or clipboard == '':\n self.notifier.notify('剪贴板', '接收失败: iPhone剪贴板为空')\n return Result.error(msg='iPhone剪贴板为空')\n success, msg = utils.set_clipboard_content(clipboard)\n if success:\n self.notifier.notify('剪贴板', f'收到剪贴板内容: {clipboard}')\n else:\n self.notifier.notify('错误', f'设置剪贴板出错: {msg}')\n return Result.success(msg='发送成功') if success else Result.error(msg=msg)"
}
] | import os
import sys
import utils
from config import Config
from notifier import create_notifier
from server import Server | 1,571 |
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
|
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini') | config = Config(config_file_path) | 0 | 2023-12-19 08:16:21+00:00 | 2k |
byeongjun-park/HarmonyView | ldm/thirdp/psp/model_irse.py | [
{
"identifier": "get_blocks",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "def get_blocks(num_layers):\n\tif num_layers == 50:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=4),\n\t\t\tget_block(in_channel=128, depth=256, num_units=14),\n\t\t\tget_block(in_channel=256, depth=512, num_units=3)\n\t\t]\n\telif num_layers == 100:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=13),\n\t\t\tget_block(in_channel=128, depth=256, num_units=30),\n\t\t\tget_block(in_channel=256, depth=512, num_units=3)\n\t\t]\n\telif num_layers == 152:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=8),\n\t\t\tget_block(in_channel=128, depth=256, num_units=36),\n\t\t\tget_block(in_channel=256, depth=512, num_units=3)\n\t\t]\n\telse:\n\t\traise ValueError(\"Invalid number of layers: {}. Must be one of [50, 100, 152]\".format(num_layers))\n\treturn blocks"
},
{
"identifier": "Flatten",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "class Flatten(Module):\n\tdef forward(self, input):\n\t\treturn input.view(input.size(0), -1)"
},
{
"identifier": "bottleneck_IR",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR(Module):\n\tdef __init__(self, in_channel, depth, stride):\n\t\tsuper(bottleneck_IR, self).__init__()\n\t\tif in_channel == depth:\n\t\t\tself.shortcut_layer = MaxPool2d(1, stride)\n\t\telse:\n\t\t\tself.shortcut_layer = Sequential(\n\t\t\t\tConv2d(in_channel, depth, (1, 1), stride, bias=False),\n\t\t\t\tBatchNorm2d(depth)\n\t\t\t)\n\t\tself.res_layer = Sequential(\n\t\t\tBatchNorm2d(in_channel),\n\t\t\tConv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),\n\t\t\tConv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)\n\t\t)\n\n\tdef forward(self, x):\n\t\tshortcut = self.shortcut_layer(x)\n\t\tres = self.res_layer(x)\n\t\treturn res + shortcut"
},
{
"identifier": "bottleneck_IR_SE",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR_SE(Module):\n\tdef __init__(self, in_channel, depth, stride):\n\t\tsuper(bottleneck_IR_SE, self).__init__()\n\t\tif in_channel == depth:\n\t\t\tself.shortcut_layer = MaxPool2d(1, stride)\n\t\telse:\n\t\t\tself.shortcut_layer = Sequential(\n\t\t\t\tConv2d(in_channel, depth, (1, 1), stride, bias=False),\n\t\t\t\tBatchNorm2d(depth)\n\t\t\t)\n\t\tself.res_layer = Sequential(\n\t\t\tBatchNorm2d(in_channel),\n\t\t\tConv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n\t\t\tPReLU(depth),\n\t\t\tConv2d(depth, depth, (3, 3), stride, 1, bias=False),\n\t\t\tBatchNorm2d(depth),\n\t\t\tSEModule(depth, 16)\n\t\t)\n\n\tdef forward(self, x):\n\t\tshortcut = self.shortcut_layer(x)\n\t\tres = self.res_layer(x)\n\t\treturn res + shortcut"
},
{
"identifier": "l2_norm",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "def l2_norm(input, axis=1):\n\tnorm = torch.norm(input, 2, axis, True)\n\toutput = torch.div(input, norm)\n\treturn output"
}
] | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
from ldm.thirdp.psp.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm | 1,154 | # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
| # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" | blocks = get_blocks(num_layers) | 0 | 2023-12-21 04:44:00+00:00 | 2k |
srlabs/black-basta-buster | extractblock.py | [
{
"identifier": "detect_magic_size",
"path": "decryptblocks.py",
"snippet": "def make_int(i):\ndef make_int_or_percent(i):\ndef xor_blocks(var, key, byteorder=sys.byteorder):\ndef write_block(fd, offset, block):\ndef main():\ndef decrypt_file(f, keyblock, fsize=None, is_dry=True, lower_limit=None, upper_limit=None):\n def advise(t, start, end):\n def advise(*args, **kwargs):\nclass Percent(int):\nGB = 1024*1024*1024\nBLOCK_SIZE = 64\n BLOCK_SIZE = len(null)"
},
{
"identifier": "ranges_for_file",
"path": "ranges.py",
"snippet": "def ranges_for_file(path, fsize=None):\n #return ranges_for_file_real(path)\n if fsize is None:\n return ranges_for_file_generated(path)\n else:\n return ranges_for_file_size(fsize)"
}
] | import argparse
import logging
import sys
import logging
import math
from collections import deque
from itertools import islice
from pathlib import Path
from hexdump import hexdump
from decryptblocks import detect_magic_size, make_int, make_int_or_percent, Percent
from ranges import ranges_for_file
from collections import Counter | 1,455 |
log = logging.getLogger(__name__)
def extract_block(fd, offset, size=64):
#log.debug("Reading %r at %r for %r ", fd, offset, size)
fd.seek(offset)
block = fd.read(size)
log.debug("Read %i bytes at %r for %r:\n%s", len(block), offset, size, hexdump(block, result="return"))
return block
def make_int_or_auto(s):
if s.strip() == "auto":
return "auto"
else:
return make_int(s)
### Entropy taken from https://stackoverflow.com/a/37890790/2015768
def eta(data, unit='natural'):
base = {
'shannon' : 2.,
'natural' : math.exp(1),
'hartley' : 10.
}
if len(data) <= 1:
return 0
counts = Counter()
for d in data:
counts[d] += 1
ent = 0
probs = [float(c) / len(data) for c in counts.values()]
for p in probs:
if p > 0.:
ent -= p * math.log(p, base[unit])
return ent
BLOCKSIZE = 64
NULLBLOCK = b'\x00' * BLOCKSIZE
def auto_detect_key_block(f, fsize=None, lower_limit=None, upper_limit=None):
if fsize is None:
fsize = detect_magic_size(f)
block = None
if lower_limit is None:
# we skip the first few block, unless explicitly requested
lower_limit = next(islice(ranges_for_file(f, fsize), 5, 6))[0]
if upper_limit is None:
upper_limit = fsize
CONFIDENCE = 5
with open(f, "rb") as fd:
confidence_blocks = deque(maxlen=CONFIDENCE)
for n, (offset, length) in enumerate(filter(lambda offset_len: lower_limit <= offset_len[0] < upper_limit, ranges_for_file(f, fsize))):
t = True
for i in (-2, -1, 1, 2):
b = extract_block(fd, offset-i*BLOCKSIZE)
t &= b == NULLBLOCK
log.debug("T is now: %s", t)
#if not t:
# raise
if t:
log.debug("Confidence: %s", confidence_blocks)
b = extract_block(fd, offset)
if b == NULLBLOCK:
log.debug("B is null")
else:
log.debug("Adding confidence at %d %r", offset, b)
confidence_blocks.append((offset, b))
if len(confidence_blocks) == CONFIDENCE:
if all((b == x[1] for x in confidence_blocks)):
log.info ("Found blocks: %r", confidence_blocks)
block = b # Urhgs. This is spaghetti control flow. Sorry.
break
else:
log.info("Not all blocks are equal to %r: %s", b, confidence_blocks)
raise
else:
log.info("only %d blocks: %s", len(confidence_blocks), confidence_blocks)
else:
print ("non found")
raise
return block
def main():
argparser = argparse.ArgumentParser(description="Extracts a 64 byte long chunk out of a file. This can be useful for taking that block as an encryption key.")
argparser.add_argument("--hexdump", action="store_true")
argparser.add_argument("--dry", action="store_true",
help="Do not write anything")
argparser.add_argument("--size", type=int, default=0x40, help="Chunk size")
argparser.add_argument("--start-at", type=make_int_or_percent, default=None, help="Start the automatic determination from here, only")
argparser.add_argument("--output", type=Path, help="Write the chunk to a file rather than stdout")
argparser.add_argument("file", type=Path, help="The file to cut a chunk out of")
argparser.add_argument("offset", type=make_int_or_auto, help="Position to cut the chunk out of the file, or 'auto' to detect encrypted zero bytes")
args = argparser.parse_args()
offset = args.offset
f = args.file
size = args.size
start_at = args.start_at
logging.basicConfig(level=logging.INFO)
fsize = detect_magic_size(f)
| #!/usr/bin/env python3
# Copyright 2023 Tobias Mueller <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
except ModuleNotFoundError:
log = logging.getLogger(__name__)
log.warning("hexdump module not found. Try pip install hexdump")
def hexdump(*args, **kwargs):
log.error("Cannot find the hexdump module. Try pip install hexdump")
log = logging.getLogger(__name__)
def extract_block(fd, offset, size=64):
#log.debug("Reading %r at %r for %r ", fd, offset, size)
fd.seek(offset)
block = fd.read(size)
log.debug("Read %i bytes at %r for %r:\n%s", len(block), offset, size, hexdump(block, result="return"))
return block
def make_int_or_auto(s):
if s.strip() == "auto":
return "auto"
else:
return make_int(s)
### Entropy taken from https://stackoverflow.com/a/37890790/2015768
def eta(data, unit='natural'):
base = {
'shannon' : 2.,
'natural' : math.exp(1),
'hartley' : 10.
}
if len(data) <= 1:
return 0
counts = Counter()
for d in data:
counts[d] += 1
ent = 0
probs = [float(c) / len(data) for c in counts.values()]
for p in probs:
if p > 0.:
ent -= p * math.log(p, base[unit])
return ent
BLOCKSIZE = 64
NULLBLOCK = b'\x00' * BLOCKSIZE
def auto_detect_key_block(f, fsize=None, lower_limit=None, upper_limit=None):
if fsize is None:
fsize = detect_magic_size(f)
block = None
if lower_limit is None:
# we skip the first few block, unless explicitly requested
lower_limit = next(islice(ranges_for_file(f, fsize), 5, 6))[0]
if upper_limit is None:
upper_limit = fsize
CONFIDENCE = 5
with open(f, "rb") as fd:
confidence_blocks = deque(maxlen=CONFIDENCE)
for n, (offset, length) in enumerate(filter(lambda offset_len: lower_limit <= offset_len[0] < upper_limit, ranges_for_file(f, fsize))):
t = True
for i in (-2, -1, 1, 2):
b = extract_block(fd, offset-i*BLOCKSIZE)
t &= b == NULLBLOCK
log.debug("T is now: %s", t)
#if not t:
# raise
if t:
log.debug("Confidence: %s", confidence_blocks)
b = extract_block(fd, offset)
if b == NULLBLOCK:
log.debug("B is null")
else:
log.debug("Adding confidence at %d %r", offset, b)
confidence_blocks.append((offset, b))
if len(confidence_blocks) == CONFIDENCE:
if all((b == x[1] for x in confidence_blocks)):
log.info ("Found blocks: %r", confidence_blocks)
block = b # Urhgs. This is spaghetti control flow. Sorry.
break
else:
log.info("Not all blocks are equal to %r: %s", b, confidence_blocks)
raise
else:
log.info("only %d blocks: %s", len(confidence_blocks), confidence_blocks)
else:
print ("non found")
raise
return block
def main():
argparser = argparse.ArgumentParser(description="Extracts a 64 byte long chunk out of a file. This can be useful for taking that block as an encryption key.")
argparser.add_argument("--hexdump", action="store_true")
argparser.add_argument("--dry", action="store_true",
help="Do not write anything")
argparser.add_argument("--size", type=int, default=0x40, help="Chunk size")
argparser.add_argument("--start-at", type=make_int_or_percent, default=None, help="Start the automatic determination from here, only")
argparser.add_argument("--output", type=Path, help="Write the chunk to a file rather than stdout")
argparser.add_argument("file", type=Path, help="The file to cut a chunk out of")
argparser.add_argument("offset", type=make_int_or_auto, help="Position to cut the chunk out of the file, or 'auto' to detect encrypted zero bytes")
args = argparser.parse_args()
offset = args.offset
f = args.file
size = args.size
start_at = args.start_at
logging.basicConfig(level=logging.INFO)
fsize = detect_magic_size(f) | if isinstance(start_at, Percent): | 0 | 2023-12-20 20:04:51+00:00 | 2k |
EntySec/SeaShell | seashell/core/console.py | [
{
"identifier": "Banner",
"path": "seashell/utils/ui/banner.py",
"snippet": "class Banner(object):\n \"\"\" Subclass of seashell.core module.\n\n This subclass of seashell.core module is intended for\n providing tools for printing banners in UI.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n self.config = Config()\n\n self.badges = Badges()\n self.color_script = ColorScript()\n\n def print_random_banner(self) -> None:\n \"\"\" Print random banner.\n\n :return None: None\n \"\"\"\n\n if os.path.exists(self.config.banners_path):\n banners = []\n all_banners = os.listdir(self.config.banners_path)\n\n for banner in all_banners:\n banners.append(banner)\n\n if banners:\n banner = \"\"\n\n while not banner:\n random_banner = random.randint(0, len(banners) - 1)\n banner = self.color_script.parse_file(\n self.config.banners_path + banners[random_banner]\n )\n\n self.badges.print_empty(f\"%newline%end{banner}%end%newline\")\n else:\n self.badges.print_warning(\"No banners detected.\")\n else:\n self.badges.print_warning(\"No banners detected.\")"
},
{
"identifier": "Tip",
"path": "seashell/utils/ui/tip.py",
"snippet": "class Tip(object):\n \"\"\" Subclass of seashell.core module.\n\n This subclass of seashell.core module is intended for\n providing tools for printing tips in UI.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n self.config = Config()\n\n self.badges = Badges()\n self.color_script = ColorScript()\n\n def print_random_tip(self) -> None:\n \"\"\" Print random tip.\n\n :return None: None\n \"\"\"\n\n if os.path.exists(self.config.tips_path):\n tips = []\n all_tips = os.listdir(self.config.tips_path)\n\n for tip in all_tips:\n tips.append(tip)\n\n if tips:\n tip = \"\"\n\n while not tip:\n random_tip = random.randint(0, len(tips) - 1)\n tip = self.color_script.parse_file(\n self.config.tips_path + tips[random_tip]\n )\n\n self.badges.print_empty(f\"%newline%endSeaShell Tip: {tip}%end%newline\")\n else:\n self.badges.print_warning(\"No tips detected.\")\n else:\n self.badges.print_warning(\"No tips detected.\")"
},
{
"identifier": "Config",
"path": "seashell/lib/config.py",
"snippet": "class Config(object):\n \"\"\" Subclass of seashell.core module.\n\n This subclass of seashell.core module is intended for providing\n basic configuration for SeaShell.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n self.user_path = f'{pathlib.Path.home()}/.seashell/'\n self.base_path = f'{os.path.dirname(os.path.dirname(__file__))}/'\n self.data_path = self.base_path + 'data/'\n\n self.banners_path = self.data_path + 'banners/'\n self.tips_path = self.data_path + 'tips/'\n\n self.modules_path = self.base_path + 'modules/'\n self.plugins_path = self.base_path + 'plugins/'\n self.commands_path = self.base_path + 'commands/'\n\n self.loot_path = self.user_path + 'loot/'\n\n def setup(self) -> None:\n \"\"\" Setup config and create paths.\n\n :return None: None\n \"\"\"\n\n if not os.path.exists(self.user_path):\n os.mkdir(self.user_path)\n\n if not os.path.exists(self.loot_path):\n os.mkdir(self.loot_path)"
}
] | import os
import cmd
import sys
from badges import Badges, Tables
from colorscript import ColorScript
from hatsploit.lib.commands import Commands
from hatsploit.lib.runtime import Runtime
from seashell.utils.ui.banner import Banner
from seashell.utils.ui.tip import Tip
from seashell.lib.config import Config | 1,297 | """
MIT License
Copyright (c) 2020-2024 EntySec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class Console(cmd.Cmd):
""" Subclass of seashell.core module.
This subclass of seashell.core modules is intended for providing
main SeaShell Framework console interface.
"""
def __init__(self) -> None:
super().__init__()
cmd.Cmd.__init__(self)
self.badges = Badges()
self.tables = Tables()
self.banner = Banner()
| """
MIT License
Copyright (c) 2020-2024 EntySec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class Console(cmd.Cmd):
""" Subclass of seashell.core module.
This subclass of seashell.core modules is intended for providing
main SeaShell Framework console interface.
"""
def __init__(self) -> None:
super().__init__()
cmd.Cmd.__init__(self)
self.badges = Badges()
self.tables = Tables()
self.banner = Banner() | self.tip = Tip() | 1 | 2023-12-17 04:14:16+00:00 | 2k |
FlagOpen/TACO | train.py | [
{
"identifier": "Trainer",
"path": "train_utils.py",
"snippet": "class Trainer(transformers.Trainer):\n \"\"\"Use CosineAnnealingLR from pytorch \n \"\"\"\n \n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n \"\"\"\n Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or\n passed as an argument.\n\n Args:\n num_training_steps (int): The number of training steps to do.\n \"\"\"\n if self.lr_scheduler is None:\n num_warmup_steps=self.args.get_warmup_steps(num_training_steps)\n if getattr(self.args, 'use_cosine_anneal_with_warmup', False):\n lr_max=1\n lr_min=1e-1\n cosine_anneal_with_warmup = lambda cur_iter: max(cur_iter / num_warmup_steps, 1e-9) if cur_iter < num_warmup_steps else \\\n (lr_min + 0.5*(lr_max-lr_min)*(1.0+math.cos((cur_iter-num_warmup_steps)/(num_training_steps-num_warmup_steps)*math.pi)))\n \n self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer=self.optimizer if optimizer is None else optimizer, \n lr_lambda=cosine_anneal_with_warmup,\n )\n else:\n self.lr_scheduler = get_scheduler(\n self.args.lr_scheduler_type,\n optimizer=self.optimizer if optimizer is None else optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps,\n )\n self._created_lr_scheduler = True\n return self.lr_scheduler"
},
{
"identifier": "DEFAULT_PAD_TOKEN",
"path": "datamodule/constants.py",
"snippet": "DEFAULT_PAD_TOKEN = \"[PAD]\""
},
{
"identifier": "DEFAULT_EOS_TOKEN",
"path": "datamodule/constants.py",
"snippet": "DEFAULT_EOS_TOKEN = \"<|endoftext|>\""
},
{
"identifier": "DEFAULT_BOS_TOKEN",
"path": "datamodule/constants.py",
"snippet": "DEFAULT_BOS_TOKEN = \"<|endoftext|>\""
},
{
"identifier": "TacoDataset",
"path": "datamodule/taco_dataset.py",
"snippet": "class TacoDataset(Dataset):\n \"\"\"Dataset for fine-tune.\"\"\"\n \n def __init__(self, data_path: str, debug: bool=False, learning_skill: int=None):\n super(TacoDataset, self).__init__()\n logging.warning(\"Loading tokenized data...\")\n if os.path.exists(data_path):\n dataset = load_from_disk(data_path).shuffle()\n else:\n raise ValueError(\" The specified data_path does not exist. Please provide a tokenized dataset\")\n \n if not all(key in dataset.column_names for key in ['input_ids', 'source_ids_lens']):\n raise ValueError(\"Data has not been tokenized. Please tokenize the data first.\")\n if debug:\n dataset = dataset.select(range(1000))\n if learning_skill:\n dataset = dataset.filter(lambda entry: entry['labels'][learning_skill])\n \n logging.warning(\"Collect columns of hf dataset... This may take some time...\")\n input_ids = dataset['input_ids']\n source_ids_lens = dataset['source_ids_lens']\n \n self.learning_skill = None\n if learning_skill:\n scores = dataset['scores']\n scores = preprocess_scores(scores, source_ids_lens, learning_skill)\n self.scores = scores\n self.learning_skill = learning_skill\n \n logging.warning(\"Processing inputs...\")\n data_dict = preprocess(input_ids, source_ids_lens)\n \n self.input_ids = data_dict[\"input_ids\"]\n self.labels = data_dict[\"labels\"]\n\n def __len__(self):\n return len(self.input_ids)\n\n def __getitem__(self, i) -> Dict[str, torch.Tensor]:\n if self.learning_skill:\n return dict(input_ids=self.input_ids[i], labels=self.labels[i], scores=self.scores[i])\n else:\n return dict(input_ids=self.input_ids[i], labels=self.labels[i])"
},
{
"identifier": "DataCollatorForTacoDataset",
"path": "datamodule/taco_dataset.py",
"snippet": "class DataCollatorForTacoDataset(object):\n \"\"\"Collate examples for fine-tune.\"\"\"\n\n tokenizer: transformers.PreTrainedTokenizer\n\n def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:\n input_ids, labels = tuple([instance[key] for instance in instances] for key in (\"input_ids\", \"labels\"))\n input_ids = torch.nn.utils.rnn.pad_sequence(\n input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id\n )\n labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)\n return dict(\n input_ids=input_ids,\n labels=labels,\n )"
}
] | from typing import Optional, Dict
from dataclasses import dataclass, field
from train_utils import Trainer
from datamodule import DEFAULT_PAD_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_BOS_TOKEN, TacoDataset, DataCollatorForTacoDataset
import transformers | 1,568 | """
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune"""
| """
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune""" | train_dataset = TacoDataset(data_path=data_args.data_path) | 4 | 2023-12-20 03:12:01+00:00 | 2k |